Dataset columns (name: feature type, observed range or cardinality):

hexsha: stringlengths, 40 to 40
size: int64, 3 to 1.03M
ext: stringclasses, 10 values
lang: stringclasses, 1 value
max_stars_repo_path: stringlengths, 3 to 972
max_stars_repo_name: stringlengths, 6 to 130
max_stars_repo_head_hexsha: stringlengths, 40 to 78
max_stars_repo_licenses: listlengths, 1 to 10
max_stars_count: int64, 1 to 191k
max_stars_repo_stars_event_min_datetime: stringlengths, 24 to 24
max_stars_repo_stars_event_max_datetime: stringlengths, 24 to 24
max_issues_repo_path: stringlengths, 3 to 972
max_issues_repo_name: stringlengths, 6 to 130
max_issues_repo_head_hexsha: stringlengths, 40 to 78
max_issues_repo_licenses: listlengths, 1 to 10
max_issues_count: int64, 1 to 116k
max_issues_repo_issues_event_min_datetime: stringlengths, 24 to 24
max_issues_repo_issues_event_max_datetime: stringlengths, 24 to 24
max_forks_repo_path: stringlengths, 3 to 972
max_forks_repo_name: stringlengths, 6 to 130
max_forks_repo_head_hexsha: stringlengths, 40 to 78
max_forks_repo_licenses: listlengths, 1 to 10
max_forks_count: int64, 1 to 105k
max_forks_repo_forks_event_min_datetime: stringlengths, 24 to 24
max_forks_repo_forks_event_max_datetime: stringlengths, 24 to 24
content: stringlengths, 3 to 1.03M
avg_line_length: float64, 1.13 to 941k
max_line_length: int64, 2 to 941k
alphanum_fraction: float64, 0 to 1
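The records below follow this schema in column order, one value per field, with the flattened source file in the content field. As an illustration only, here is a minimal sketch of filtering such records, assuming each row has been parsed into a Python dict keyed by the column names above; the filter_rows name and the thresholds are hypothetical:

    from typing import Any, Dict, List

    def filter_rows(rows: List[Dict[str, Any]],
                    min_stars: int = 10,
                    max_line_len: int = 200) -> List[Dict[str, Any]]:
        """Keep Python files from starred repos whose longest line is reasonable."""
        kept = []
        for row in rows:
            stars = row.get("max_stars_count") or 0  # star/issue/fork counts may be null
            if row.get("lang") != "Python":
                continue
            if stars < min_stars:
                continue
            if row.get("max_line_length", 0) > max_line_len:
                continue
            kept.append(row)
        return kept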
02b7a353f4235e6cd31840307b143e00b9620e1e
1,000
py
Python
day-08/part-1/loic.py
lypnol/adventofcode-2017
03ced3df3eb80e5c7965c4120e3932919067cb15
[ "MIT" ]
16
2017-12-02T11:56:25.000Z
2018-02-10T15:09:23.000Z
day-08/part-1/loic.py
lypnol/adventofcode-2017
03ced3df3eb80e5c7965c4120e3932919067cb15
[ "MIT" ]
19
2017-12-01T07:54:22.000Z
2017-12-19T17:41:02.000Z
day-08/part-1/loic.py
lypnol/adventofcode-2017
03ced3df3eb80e5c7965c4120e3932919067cb15
[ "MIT" ]
4
2017-12-04T23:58:12.000Z
2018-02-01T08:53:16.000Z
from submission import Submission


class LoicSubmission(Submission):
    memory = {}

    def run(self, s):
        self.memory = {}
        for line in s.split("\n"):
            data = line.split()
            check = False
            if data[5] == "<":
                if self.get(data[4]) < int(data[6]):
                    check = True
            elif data[5] == ">":
                if self.get(data[4]) > int(data[6]):
                    check = True
            elif data[5] == "<=":
                if self.get(data[4]) <= int(data[6]):
                    check = True
            elif data[5] == ">=":
                if self.get(data[4]) >= int(data[6]):
                    check = True
            elif data[5] == "==":
                if self.get(data[4]) == int(data[6]):
                    check = True
            else:
                if self.get(data[4]) != int(data[6]):
                    check = True
            if check:
                if data[1] == "inc":
                    self.memory[data[0]] = self.get(data[0]) + int(data[2])
                else:
                    self.memory[data[0]] = self.get(data[0]) - int(data[2])
        return max(self.memory.values())

    def get(self, key):
        if key not in self.memory:
            self.memory[key] = 0
            return 0
        return self.memory[key]
22.222222
60
0.547
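For context, the file above is an Advent of Code 2017 day 8 (part 1) solution: each instruction names a register, inc or dec, an amount, and a condition on another register. A hedged usage sketch (the Submission base class comes from the repository's own runner and is assumed here to be instantiable without arguments):

    program = "\n".join([
        "b inc 5 if a > 1",
        "a inc 1 if b < 5",
        "c dec -10 if a >= 1",
        "c inc -20 if c == 10",
    ])

    # run() executes every instruction and returns the largest register value.
    print(LoicSubmission().run(program))  # expected: 1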
5b1a707e0c287ca71e75c769c45ca48964217df7
1,763
py
Python
examples/mplot3d/trisurf3d_demo2.py
jbbrokaw/matplotlib
86ec1b6fc5628bfb2d09797c58d7eed0ca8c2427
[ "MIT", "BSD-3-Clause" ]
1
2019-04-15T09:40:53.000Z
2019-04-15T09:40:53.000Z
examples/mplot3d/trisurf3d_demo2.py
jbbrokaw/matplotlib
86ec1b6fc5628bfb2d09797c58d7eed0ca8c2427
[ "MIT", "BSD-3-Clause" ]
2
2021-05-10T17:57:41.000Z
2021-07-26T16:23:09.000Z
examples/mplot3d/trisurf3d_demo2.py
jbbrokaw/matplotlib
86ec1b6fc5628bfb2d09797c58d7eed0ca8c2427
[ "MIT", "BSD-3-Clause" ]
1
2015-12-21T07:24:54.000Z
2015-12-21T07:24:54.000Z
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.tri as mtri

# u, v are parameterisation variables
u = (np.linspace(0, 2.0 * np.pi, endpoint=True, num=50) * np.ones((10, 1))).flatten()
v = np.repeat(np.linspace(-0.5, 0.5, endpoint=True, num=10), repeats=50).flatten()

# This is the Mobius mapping, taking a u, v pair and returning an x, y, z
# triple
x = (1 + 0.5 * v * np.cos(u / 2.0)) * np.cos(u)
y = (1 + 0.5 * v * np.cos(u / 2.0)) * np.sin(u)
z = 0.5 * v * np.sin(u / 2.0)

# Triangulate parameter space to determine the triangles
tri = mtri.Triangulation(u, v)

fig = plt.figure()
ax = fig.add_subplot(1, 1, 1, projection='3d')

# The triangles in parameter space determine which x, y, z points are
# connected by an edge
ax.plot_trisurf(x, y, z, triangles=tri.triangles, cmap=plt.cm.Spectral)
ax.set_zlim(-1, 1)

# First create the x and y coordinates of the points.
n_angles = 36
n_radii = 8
min_radius = 0.25
radii = np.linspace(min_radius, 0.95, n_radii)

angles = np.linspace(0, 2*np.pi, n_angles, endpoint=False)
angles = np.repeat(angles[..., np.newaxis], n_radii, axis=1)
angles[:, 1::2] += np.pi/n_angles

x = (radii*np.cos(angles)).flatten()
y = (radii*np.sin(angles)).flatten()
z = (np.cos(radii)*np.cos(angles*3.0)).flatten()

# Create the Triangulation; no triangles so Delaunay triangulation created.
triang = mtri.Triangulation(x, y)

# Mask off unwanted triangles.
xmid = x[triang.triangles].mean(axis=1)
ymid = y[triang.triangles].mean(axis=1)
mask = np.where(xmid*xmid + ymid*ymid < min_radius*min_radius, 1, 0)
triang.set_mask(mask)

# tripcolor plot.
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1, projection='3d')
ax.plot_trisurf(triang, z, cmap=plt.cm.CMRmap)

plt.show()
31.482143
85
0.694271
0e3aafa402b8f2563b909edd421444aaa0fcb88a
17,064
py
Python
mars/tensor/linalg/lu.py
smartguo/mars
5fdd6e2d520fcdc3b7441379e0abaf0e07c6212a
[ "Apache-2.0" ]
2
2019-03-29T04:11:10.000Z
2020-07-08T10:19:54.000Z
mars/tensor/linalg/lu.py
smartguo/mars
5fdd6e2d520fcdc3b7441379e0abaf0e07c6212a
[ "Apache-2.0" ]
null
null
null
mars/tensor/linalg/lu.py
smartguo/mars
5fdd6e2d520fcdc3b7441379e0abaf0e07c6212a
[ "Apache-2.0" ]
null
null
null
#!/usr/bin/env python # -*- coding: utf-8 -*- # Copyright 1999-2020 Alibaba Group Holding Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import numpy as np from numpy.linalg import LinAlgError from ... import opcodes as OperandDef from ...serialize import KeyField from ...core import ExecutableTuple from ...utils import check_chunks_unknown_shape, recursive_tile from ...tiles import TilesError from ..array_utils import device, as_same_device, is_sparse_module from ..operands import TensorHasInput, TensorOperandMixin from ..datasource import tensor as astensor class TensorLU(TensorHasInput, TensorOperandMixin): _op_type_ = OperandDef.LU _input = KeyField('input') def __init__(self, dtype=None, sparse=False, **kw): super().__init__(_dtype=dtype, _sparse=sparse, **kw) @property def output_limit(self): return 3 def __call__(self, a): import scipy.linalg a = astensor(a) if a.ndim != 2: raise LinAlgError(f'{a.ndim}-dimensional array given. ' 'Tensor must be two-dimensional') if a.shape[0] > a.shape[1]: p_shape = (a.shape[0],) * 2 l_shape = a.shape u_shape = (a.shape[1],) * 2 elif a.shape[0] < a.shape[1]: p_shape = (a.shape[0],) * 2 l_shape = (a.shape[0],) * 2 u_shape = a.shape else: p_shape, l_shape, u_shape = (a.shape,) * 3 tiny_p, tiny_l, tiny_u = scipy.linalg.lu(np.array([[1, 2], [2, 5]], dtype=a.dtype)) order = a.order p, l, u = self.new_tensors([a], kws=[ {'side': 'p', 'dtype': tiny_p.dtype, 'shape': p_shape, 'order': order}, {'side': 'l', 'dtype': tiny_l.dtype, 'shape': l_shape, 'order': order}, {'side': 'u', 'dtype': tiny_u.dtype, 'shape': u_shape, 'order': order}, ]) return ExecutableTuple([p, l, u]) @classmethod def _tile_one_chunk(cls, op): p, l, u = op.outputs chunk_op = op.copy().reset_key() chunk_kws = [ {'side': 'p', 'dtype': p.dtype, 'shape': p.shape, 'order': p.order, 'index': (0,) * p.ndim}, {'side': 'l', 'dtype': l.dtype, 'shape': l.shape, 'order': l.order, 'index': (0,) * l.ndim}, {'side': 'u', 'dtype': u.dtype, 'shape': u.shape, 'order': u.order, 'index': (0,) * u.ndim} ] chunks = chunk_op.new_chunks(op.input.chunks, kws=chunk_kws) new_op = op.copy() kws = [p.params, l.params, u.params] for i, out in enumerate([p, l, u]): kws[i]['nsplits'] = tuple((s,) for s in out.shape) kws[i]['chunks'] = [chunks[i]] return new_op.new_tensors(op.inputs, kws=kws) @classmethod def tile(cls, op): if len(op.input.chunks) == 1: return cls._tile_one_chunk(op) from ..arithmetic.subtract import TensorSubtract from ..arithmetic.add import TensorTreeAdd from ..base.transpose import TensorTranspose from ..merge.vstack import vstack from ..merge.hstack import hstack from ..datasource.zeros import TensorZeros, zeros from .dot import TensorDot from .solve_triangular import TensorSolveTriangular P, L, U = op.outputs raw_in_tensor = in_tensor = op.input out_tensor = op.outputs[0] if in_tensor.shape[0] > in_tensor.shape[1]: zero_tensor = zeros((in_tensor.shape[0], in_tensor.shape[0] - in_tensor.shape[1]), dtype=in_tensor.dtype, sparse=in_tensor.issparse(), gpu=in_tensor.op.gpu, chunk_size=(in_tensor.nsplits[0], 
max(in_tensor.nsplits[1])), order=in_tensor.order.value) in_tensor = hstack([in_tensor, zero_tensor]) recursive_tile(in_tensor) elif in_tensor.shape[0] < in_tensor.shape[1]: zero_tensor = zeros((in_tensor.shape[1] - in_tensor.shape[0], in_tensor.shape[1]), dtype=in_tensor.dtype, sparse=in_tensor.issparse(), gpu=in_tensor.op.gpu, chunk_size=(max(in_tensor.nsplits[0]), in_tensor.nsplits[1]), order=in_tensor.order.value) in_tensor = vstack([in_tensor, zero_tensor]) recursive_tile(in_tensor) check_chunks_unknown_shape([in_tensor], TilesError) if in_tensor.nsplits[0] != in_tensor.nsplits[1]: # all chunks on diagonal should be square nsplits = in_tensor.nsplits[0] in_tensor = in_tensor.rechunk([nsplits, nsplits])._inplace_tile() p_chunks, p_invert_chunks, lower_chunks, l_permuted_chunks, upper_chunks = {}, {}, {}, {}, {} for i in range(in_tensor.chunk_shape[0]): for j in range(in_tensor.chunk_shape[1]): if i < j: chunk_shape = (in_tensor.nsplits[0][i], in_tensor.nsplits[1][j]) p_chunk = TensorZeros(sparse=op.sparse, order=out_tensor.order.value).new_chunk( None, shape=chunk_shape, index=(i, j), order=out_tensor.order) lower_chunk = TensorZeros(sparse=op.sparse, order=out_tensor.order.value).new_chunk( None, shape=chunk_shape, index=(i, j), order=out_tensor.order) p_chunks[p_chunk.index] = p_chunk lower_chunks[lower_chunk.index] = lower_chunk target_u = in_tensor.cix[i, j] p_invert = p_invert_chunks[i, i] target = TensorDot(dtype=U.dtype, sparse=U.op.sparse).new_chunk( [p_invert, target_u], shape=(p_invert.shape[0], target_u.shape[1]), order=out_tensor.order) if i > 0: prev_chunks_u = [] for p in range(i): a, b = lower_chunks[i, p], upper_chunks[p, j] prev_chunk = TensorDot(dtype=U.dtype, sparse=U.op.sparse).new_chunk( [a, b], shape=(a.shape[0], b.shape[1]), order=out_tensor.order) prev_chunks_u.append(prev_chunk) if len(prev_chunks_u) == 1: s = prev_chunks_u[0] else: tree_add_op = TensorTreeAdd(dtype=prev_chunks_u[0].dtype, sparse=op.sparse) s = tree_add_op.new_chunk(prev_chunks_u, shape=prev_chunks_u[0].shape) target = TensorSubtract(dtype=U.dtype, lhs=target, rhs=s, order=out_tensor.order.value).new_chunk( [target, s], shape=target.shape, order=out_tensor.order) upper_chunk = TensorSolveTriangular(lower=True, dtype=U.dtype, strict=False, sparse=lower_chunks[i, i].op.sparse).new_chunk( [lower_chunks[i, i], target], shape=target.shape, index=(i, j), order=out_tensor.order) upper_chunks[upper_chunk.index] = upper_chunk elif i == j: target = in_tensor.cix[i, j] if i > 0: prev_chunks = [] for p in range(i): a, b = l_permuted_chunks[i, p], upper_chunks[p, j] prev_chunk = TensorDot(dtype=a.dtype, sparse=op.sparse).new_chunk( [a, b], shape=(a.shape[0], b.shape[1]), order=out_tensor.order) prev_chunks.append(prev_chunk) if len(prev_chunks) == 1: s = prev_chunks[0] else: tree_add_op = TensorTreeAdd(dtype=prev_chunks[0].dtype, sparse=op.sparse) s = tree_add_op.new_chunk(prev_chunks, shape=prev_chunks[0].shape) target = TensorSubtract(dtype=L.dtype, lhs=target, rhs=s, order=out_tensor.order.value).new_chunk( [target, s], shape=target.shape) new_op = TensorLU(dtype=op.dtype, sparse=target.op.sparse) lu_chunks = new_op.new_chunks([target], index=(i, j), order=out_tensor.order, kws=[ {'side': 'p', 'dtype': P.dtype, 'shape': target.shape}, {'side': 'l', 'dtype': L.dtype, 'shape': target.shape}, {'side': 'u', 'dtype': U.dtype, 'shape': target.shape}, ]) p_chunk, lower_chunk, upper_chunk = lu_chunks # transposed p equals to inverted p p_chunk_invert = TensorTranspose(dtype=p_chunk.dtype, 
sparse=op.sparse).new_chunk( [p_chunk], shape=p_chunk.shape, index=p_chunk.index, order=out_tensor.order) p_chunks[p_chunk.index] = p_chunk p_invert_chunks[p_chunk_invert.index] = p_chunk_invert lower_chunks[lower_chunk.index] = lower_chunk upper_chunks[upper_chunk.index] = upper_chunk # l_permuted should be transferred to the final lower triangular for p in range(i): l_permuted_chunk = l_permuted_chunks[i, p] l_chunk = TensorDot(dtype=L.dtype, sparse=L.op.sparse).new_chunk( [p_chunk_invert, l_permuted_chunk], shape=(p_chunk_invert.shape[0], l_permuted_chunk.shape[1]), index=l_permuted_chunk.index, order=out_tensor.order ) lower_chunks[l_permuted_chunk.index] = l_chunk else: chunk_shape = (in_tensor.nsplits[0][i], in_tensor.nsplits[1][j]) p_chunk = TensorZeros(sparse=op.sparse, order=out_tensor.order.value).new_chunk( None, shape=chunk_shape, index=(i, j), order=out_tensor.order) upper_chunk = TensorZeros(sparse=op.sparse, order=out_tensor.order.value).new_chunk( None, shape=chunk_shape, index=(i, j), order=out_tensor.order) p_chunks[p_chunk.index] = p_chunk upper_chunks[upper_chunk.index] = upper_chunk target_l = in_tensor.cix[i, j] if j > 0: prev_chunks_l = [] for p in range(j): a, b = l_permuted_chunks[i, p], upper_chunks[p, j] prev_chunk = TensorDot(dtype=L.dtype, sparse=L.op.sparse).new_chunk( [a, b], shape=(a.shape[0], b.shape[1]), order=out_tensor.order) prev_chunks_l.append(prev_chunk) if len(prev_chunks_l) == 1: s = prev_chunks_l[0] else: tree_add_op = TensorTreeAdd(dtype=prev_chunks_l[0].dtype, sparse=op.sparse) s = tree_add_op.new_chunk(prev_chunks_l, shape=prev_chunks_l[0].shape) target_l = TensorSubtract(dtype=L.dtype, lhs=target_l, rhs=s, order=out_tensor.order.value).new_chunk( [target_l, s], shape=target_l.shape, order=out_tensor.order) u = upper_chunks[j, j] a_transpose = TensorTranspose(dtype=u.dtype, sparse=op.sparse).new_chunk([u], shape=u.shape) target_transpose = TensorTranspose(dtype=target_l.dtype, sparse=op.sparse).new_chunk( [target_l], shape=target_l.shape) lower_permuted_chunk = TensorSolveTriangular( lower=True, dtype=L.dtype, strict=False, sparse=op.sparse).new_chunk( [a_transpose, target_transpose], shape=target_l.shape, index=(i, j), order=out_tensor.order) lower_transpose = TensorTranspose(dtype=lower_permuted_chunk.dtype, sparse=op.sparse).new_chunk( [lower_permuted_chunk], shape=lower_permuted_chunk.shape, index=lower_permuted_chunk.index) l_permuted_chunks[lower_permuted_chunk.index] = lower_transpose new_op = op.copy() kws = [ {'chunks': list(p_chunks.values()), 'nsplits': in_tensor.nsplits, 'dtype': P.dtype, 'shape': P.shape, 'order': P.order}, {'chunks': list(lower_chunks.values()), 'nsplits': in_tensor.nsplits, 'dtype': L.dtype, 'shape': L.shape, 'order': L.order}, {'chunks': list(upper_chunks.values()), 'nsplits': in_tensor.nsplits, 'dtype': U.dtype, 'shape': U.shape, 'order': U.order} ] if raw_in_tensor.shape[0] == raw_in_tensor.shape[1]: return new_op.new_tensors(op.inputs, kws=kws) p, l_, u = new_op.new_tensors(op.inputs, kws=kws) if raw_in_tensor.shape[0] > raw_in_tensor.shape[1]: l_ = l_[:, :raw_in_tensor.shape[1]]._inplace_tile() u = u[:raw_in_tensor.shape[1], :raw_in_tensor.shape[1]]._inplace_tile() else: p = p[:raw_in_tensor.shape[0], :raw_in_tensor.shape[0]]._inplace_tile() l_ = l_[:raw_in_tensor.shape[0], :raw_in_tensor.shape[0]]._inplace_tile() u = u[:raw_in_tensor.shape[0], :]._inplace_tile() kws = [ {'chunks': p.chunks, 'nsplits': p.nsplits, 'dtype': P.dtype, 'shape': p.shape, 'order': p.order}, {'chunks': l_.chunks, 'nsplits': 
l_.nsplits, 'dtype': l_.dtype, 'shape': l_.shape, 'order': l_.order}, {'chunks': u.chunks, 'nsplits': u.nsplits, 'dtype': u.dtype, 'shape': u.shape, 'order': u.order} ] return new_op.new_tensors(op.inputs, kws=kws) @classmethod def execute(cls, ctx, op): (a,), device_id, xp = as_same_device( [ctx[c.key] for c in op.inputs], device=op.device, ret_extra=True) with device(device_id): if xp is np: import scipy.linalg p, l, u = scipy.linalg.lu(a) elif is_sparse_module(xp): p, l, u = xp.lu(a) else: raise NotImplementedError pc, lc, uc = op.outputs ctx[pc.key] = p ctx[lc.key] = l ctx[uc.key] = u def lu(a): """ LU decomposition The decomposition is:: A = P L U where P is a permutation matrix, L lower triangular with unit diagonal elements, and U upper triangular. Parameters ---------- a : (M, N) array_like Array to decompose Returns ------- p : (M, M) ndarray Permutation matrix l : (M, K) ndarray Lower triangular or trapezoidal matrix with unit diagonal. K = min(M, N) u : (K, N) ndarray Upper triangular or trapezoidal matrix Examples -------- >>> import mars.tensor as mt >>> A = mt.array([[1,2],[2,3]]) >>> A.execute() array([[ 1, 2], [ 2, 3]]) >>> P, L, U = mt.linalg.lu(A) >>> P.execute() array([[ 0, 1], [ 1, 0]]) >>> L.execute() array([[ 1, 0], [ 0.5, 1]]) >>> U.execute() array([[ 2, 3], [ 0, 0.5]]) >>> mt.dot(P.dot(L), U).execute() # verify that PL * U = A array([[ 1, 2], [ 2, 3]]) """ op = TensorLU(sparse=a.issparse()) return op(a)
47.798319
116
0.51676
0cb25c58ec30c5b52da2fb852613697fe56a5dcb
7,335
py
Python
pyspedas/mms/examples/tutorials/mms_data_6march19.py
amanotk/pyspedas
ba38f9a318fe96911a0fb3d6fce53e8b1a534ff4
[ "MIT" ]
null
null
null
pyspedas/mms/examples/tutorials/mms_data_6march19.py
amanotk/pyspedas
ba38f9a318fe96911a0fb3d6fce53e8b1a534ff4
[ "MIT" ]
null
null
null
pyspedas/mms/examples/tutorials/mms_data_6march19.py
amanotk/pyspedas
ba38f9a318fe96911a0fb3d6fce53e8b1a534ff4
[ "MIT" ]
null
null
null
'''
MMS Data in Python with pySPEDAS

Eric Grimes - [email protected]

*** Disclaimer: all of this is beta; please submit bug reports! ***
*** Load times can be slow for some MMS data - should be fixed soon! ***

Tentative agenda:
- Introduction
- Loading MMS Data in Python
- Plotting MMS Data in Python
- Analysis Tools
- Future Plans
'''

'''
==========================================================================
Introduction

pySPEDAS bleeding edge: https://github.com/spedas/pyspedas

pyTplot: https://github.com/MAVENSDC/PyTplot

MMS Datasets: https://lasp.colorado.edu/mms/sdc/public/datasets/
==========================================================================
'''

# installing: pip install pyspedas or download bleeding edge from Github

# configuration: edit mms_config.py
# MMS_DATA_DIR environment variable

# PYTHONSTARTUP file: mms_python_startup.py - allows you to avoid having to
# manually import the load routines and basic tools

'''
==========================================================================
Loading MMS Data in Python

Load routines: mms_load_xxx, where xxx is:
MEC: Magnetic Ephemeris Coordinates
FGM: Flux-gate Magnetometer
SCM: Search-coil Magnetometer
EDP: Electric field Double Probe (SDP+ADP)
EDI: Electron Drift Instrument
EIS: Energetic Ion Spectrometer
FEEPS: Fly's Eye Energetic Particle Sensor
HPCA: Hot Plasma Composition Analyzer
FPI: Fast Plasma Investigation
ASPOC: Active Spacecraft Potential Control
==========================================================================
'''

from pyspedas.mms import mms_load_mec

# start by loading some ephemeris / coordinates data;
# the default trange is Oct 16, 2015, default probe is 1
# and the default data rate is 'srvy'
mms_load_mec(probe=4)

# find info on a load routine
help(mms_load_mec)

from pyspedas.mms import mms_load_fgm

# note that the keywords are the same as in IDL
mms_load_fgm(probe='4', data_rate='brst', trange=['2015-10-16/13:06', '2015-10-16/13:07'], time_clip=True)

# find which variables were loaded
from pytplot import tplot_names
tplot_names()

from pyspedas.mms import mms_load_edp

# load some burst mode electric field data
mms_load_edp(probe='4', data_rate='brst', trange=['2015-10-16/13:06', '2015-10-16/13:07'], time_clip=True)

from pyspedas import tnames

# the tnames function supports filtering with wild cards, e.g.,
# to find the E-field variables:
dce_vars = tnames('*_edp_dce_*')

# trange also accepts datetime objects
# note: be aware of potential time zone issues
from pyspedas.mms import mms_load_fpi
from datetime import datetime
from datetime import timezone as tz

start_time = datetime(year=2015, month=10, day=16, hour=13, minute=6, tzinfo=tz.utc)
end_time = datetime(year=2015, month=10, day=16, hour=13, minute=7, tzinfo=tz.utc)

mms_load_fpi(trange=[start_time, end_time], probe='4', datatype='des-moms', data_rate='brst')

# to return the actual data values, use get_data
from pytplot import get_data

times, fgm_data = get_data('mms4_fgm_b_gsm_brst_l2')

# times are unix time (seconds since 1 January 1970)
print(times[0])

# FGM data include the magnitude
fgm_data[0]

# you can convert the unix time to a string with time_string
from pyspedas import time_string
print(time_string(times[0]))

# and convert back to unix time using time_double
from pyspedas import time_double
print(time_double('2015-10-16 13:06:00.00451'))

# create new tplot variables with store_data
from pytplot import store_data

# save the B-field vector
store_data('b_vector', data={'x': times, 'y': fgm_data[:, 0:3]})

# save the B-field magnitude
store_data('b_mag', data={'x': times, 'y': fgm_data[:, 3]})

# the keywords are very flexible, e.g.,
from pyspedas.mms import mms_load_hpca, mms_load_eis, mms_load_feeps, mms_load_aspoc

# specify multiple probes as integers, and multiple datatypes
mms_load_hpca(probe=[1, 2, 4], data_rate='brst', datatype=['moments', 'ion'], trange=['2015-10-16/13:06', '2015-10-16/13:07'])

mms_load_eis(probe='4', data_rate='brst', datatype=['phxtof', 'extof'], trange=['2015-10-16/13:06', '2015-10-16/13:07'])

mms_load_feeps(get_support_data=True, probe=2, data_rate=['srvy', 'brst'], trange=['2015-10-16/13:06', '2015-10-16/13:07'])

mms_load_aspoc()

'''
==========================================================================
Plotting MMS Data in Python
==========================================================================
'''

from pytplot import tplot

# like in IDL, pyTplot supports strings, lists of strings, as well as tplot variable #s
tplot('mms4_fgm_b_gsm_brst_l2')

tplot(['mms4_fgm_b_gsm_brst_l2', 'mms4_edp_dce_gse_brst_l2'])

# change the plot metadata
from pytplot import options

options('mms4_edp_dce_gse_brst_l2', 'color', ['b', 'g', 'r'])
options('mms4_edp_dce_gse_brst_l2', 'legend_names', ['Ex', 'Ey', 'Ez'])

tplot(['mms4_fgm_b_gsm_brst_l2', 'mms4_edp_dce_gse_brst_l2', 'mms4_des_energyspectr_omni_brst', 'mms4_des_pitchangdist_miden_brst'])

tplot('b_vector')

options('b_vector', 'ytitle', 'MMS4 FGM')
options('b_vector', 'color', ['b', 'g', 'r'])
options('b_vector', 'legend_names', ['Bx', 'By', 'Bz'])

tplot('b_vector')

# add vertical bars at certain times
from pytplot import timebar

timebar([time_double('2015-10-16/13:06:20'), time_double('2015-10-16/13:06:40')], varname='mms4_fgm_b_gsm_brst_l2')

tplot('mms4_fgm_b_gsm_brst_l2')

tplot(['mms4_des_numberdensity_brst', 'mms4_hpca_hplus_number_density'])

# set the y-axis to log scale
options('mms4_des_numberdensity_brst', 'ylog', True)
options('mms4_hpca_hplus_number_density', 'ylog', True)

tplot(['mms4_des_numberdensity_brst', 'mms4_hpca_hplus_number_density'])

'''
==========================================================================
Analysis Tools
==========================================================================
'''

# subtract_average
from pyspedas import subtract_average

subtract_average('mms4_fgm_b_gsm_brst_l2')

tplot(['mms4_fgm_b_gsm_brst_l2', 'mms4_fgm_b_gsm_brst_l2-d'])

# subtract_median
from pyspedas import subtract_median

subtract_median('mms4_fgm_b_gsm_brst_l2')

tplot(['mms4_fgm_b_gsm_brst_l2', 'mms4_fgm_b_gsm_brst_l2-d', 'mms4_fgm_b_gsm_brst_l2-m'])

# time clip
from pyspedas import time_clip

time_clip(['mms4_fgm_b_gsm_brst_l2', 'mms4_fgm_b_gsm_brst_l2-d', 'mms4_fgm_b_gsm_brst_l2-m'], '2015-10-16/13:06:45', '2015-10-16/13:07', suffix='')

tplot(['mms4_fgm_b_gsm_brst_l2', 'mms4_fgm_b_gsm_brst_l2-d', 'mms4_fgm_b_gsm_brst_l2-m'])

# clip the data
from pyspedas import tclip

tclip(['mms4_fgm_b_gsm_brst_l2', 'mms4_fgm_b_gsm_brst_l2-d', 'mms4_fgm_b_gsm_brst_l2-m'], 0, 20, suffix='')

tplot(['mms4_fgm_b_gsm_brst_l2', 'mms4_fgm_b_gsm_brst_l2-d', 'mms4_fgm_b_gsm_brst_l2-m'])

'''
==========================================================================
Future Plans

- Speed up CDF load times
- Searchable MMS events database
- Corrected FEEPS omni-directional spectra
- Omni-directional HPCA spectra (summed over anodes+spin averaged)
- Spin averaged EIS, FEEPS spectra
- EIS, FEEPS pitch angle distributions
- FPI lossy compression, error bars
- Read-only mirror support
- Tests, documentation, crib sheets
==========================================================================
'''
32.312775
147
0.670757
579758b9b1052c48161f183deca23dc374128b73
15,608
py
Python
emd/search_emd.py
AGiannoutsos/Latent_vs_Original_Space_Image_Classification
f2971b2b06d837ef85016ee8faf1715c660ca161
[ "MIT" ]
1
2021-02-21T07:33:47.000Z
2021-02-21T07:33:47.000Z
emd/search_emd.py
AGiannoutsos/Latent_vs_Original_Space_Image_Classification
f2971b2b06d837ef85016ee8faf1715c660ca161
[ "MIT" ]
null
null
null
emd/search_emd.py
AGiannoutsos/Latent_vs_Original_Space_Image_Classification
f2971b2b06d837ef85016ee8faf1715c660ca161
[ "MIT" ]
1
2022-02-04T12:55:31.000Z
2022-02-04T12:55:31.000Z
import numpy as np import os import sys import struct import json from array import array as pyarray import matplotlib.pyplot as plt import time # Define class with colors for UI improvement class bcolors: HEADER = '\033[95m' OKBLUE = '\033[94m' OKCYAN = '\033[96m' OKGREEN = '\033[92m' WARNING = '\033[93m' FAIL = '\033[91m' ENDC = '\033[0m' BOLD = '\033[1m' UNDERLINE = '\033[4m' def load_mnist(dataset, digits=np.arange(10), type='data', numOfElements=-1): intType = np.dtype( 'int32' ).newbyteorder( '>' ) if not os.path.isfile(dataset): return None fname = os.path.join(".", dataset) if (type == 'data'): nMetaDataBytes = 4 * intType.itemsize images = np.fromfile(fname, dtype = 'ubyte') magicBytes, size, rows, cols = np.frombuffer(images[:nMetaDataBytes].tobytes(), intType) if numOfElements == -1: numOfElements = size #int(len(ind) * size/100.) images = images[nMetaDataBytes:].astype(dtype = 'float32').reshape([numOfElements, rows, cols, 1]) return images elif (type == 'labels'): nMetaDataBytes = 2 * intType.itemsize labels = np.fromfile(fname, dtype = 'ubyte')[nMetaDataBytes:] return labels else: return None # distance def manhattan_distances(data, queries): # empty matrix for resutls results = np.empty((queries.shape[0], data.shape[0]), dtype=np.float32) # fill the manhattan distance of every query for query in range(queries.shape[0]): l1_norm = np.linalg.norm((data - queries[query]), ord=1, axis=1) results[query] = l1_norm return results.T # manhattan distance def euklidian_distances(data, queries): # empty matrix for resutls results = np.empty((queries.shape[0], data.shape[0]), dtype=np.float32) # fill the manhattan distance of every query for query in range(queries.shape[0]): l1_norm = np.linalg.norm((data - queries[query]), axis=1) results[query] = l1_norm return results.T def earths_movers_distances(data, queries, distances, A): # empty matrix for resutls results = np.empty((len(queries), len(data)), dtype=np.float32) # fill the manhattan distance of every query for query in range(len(queries)): for data_i in range(len(data)): emd = earths_movers_distance(data[data_i], queries[query], distances=distances, A=A) # emd = ot.emd2(data[data_i], queries[query],distances_array) results[query][data_i] = emd return results.T # KNN classifier class KNN(): def __init__(self, n_neighbors=10, distance_function=manhattan_distances, distances=None, A=None): self.distance_function = distance_function self.n_neighbors = n_neighbors self.prediction_time = 0 # emd distances self.distances = distances self.A = A def fit(self, x_train, y_train): # place self_x in an array self.x_train = x_train self.y_train = y_train # regcognise different classes self.classes = list(np.unique(y_train)) self.classes.sort() def predict(self, x_test): # strt the prdiciton timer start_time = time.time() # get distances for all the vectors if self.distances is None: distances = self.distance_function(self.x_train, x_test) else: # earths movers distance distances = self.distance_function(self.x_train, x_test, self.distances, self.A) self.y_pred = self.y_train[np.argpartition(distances.T, self.n_neighbors)][:,0:self.n_neighbors] # get the prediction time end_time = time.time() self.prediction_time = end_time - start_time return self.y_pred.reshape((-1, self.n_neighbors)) def predict_proba(self, x_test): self.predict(x_test) # predict propabilities propabilities = [] for prediction in self.y_pred.tolist(): propability = [prediction.count(cl)/self.n_neighbors for cl in self.classes] propabilities.append(propability) return 
np.array(propabilities) def get_Accuracy(y_pred, y_true): results = y_pred - y_true # zeros are the correct so calculate them mean_accuracies = np.mean(results == 0, axis=1) # get the mean accuracy mean_accuracy = np.mean(mean_accuracies) return mean_accuracy ########################################################################### ################################### EMD ################################### ########################################################################### # manipulate strides to create a winowed resize of the 2d array # ideas were taken from https://github.com/scikit-image/scikit-image/blob/master/skimage/util/shape.py from numpy.lib.stride_tricks import as_strided def get_Windowed_view(array_in, shape): windows_in_image = np.array(array_in.shape) / np.array(shape) new_shape = list(windows_in_image.astype(int)) + shape new_strides = (np.array(array_in.strides)*shape[0]).tolist() + list(array_in.strides) arr_out = as_strided(array_in, shape=new_shape, strides=new_strides) return arr_out # get the clusters as a list from all the images def get_Clusters(images, window): # normalize images images = images.reshape(images.shape[0:-1]) # images = images / images.sum(axis=(1,2), keepdims=True) cluster_weights = [] for image in images: # get the windowed image windowed_image = get_Windowed_view(image, window) # sum the windows-clusters windowed_image = np.sum(windowed_image, axis=(2,3)) # cluster_weights.append( windowed_image.tolist() ) cluster_weights.append( windowed_image.reshape((-1)).tolist() ) return cluster_weights # get the euklidian diastnces of the clusters def get_Clusters_distances(dim, image_shape): clusters = [] for i in range(0, image_shape[0], dim): for j in range(0, image_shape[1], dim): cluster = [i,j] clusters.append(cluster) clusters = np.array(clusters) # euklidian distances n*n matrix distances = euklidian_distances(clusters,clusters) distances = np.ascontiguousarray(distances, dtype=np.float64) distances_array = distances # reshape and make it a list for the scipy linprog distances = distances.reshape((-1)).tolist() return distances, distances_array def get_A(num_of_weights, num_of_variables): """ A matrix should be like: Α = [[1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0], [0,0,0,0,1,1,1,1,0,0,0,0,0,0,0,0], [0,0,0,0,0,0,0,0,1,1,1,1,0,0,0,0], [0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1], [1,0,0,0,1,0,0,0,1,0,0,0,1,0,0,0], [0,1,0,0,0,1,0,0,0,1,0,0,0,1,0,0], [0,0,1,0,0,0,1,0,0,0,1,0,0,0,1,0], [0,0,0,1,0,0,0,1,0,0,0,1,0,0,0,1]] in order to satisfy the constraints of the EMD distance """ A = np.zeros((num_of_weights, num_of_variables)) # fill first half mask_ones = int(num_of_weights/2) mask = np.ones((1, mask_ones)) for i in range(mask_ones): A[i][i*mask_ones : i*mask_ones + mask_ones] = mask # second half one_offset = 0 for i in range(mask_ones, num_of_weights, 1): for j in range(mask_ones): A[i][j*mask_ones + one_offset] = 1 one_offset += 1 return A # implement EMD as proposed in # https://www.cs.cmu.edu/~efros/courses/LBMV07/Papers/rubner-jcviu-00.pdf # linear programming from scipy.optimize import linprog def earths_movers_distance(image1_clustes, image2_clustes, distances, A): # distances dij c = distances # weights b = image1_clustes + image2_clustes # linear optimization # res = linprog(c, A_eq=A, b_eq=b, method='revised simplex', options={"tol":1e-7}) # res = linprog(c, A_eq=A, b_eq=b, options={"tol":1e-5}) Aeq = np.ones((1, A.shape[1])) beq = min(sum(image1_clustes), sum(image2_clustes)) res = linprog(c, A_ub=A, b_ub=b, A_eq=Aeq, b_eq=beq, method='revised simplex') 
return res.fun / sum(res.x) if __name__ == '__main__': # Reading inline arguments # <-d> argument if '-d' not in sys.argv: print(bcolors.FAIL+'Error: missing argument <-d>.'+bcolors.ENDC) print(bcolors.WARNING+'Executable should be called:', sys.argv[0], '–d <input file original space> –q <query file original space> -l1 <labels of input dataset> -l2 <labels of query dataset> -ο <output file>'+bcolors.ENDC) sys.exit() else: if sys.argv.index('-d') == len(sys.argv)-1: print(bcolors.FAIL+'Error: invalid arguments.'+bcolors.ENDC) print(bcolors.WARNING+'Executable should be called:', sys.argv[0], '–d <input file original space> –q <query file original space> -l1 <labels of input dataset> -l2 <labels of query dataset> -ο <output file>'+bcolors.ENDC) sys.exit() datasetFile = sys.argv[sys.argv.index('-d')+1] # <-dl> argument if '-q' not in sys.argv: print(bcolors.FAIL+'Error: missing argument <-q>.'+bcolors.ENDC) print(bcolors.WARNING+'Executable should be called:', sys.argv[0], '–d <input file original space> –q <query file original space> -l1 <labels of input dataset> -l2 <labels of query dataset> -ο <output file>'+bcolors.ENDC) sys.exit() else: if sys.argv.index('-q') == len(sys.argv)-1: print(bcolors.FAIL+'Error: invalid arguments.'+bcolors.ENDC) print(bcolors.WARNING+'Executable should be called:', sys.argv[0], '–d <input file original space> –q <query file original space> -l1 <labels of input dataset> -l2 <labels of query dataset> -ο <output file>'+bcolors.ENDC) sys.exit() testsetFile = sys.argv[sys.argv.index('-q')+1] # <-t> argument if '-l1' not in sys.argv: print(bcolors.FAIL+'Error: missing argument <-l1>.'+bcolors.ENDC) print(bcolors.WARNING+'Executable should be called:', sys.argv[0], '–d <input file original space> –q <query file original space> -l1 <labels of input dataset> -l2 <labels of query dataset> -ο <output file>'+bcolors.ENDC) sys.exit() else: if sys.argv.index('-l1') == len(sys.argv)-1: print(bcolors.FAIL+'Error: invalid arguments.'+bcolors.ENDC) print(bcolors.WARNING+'Executable should be called:', sys.argv[0], '–d <input file original space> –q <query file original space> -l1 <labels of input dataset> -l2 <labels of query dataset> -ο <output file>'+bcolors.ENDC) sys.exit() dlabelsFile = sys.argv[sys.argv.index('-l1')+1] # <-tl> argument if '-l2' not in sys.argv: print(bcolors.FAIL+'Error: missing argument <-l2>.'+bcolors.ENDC) print(bcolors.WARNING+'Executable should be called:', sys.argv[0], '–d <input file original space> –q <query file original space> -l1 <labels of input dataset> -l2 <labels of query dataset> -ο <output file>'+bcolors.ENDC) sys.exit() else: if sys.argv.index('-l2') == len(sys.argv)-1: print(bcolors.FAIL+'Error: invalid arguments.'+bcolors.ENDC) print(bcolors.WARNING+'Executable should be called:', sys.argv[0], '–d <input file original space> –q <query file original space> -l1 <labels of input dataset> -l2 <labels of query dataset> -ο <output file>'+bcolors.ENDC) sys.exit() tlabelsFile = sys.argv[sys.argv.index('-l2')+1] # <-model> argument if '-o' not in sys.argv: print(bcolors.FAIL+'Error: missing argument <-o>.'+bcolors.ENDC) print(bcolors.WARNING+'Executable should be called:', sys.argv[0], '–d <input file original space> –q <query file original space> -l1 <labels of input dataset> -l2 <labels of query dataset> -ο <output file>'+bcolors.ENDC) sys.exit() else: if sys.argv.index('-o') == len(sys.argv)-1: print(bcolors.FAIL+'Error: invalid arguments.'+bcolors.ENDC) print(bcolors.WARNING+'Executable should be called:', sys.argv[0], '–d <input file 
original space> –q <query file original space> -l1 <labels of input dataset> -l2 <labels of query dataset> -ο <output file>'+bcolors.ENDC) sys.exit() outputFile = sys.argv[sys.argv.index('-o')+1] # datasetFile = "/content/Latent_vs_Original_Space_Image_Classification/data/train-images-idx3-ubyte" # dlabelsFile = "/content/Latent_vs_Original_Space_Image_Classification/data/train-labels-idx1-ubyte" # testsetFile = "/content/Latent_vs_Original_Space_Image_Classification/data/t10k-images-idx3-ubyte" # tlabelsFile = "/content/Latent_vs_Original_Space_Image_Classification/data/t10k-labels-idx1-ubyte" # print(datasetFile,dlabelsFile,testsetFile,tlabelsFile, outputFile) t = 100 q = 5 train_X = load_mnist(datasetFile, type='data')[0:t] train_Y = load_mnist(dlabelsFile, type='labels')[0:t] test_X = load_mnist(testsetFile, type='data')[0:q] test_Y = load_mnist(tlabelsFile, type='labels')[0:q] # reshape labels train_Y = train_Y.reshape((-1,1)) test_Y = test_Y.reshape((-1,1)) input_shape = train_X.shape[1:] num_of_classes = train_Y.shape ########################################################################### ################################### KNN ################################### ########################################################################### # preprocess for knn manhattan x_train = train_X.reshape((train_X.shape[0], -1)) x_test = test_X.reshape((test_X.shape[0], -1)) # init knn classifier manhattan_knn = KNN(10, manhattan_distances) manhattan_knn.fit(x_train, train_Y) manhattan_predictions = manhattan_knn.predict(x_test) manhattan_accuracy = get_Accuracy(manhattan_predictions, test_Y) # print accuracy and time manhattan_message = "Average Correct Search Results MANHATTAN: %0.4f in Time: %0.5f seconds" % (manhattan_accuracy, manhattan_knn.prediction_time) print(manhattan_message) ########################################################################### ################################### EMD ################################### ########################################################################### # preprocess for EMD dim = 14 # get clusters of train and test train_clusters = get_Clusters(train_X, [dim, dim]) test_clusters = get_Clusters(test_X, [dim, dim]) # get distances distances, distances_array = get_Clusters_distances(dim, input_shape[:-1]) # get the number of variables # 2 times the weights for the linprog num_of_weights = 2*len(train_clusters[0]) # variables are the of distances num_of_variables = len(distances) # get A for the EMD coefficients A = get_A(num_of_weights, num_of_variables) # emd_knn emd_knn = KNN(10, earths_movers_distances, distances, A) emd_knn.fit(train_clusters, train_Y) emd_predictions = emd_knn.predict(test_clusters) emd_accuracy = get_Accuracy(emd_predictions, test_Y) # print accuracy and time emd_message = "Average Correct Search Results EMD: %0.4f in Time: %0.5f seconds with cluster size: %d" % (emd_accuracy, emd_knn.prediction_time, dim) print(emd_message) with open(outputFile, "w") as f: f.write(emd_message+"\n") f.write(manhattan_message+"\n")
38.633663
244
0.622309
61463267f1317039eb12ac04585156f60268db75
664
py
Python
vegan/base.py
vaitekunas/vegan
5a027ed2c5fc5fbcaf8e14a303a05e057e9a970f
[ "MIT" ]
null
null
null
vegan/base.py
vaitekunas/vegan
5a027ed2c5fc5fbcaf8e14a303a05e057e9a970f
[ "MIT" ]
null
null
null
vegan/base.py
vaitekunas/vegan
5a027ed2c5fc5fbcaf8e14a303a05e057e9a970f
[ "MIT" ]
null
null
null
"""Base class that implements some functionality that most vegan classes need.""" from vegan.progress import Progress from vegan import utils class Base(object): """Implements the logging facility update.""" """Base logging facility.""" _log = None """Base progress facility.""" _progress = None def __init__(self, logger=None): """Initialize the base class.""" # Specify a logger if logger is not None and callable(logger): self._log = logger else: self._log = utils.simple_log # Specify a progressbar self._progress = Progress(width=32, block="#", blank=" ")
25.538462
81
0.628012
7ace7c79cbb25ef02699a3723bccfe52c1278e63
444
py
Python
ams/ticket/migrations/0002_auto_20190116_2258.py
magnuspedro/ams
72ef810d14d9a4724e781489d081140be6674d60
[ "MIT" ]
null
null
null
ams/ticket/migrations/0002_auto_20190116_2258.py
magnuspedro/ams
72ef810d14d9a4724e781489d081140be6674d60
[ "MIT" ]
null
null
null
ams/ticket/migrations/0002_auto_20190116_2258.py
magnuspedro/ams
72ef810d14d9a4724e781489d081140be6674d60
[ "MIT" ]
null
null
null
# Generated by Django 2.1.4 on 2019-01-16 22:58

from django.db import migrations, models
import uuid


class Migration(migrations.Migration):

    dependencies = [
        ('ticket', '0001_initial'),
    ]

    operations = [
        migrations.AlterField(
            model_name='ticket',
            name='code',
            field=models.CharField(default=uuid.UUID('e0bef5cc-addc-435a-8725-0e8b2845f180'), max_length=255),
        ),
    ]
22.2
110
0.619369
8f35f3c50ea8f49fb28f6de1e018c9b8963b43f1
12,290
py
Python
python/ray/autoscaler/_private/kuberay/node_provider.py
orcahmlee/ray
298742d7241681ee1f307ec0dd3cd7e9713a3c7d
[ "Apache-2.0" ]
null
null
null
python/ray/autoscaler/_private/kuberay/node_provider.py
orcahmlee/ray
298742d7241681ee1f307ec0dd3cd7e9713a3c7d
[ "Apache-2.0" ]
41
2021-09-21T01:13:48.000Z
2022-03-19T07:12:22.000Z
python/ray/autoscaler/_private/kuberay/node_provider.py
LaudateCorpus1/ray
20cf2edfef7103c269358a49a48c2159315ee132
[ "Apache-2.0" ]
1
2019-09-24T16:24:49.000Z
2019-09-24T16:24:49.000Z
import json import logging import requests from typing import Any, Dict, List, Tuple from ray.autoscaler._private.constants import ( DISABLE_NODE_UPDATERS_KEY, DISABLE_LAUNCH_CONFIG_CHECK_KEY, FOREGROUND_NODE_LAUNCH_KEY, ) from ray.autoscaler.node_provider import NodeProvider from ray.autoscaler.tags import ( NODE_KIND_HEAD, NODE_KIND_WORKER, STATUS_UP_TO_DATE, STATUS_UPDATE_FAILED, TAG_RAY_NODE_KIND, TAG_RAY_USER_NODE_TYPE, ) # Terminology: # Labels and Tags # We call the Kuberay labels "labels" and the Ray autoscaler tags "tags". # The labels are prefixed by "ray.io". Tags are prefixed by "ray-". # We convert between the two but do not mix them. # Worker Groups and Available Node Types # In Kuberay the different node types are called "worker groups", in the # the Ray autoscaler they are called "available node types". # Design: # Each modification the autoscaler wants to make is posted to the API server goal state # (e.g. if the autoscaler wants to scale up, it increases the number of # replicas of the worker group it wants to scale, if it wants to scale down # it decreases the number of replicas and adds the exact pods that should be # terminated to the scaleStrategy). In order to guarantee consistency, the NodeProvider # then waits until Kuberay's reconciliation loop creates the pod specifications in the # API server and then returns control back to the autoscaler. The waiting period # is typically small, on the order of a few seconds. We make sure that only one # such modification is in process by serializing all modification operations with # a lock in the NodeProvider. # Note: Log handlers set up in autoscaling monitor entrypoint. logger = logging.getLogger(__name__) provider_exists = False def to_label_selector(tags: Dict[str, str]) -> str: """Convert tags to label selector to embed in query to K8s API server.""" label_selector = "" for k, v in tags.items(): if label_selector != "": label_selector += "," label_selector += "{}={}".format(k, v) return label_selector def status_tag(pod: Dict[str, Any]) -> str: """Convert pod state to Ray autoscaler status tag.""" if ( "containerStatuses" not in pod["status"] or not pod["status"]["containerStatuses"] ): return "pending" state = pod["status"]["containerStatuses"][0]["state"] if "pending" in state: return "pending" if "running" in state: return STATUS_UP_TO_DATE if "waiting" in state: return "waiting" if "terminated" in state: return STATUS_UPDATE_FAILED raise ValueError("Unexpected container state.") def make_node_tags(labels: Dict[str, str], status_tag: str) -> Dict[str, str]: """Convert Kuberay labels to Ray autoscaler tags.""" tags = {"ray-node-status": status_tag} if labels["ray.io/node-type"] == "head": tags[TAG_RAY_NODE_KIND] = NODE_KIND_HEAD tags[TAG_RAY_USER_NODE_TYPE] = "head-group" else: tags[TAG_RAY_NODE_KIND] = NODE_KIND_WORKER tags[TAG_RAY_USER_NODE_TYPE] = labels["ray.io/group"] return tags def load_k8s_secrets() -> Tuple[Dict[str, str], str]: """ Loads secrets needed to access K8s resources. Returns: headers: Headers with K8s access token verify: Path to certificate """ with open("/var/run/secrets/kubernetes.io/serviceaccount/token") as secret: token = secret.read() headers = { "Authorization": "Bearer " + token, } verify = "/var/run/secrets/kubernetes.io/serviceaccount/ca.crt" return headers, verify def url_from_resource(namespace: str, path: str) -> str: """Convert resource path to REST URL for Kubernetes API server. 
Args: namespace: The K8s namespace of the resource path: The part of the resource path that starts with the resource type. Supported resource types are "pods" and "rayclusters". """ if path.startswith("pods"): api_group = "/api/v1" elif path.startswith("rayclusters"): api_group = "/apis/ray.io/v1alpha1" else: raise NotImplementedError("Tried to access unknown entity at {}".format(path)) return ( "https://kubernetes.default:443" + api_group + "/namespaces/" + namespace + "/" + path ) def _worker_group_index(raycluster: Dict[str, Any], group_name: str) -> int: """Extract worker group index from RayCluster.""" group_names = [spec["groupName"] for spec in raycluster["spec"]["workerGroupSpecs"]] return group_names.index(group_name) class KuberayNodeProvider(NodeProvider): # type: ignore def __init__( self, provider_config: Dict[str, Any], cluster_name: str, _allow_multiple: bool = False, ): logger.info("Creating KuberayNodeProvider.") self.namespace = provider_config["namespace"] self.cluster_name = cluster_name self.headers, self.verify = load_k8s_secrets() # Disallow multiple node providers, unless explicitly allowed for testing. global provider_exists if not _allow_multiple: assert ( not provider_exists ), "Only one KuberayNodeProvider allowed per process." assert ( provider_config.get(DISABLE_NODE_UPDATERS_KEY, False) is True ), f"To use KuberayNodeProvider, must set `{DISABLE_NODE_UPDATERS_KEY}:True`." assert provider_config.get(DISABLE_LAUNCH_CONFIG_CHECK_KEY, False) is True, ( "To use KuberayNodeProvider, must set " f"`{DISABLE_LAUNCH_CONFIG_CHECK_KEY}:True`." ) assert ( provider_config.get(FOREGROUND_NODE_LAUNCH_KEY, False) is True ), f"To use KuberayNodeProvider, must set `{FOREGROUND_NODE_LAUNCH_KEY}:True`." provider_exists = True super().__init__(provider_config, cluster_name) def _url(self, path: str) -> str: """Convert resource path to REST URL for Kubernetes API server.""" if path.startswith("pods"): api_group = "/api/v1" elif path.startswith("rayclusters"): api_group = "/apis/ray.io/v1alpha1" else: raise NotImplementedError( "Tried to access unknown entity at {}".format(path) ) return ( "https://kubernetes.default:443" + api_group + "/namespaces/" + self.namespace + "/" + path ) def _get(self, path: str) -> Dict[str, Any]: """Wrapper for REST GET of resource with proper headers.""" url = url_from_resource(namespace=self.namespace, path=path) result = requests.get(url, headers=self.headers, verify=self.verify) assert result.status_code == 200 return result.json() def _get_non_terminating_pods( self, tag_filters: Dict[str, str] ) -> List[Dict[str, Any]]: """Get the list of pods in the Ray cluster, excluding pods marked for deletion. Filter by the specified tag_filters. Return a list of pod objects, represented as dictionaries. Details on K8s resource deletion: https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-deletion """ label_filters = to_label_selector( { "ray.io/cluster": self.cluster_name, } ) data = self._get("pods?labelSelector=" + requests.utils.quote(label_filters)) result = [] for pod in data["items"]: # Kubernetes sets metadata.deletionTimestamp immediately after admitting a # request to delete an object. Full removal of the object may take some time # after the deletion timestamp is set. See link in docstring for details. if "deletionTimestamp" in pod["metadata"]: # Ignore pods marked for termination. 
continue labels = pod["metadata"]["labels"] tags = make_node_tags(labels, status_tag(pod)) if tag_filters.items() <= tags.items(): result.append(pod) return result def _patch(self, path: str, payload: List[Dict[str, Any]]) -> Dict[str, Any]: """Wrapper for REST PATCH of resource with proper headers.""" url = url_from_resource(namespace=self.namespace, path=path) result = requests.patch( url, json.dumps(payload), headers={**self.headers, "Content-type": "application/json-patch+json"}, verify=self.verify, ) assert result.status_code == 200 return result.json() def create_node( self, node_config: Dict[str, Any], tags: Dict[str, str], count: int ) -> Dict[str, Dict[str, str]]: """Creates a number of nodes within the namespace.""" url = "rayclusters/{}".format(self.cluster_name) raycluster = self._get(url) group_name = tags["ray-user-node-type"] group_index = _worker_group_index(raycluster, group_name) tag_filters = {TAG_RAY_USER_NODE_TYPE: group_name} current_replica_count = len(self.non_terminated_nodes(tag_filters)) path = f"/spec/workerGroupSpecs/{group_index}/replicas" payload = [ { "op": "replace", "path": path, "value": current_replica_count + count, }, ] self._patch(url, payload) return {} def internal_ip(self, node_id: str) -> str: """Get internal IP of a node (= Kubernetes pod).""" data = self._get("pods/{}".format(node_id)) return data["status"].get("podIP", "IP not yet assigned") def node_tags(self, node_id: str) -> Dict[str, str]: """Get tags of a node (= Kubernetes pod).""" data = self._get("pods/{}".format(node_id)) return make_node_tags(data["metadata"]["labels"], status_tag(data)) def non_terminated_nodes(self, tag_filters: Dict[str, str]) -> List[str]: """Return a list of node ids filtered by the specified tags dict.""" return [ pod["metadata"]["name"] for pod in self._get_non_terminating_pods(tag_filters) ] def terminate_node(self, node_id: str) -> None: """Terminates the specified node (= Kubernetes pod).""" self.terminate_nodes([node_id]) def terminate_nodes(self, node_ids: List[str]) -> Dict[str, Dict[str, str]]: """Batch terminates the specified nodes (= Kubernetes pods).""" # Split node_ids into groups according to node type and terminate # them individually. Note that in most cases, node_ids contains # a single element and therefore it is most likely not worth # optimizing this code to batch the requests to the API server. groups = {} current_replica_counts = {} label_filters = to_label_selector({"ray.io/cluster": self.cluster_name}) pods = self._get("pods?labelSelector=" + requests.utils.quote(label_filters)) for pod in pods["items"]: group_name = pod["metadata"]["labels"]["ray.io/group"] current_replica_counts[group_name] = ( current_replica_counts.get(group_name, 0) + 1 ) if pod["metadata"]["name"] in node_ids: groups.setdefault(group_name, []).append(pod["metadata"]["name"]) url = "rayclusters/{}".format(self.cluster_name) raycluster = self._get(url) for group_name, nodes in groups.items(): group_index = _worker_group_index(raycluster, group_name) prefix = f"/spec/workerGroupSpecs/{group_index}" payload = [ { "op": "replace", "path": prefix + "/replicas", "value": current_replica_counts[group_name] - len(nodes), }, { "op": "replace", "path": prefix + "/scaleStrategy", "value": {"workersToDelete": nodes}, }, ] self._patch(url, payload) return {}
37.355623
88
0.626444
ea8c6a21b8687d2d6100b558dfacbc632d26e356
2,583
py
Python
sympy/diffgeom/tests/test_hyperbolic_space.py
iamabhishek0/sympy
c461bd1ff9d178d1012b04fd0bf37ee3abb02cdd
[ "BSD-3-Clause" ]
8,323
2015-01-02T15:51:43.000Z
2022-03-31T13:13:19.000Z
sympy/diffgeom/tests/test_hyperbolic_space.py
iamabhishek0/sympy
c461bd1ff9d178d1012b04fd0bf37ee3abb02cdd
[ "BSD-3-Clause" ]
15,102
2015-01-01T01:33:17.000Z
2022-03-31T22:53:13.000Z
sympy/diffgeom/tests/test_hyperbolic_space.py
iamabhishek0/sympy
c461bd1ff9d178d1012b04fd0bf37ee3abb02cdd
[ "BSD-3-Clause" ]
4,490
2015-01-01T17:48:07.000Z
2022-03-31T17:24:05.000Z
r'''
unit test describing the hyperbolic half-plane with the Poincare metric. This
is a basic model of hyperbolic geometry on the (positive) half-space

{(x,y) \in R^2 | y > 0}

with the Riemannian metric

ds^2 = (dx^2 + dy^2)/y^2

It has constant negative scalar curvature = -2

https://en.wikipedia.org/wiki/Poincare_half-plane_model
'''

from sympy import diag
from sympy.diffgeom import (twoform_to_matrix,
                            metric_to_Christoffel_1st, metric_to_Christoffel_2nd,
                            metric_to_Riemann_components, metric_to_Ricci_components)
import sympy.diffgeom.rn
from sympy.tensor.array import ImmutableDenseNDimArray


def test_H2():
    TP = sympy.diffgeom.TensorProduct
    R2 = sympy.diffgeom.rn.R2
    y = R2.y
    dy = R2.dy
    dx = R2.dx
    g = (TP(dx, dx) + TP(dy, dy))*y**(-2)

    automat = twoform_to_matrix(g)
    mat = diag(y**(-2), y**(-2))
    assert mat == automat

    gamma1 = metric_to_Christoffel_1st(g)
    assert gamma1[0, 0, 0] == 0
    assert gamma1[0, 0, 1] == -y**(-3)
    assert gamma1[0, 1, 0] == -y**(-3)
    assert gamma1[0, 1, 1] == 0

    assert gamma1[1, 1, 1] == -y**(-3)
    assert gamma1[1, 1, 0] == 0
    assert gamma1[1, 0, 1] == 0
    assert gamma1[1, 0, 0] == y**(-3)

    gamma2 = metric_to_Christoffel_2nd(g)
    assert gamma2[0, 0, 0] == 0
    assert gamma2[0, 0, 1] == -y**(-1)
    assert gamma2[0, 1, 0] == -y**(-1)
    assert gamma2[0, 1, 1] == 0

    assert gamma2[1, 1, 1] == -y**(-1)
    assert gamma2[1, 1, 0] == 0
    assert gamma2[1, 0, 1] == 0
    assert gamma2[1, 0, 0] == y**(-1)

    Rm = metric_to_Riemann_components(g)
    assert Rm[0, 0, 0, 0] == 0
    assert Rm[0, 0, 0, 1] == 0
    assert Rm[0, 0, 1, 0] == 0
    assert Rm[0, 0, 1, 1] == 0

    assert Rm[0, 1, 0, 0] == 0
    assert Rm[0, 1, 0, 1] == -y**(-2)
    assert Rm[0, 1, 1, 0] == y**(-2)
    assert Rm[0, 1, 1, 1] == 0

    assert Rm[1, 0, 0, 0] == 0
    assert Rm[1, 0, 0, 1] == y**(-2)
    assert Rm[1, 0, 1, 0] == -y**(-2)
    assert Rm[1, 0, 1, 1] == 0

    assert Rm[1, 1, 0, 0] == 0
    assert Rm[1, 1, 0, 1] == 0
    assert Rm[1, 1, 1, 0] == 0
    assert Rm[1, 1, 1, 1] == 0

    Ric = metric_to_Ricci_components(g)
    assert Ric[0, 0] == -y**(-2)
    assert Ric[0, 1] == 0
    assert Ric[1, 0] == 0
    assert Ric[0, 0] == -y**(-2)

    assert Ric == ImmutableDenseNDimArray([-y**(-2), 0, 0, -y**(-2)], (2, 2))

    ## scalar curvature is -2
    #TODO - it would be nice to have index contraction built-in
    R = (Ric[0, 0] + Ric[1, 1])*y**2
    assert R == -2

    ## Gauss curvature is -1
    assert R/2 == -1
28.076087
85
0.550523
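As a quick check on the assertions in test_H2 above: the Ricci components asserted in the test contract with the inverse metric to the constant scalar curvature stated in the docstring,

    ds^2 = \frac{dx^2 + dy^2}{y^2}, \qquad
    R_{xx} = R_{yy} = -\frac{1}{y^2}, \qquad
    R = g^{xx} R_{xx} + g^{yy} R_{yy}
      = y^2\left(-\frac{1}{y^2}\right) + y^2\left(-\frac{1}{y^2}\right) = -2,

and the Gauss curvature R/2 = -1 matches the final assertion.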
21ecaa910746effed02a527d938b35d55d5494f9
1,493
py
Python
VG.py
kmi-robots/spatial-KB
c7c917dae5cf5d29a91c3433aa3ccee32101f18a
[ "Apache-2.0" ]
null
null
null
VG.py
kmi-robots/spatial-KB
c7c917dae5cf5d29a91c3433aa3ccee32101f18a
[ "Apache-2.0" ]
null
null
null
VG.py
kmi-robots/spatial-KB
c7c917dae5cf5d29a91c3433aa3ccee32101f18a
[ "Apache-2.0" ]
null
null
null
"""Visual Genome processing methods""" import json from collections import OrderedDict,Counter def load_rel_bank(KBobj): with open(KBobj.path_to_VGrel) as ind: raw_data = json.load(ind) return raw_data def update_VG_stats(stats_dict,pred,sub_syn, obj_syn): stats_dict = add_relation_counts(stats_dict,pred,sub_syn, obj_syn) # Repeat all of the above for the case of near, i.e., all predicates generalise back to near #if pred not in alias_index['near']: # stats_dict = add_relation_counts(stats_dict, "near", alias_index["near"], sub_syn, obj_syn) return stats_dict def add_relation_counts(stats_dict,pred,sub_syn, obj_syn): # How many times subj - pred - obj? if pred not in stats_dict["predicates"]: stats_dict["predicates"][pred] = {} stats_dict["predicates"][pred]["relations"] = Counter() #stats_dict["predicates"][pred]["aliases"] = aliases stats_dict["predicates"][pred]["relations"][str((str(sub_syn[0]), str(obj_syn[0])))] += 1 # how many times subject in relationship of type pred? if str(sub_syn[0]) not in stats_dict["subjects"]: stats_dict["subjects"][str(sub_syn[0])] = Counter() stats_dict["subjects"][str(sub_syn[0])][pred] += 1 # how many times object in relationship of type pred? if str(obj_syn[0]) not in stats_dict["objects"]: stats_dict["objects"][str(obj_syn[0])] = Counter() stats_dict["objects"][str(obj_syn[0])][pred] += 1 return stats_dict
40.351351
100
0.686537
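A hedged usage sketch for the VG.py helpers above: the stats_dict layout ("predicates", "subjects", "objects" keys) is inferred from add_relation_counts, and the synset tuples are illustrative:

    stats = {"predicates": {}, "subjects": {}, "objects": {}}

    # sub_syn / obj_syn are expected to be indexable, with the synset name first.
    stats = update_VG_stats(stats, "on", ("cup.n.01",), ("table.n.02",))
    stats = update_VG_stats(stats, "on", ("cup.n.01",), ("table.n.02",))

    print(stats["predicates"]["on"]["relations"])  # Counter with ('cup.n.01', 'table.n.02') -> 2
    print(stats["subjects"]["cup.n.01"]["on"])     # 2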
352f800ac7f7c22284c7b44feba8fd6e1ee15c6f
10,327
py
Python
utils.py
NguyenThanhAI/BigGAN-Tensorflow
f6f033009f4c1796966898b6a50f055676d1a1c1
[ "MIT" ]
null
null
null
utils.py
NguyenThanhAI/BigGAN-Tensorflow
f6f033009f4c1796966898b6a50f055676d1a1c1
[ "MIT" ]
null
null
null
utils.py
NguyenThanhAI/BigGAN-Tensorflow
f6f033009f4c1796966898b6a50f055676d1a1c1
[ "MIT" ]
null
null
null
import scipy.misc import numpy as np import os from glob import glob import tensorflow as tf import tensorflow.contrib.slim as slim from keras.datasets import cifar10, mnist class ImageData: def __init__(self, load_size, channels, custom_dataset): self.load_size = load_size self.channels = channels self.custom_dataset = custom_dataset def _crop(self, image, offset_height, offset_width, crop_height, crop_width): """Crops the given image using the provided offsets and sizes. Note that the method doesn't assume we know the input image size but it does assume we know the input image rank. Args: image: an image of shape [height, width, channels]. offset_height: a scalar tensor indicating the height offset. offset_width: a scalar tensor indicating the width offset. crop_height: the height of the cropped image. crop_width: the width of the cropped image. Returns: the cropped (and resized) image. Raises: InvalidArgumentError: if the rank is not 3 or if the image dimensions are less than the crop size. """ original_shape = tf.shape(image) rank_assertion = tf.Assert( tf.equal(tf.rank(image), 3), ['Rank of image must be equal to 3.']) with tf.control_dependencies([rank_assertion]): cropped_shape = tf.stack([crop_height, crop_width, self.channels]) size_assertion = tf.Assert( tf.logical_and( tf.greater_equal(original_shape[0], crop_height), tf.greater_equal(original_shape[1], crop_width)), ['Crop size greater than the image size.']) offsets = tf.to_int32(tf.stack([offset_height, offset_width, 0])) # Use tf.slice instead of crop_to_bounding box as it accepts tensors to # define the crop size. with tf.control_dependencies([size_assertion]): image = tf.slice(image, offsets, cropped_shape) return tf.cast(tf.reshape(image, cropped_shape), dtype=tf.float32) def _random_crop(self, image_list, crop_height, crop_width): """Crops the given list of images. The function applies the same crop to each image in the list. This can be effectively applied when there are multiple image inputs of the same dimension such as: image, depths, normals = _random_crop([image, depths, normals], 120, 150) Args: image_list: a list of image tensors of the same dimension but possibly varying channel. crop_height: the new height. crop_width: the new width. Returns: the image_list with cropped images. Raises: ValueError: if there are multiple image inputs provided with different size or the images are smaller than the crop dimensions. """ if not image_list: raise ValueError('Empty image_list.') # Compute the rank assertions. 
rank_assertions = [] for i in range(len(image_list)): image_rank = tf.rank(image_list[i]) rank_assert = tf.Assert( tf.equal(image_rank, 3), ['Wrong rank for tensor %s [expected] [actual]', image_list[i].name, 3, image_rank]) rank_assertions.append(rank_assert) with tf.control_dependencies([rank_assertions[0]]): image_shape = tf.shape(image_list[0]) image_height = image_shape[0] image_width = image_shape[1] crop_size_assert = tf.Assert( tf.logical_and( tf.greater_equal(image_height, crop_height), tf.greater_equal(image_width, crop_width)), ['Crop size greater than the image size.']) asserts = [rank_assertions[0], crop_size_assert] for i in range(1, len(image_list)): image = image_list[i] asserts.append(rank_assertions[i]) with tf.control_dependencies([rank_assertions[i]]): shape = tf.shape(image) height = shape[0] width = shape[1] height_assert = tf.Assert( tf.equal(height, image_height), ['Wrong height for tensor %s [expected][actual]', image.name, height, image_height]) width_assert = tf.Assert( tf.equal(width, image_width), ['Wrong width for tensor %s [expected][actual]', image.name, width, image_width]) asserts.extend([height_assert, width_assert]) # Create a random bounding box. # # Use tf.random_uniform and not numpy.random.rand as doing the former would # generate random numbers at graph eval time, unlike the latter which # generates random numbers at graph definition time. with tf.control_dependencies(asserts): max_offset_height = tf.reshape(image_height - crop_height + 1, []) with tf.control_dependencies(asserts): max_offset_width = tf.reshape(image_width - crop_width + 1, []) offset_height = tf.random_uniform( [], maxval=max_offset_height, dtype=tf.int32) offset_width = tf.random_uniform( [], maxval=max_offset_width, dtype=tf.int32) return [self._crop(image, offset_height, offset_width, crop_height, crop_width) for image in image_list] def image_processing(self, filename): if not self.custom_dataset : x_decode = filename else : x = tf.read_file(filename) x_decode = tf.image.decode_jpeg(x, channels=self.channels) #img = tf.image.resize_images(x_decode, [self.load_size, self.load_size]) shape = tf.shape(x_decode) img = tf.cond(tf.logical_and(tf.greater_equal(shape[0], self.load_size), tf.greater_equal(shape[1], self.load_size)), lambda : self._random_crop([x_decode], crop_height=self.load_size, crop_width=self.load_size)[0], lambda : tf.image.resize_images(x_decode, [self.load_size, self.load_size])) img.set_shape([self.load_size, self.load_size, self.channels]) img = tf.cast(img, tf.float32) / 127.5 - 1 return img def load_mnist(): (train_data, train_labels), (test_data, test_labels) = mnist.load_data() x = np.concatenate((train_data, test_data), axis=0) x = np.expand_dims(x, axis=-1) return x def load_cifar10() : (train_data, train_labels), (test_data, test_labels) = cifar10.load_data() x = np.concatenate((train_data, test_data), axis=0) return x def load_data(dataset_name) : if dataset_name == 'mnist' : x = load_mnist() elif dataset_name == 'cifar10' : x = load_cifar10() else : #x = glob(os.path.join("./dataset", dataset_name, '*.*')) x = [] for dirs, _, files in os.walk(os.path.join("./dataset", dataset_name)): for file in files: x.append(os.path.join(dirs, file)) return x def preprocessing(x, size): x = scipy.misc.imread(x, mode='RGB') x = scipy.misc.imresize(x, [size, size]) x = normalize(x) return x def normalize(x) : return x/127.5 - 1 def save_images(images, size, image_path): return imsave(inverse_transform(images), size, image_path) def merge(images, size): h, w = 
images.shape[1], images.shape[2] if (images.shape[3] in (3,4)): c = images.shape[3] img = np.zeros((h * size[0], w * size[1], c)) for idx, image in enumerate(images): i = idx % size[1] j = idx // size[1] img[j * h:j * h + h, i * w:i * w + w, :] = image return img elif images.shape[3]==1: img = np.zeros((h * size[0], w * size[1])) for idx, image in enumerate(images): i = idx % size[1] j = idx // size[1] img[j * h:j * h + h, i * w:i * w + w] = image[:,:,0] return img else: raise ValueError('in merge(images,size) images parameter ''must have dimensions: HxW or HxWx3 or HxWx4') def imsave(images, size, path): # image = np.squeeze(merge(images, size)) # drop the channel dimension when it is 1? return scipy.misc.imsave(path, merge(images, size)) def inverse_transform(images): return (images+1.)/2. def check_folder(log_dir): if not os.path.exists(log_dir): os.makedirs(log_dir) return log_dir def show_all_variables(): model_vars = tf.trainable_variables() slim.model_analyzer.analyze_vars(model_vars, print_info=True) def str2bool(x): return x.lower() in ('true') ################################################################################## # Regularization ################################################################################## def orthogonal_regularizer(scale) : """ Defining the Orthogonal regularizer and return the function at last to be used in Conv layer as kernel regularizer""" def ortho_reg(w) : """ Reshaping the matrix into 2D tensor for enforcing orthogonality""" _, _, _, c = w.get_shape().as_list() w = tf.reshape(w, [-1, c]) """ Declaring an Identity Tensor of appropriate size""" identity = tf.eye(c) """ Regularizer Wt*W - I """ w_transpose = tf.transpose(w) w_mul = tf.matmul(w_transpose, w) reg = tf.subtract(w_mul, identity) """Calculating the Loss Obtained""" ortho_loss = tf.nn.l2_loss(reg) return scale * ortho_loss return ortho_reg def orthogonal_regularizer_fully(scale) : """ Defining the Orthogonal regularizer and return the function at last to be used in Fully Connected Layer """ def ortho_reg_fully(w) : """ Reshaping the matrix into 2D tensor for enforcing orthogonality""" _, c = w.get_shape().as_list() """Declaring an Identity Tensor of appropriate size""" identity = tf.eye(c) w_transpose = tf.transpose(w) w_mul = tf.matmul(w_transpose, w) reg = tf.subtract(w_mul, identity) """ Calculating the Loss """ ortho_loss = tf.nn.l2_loss(reg) return scale * ortho_loss return ortho_reg_fully
36.75089
125
0.607824
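A small NumPy analogue of the ortho_reg penalty above (a sanity check written for this note, not part of the repo): the quantity ||W^T W - I||^2 that the regularizer drives down is zero exactly when the reshaped kernel has orthonormal columns.

import numpy as np

def ortho_penalty(w, scale=1e-4):
    c = w.shape[-1]
    w = w.reshape(-1, c)
    reg = w.T @ w - np.eye(c)
    return scale * 0.5 * np.sum(reg ** 2)  # mirrors tf.nn.l2_loss

q, _ = np.linalg.qr(np.random.randn(16, 4))  # orthonormal columns
print(ortho_penalty(q))                      # ~0
print(ortho_penalty(np.ones((16, 4))))       # clearly positive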
644d4dc97b545a1c4db152c38caea0b94a11ea98
6,250
py
Python
landlab/components/overland_flow/generate_overland_flow_kinwave.py
clebouteiller/landlab
e6f47db76ea0814c4c5a24e695bbafb74c722ff7
[ "MIT" ]
1
2022-01-07T02:36:07.000Z
2022-01-07T02:36:07.000Z
landlab/components/overland_flow/generate_overland_flow_kinwave.py
clebouteiller/landlab
e6f47db76ea0814c4c5a24e695bbafb74c722ff7
[ "MIT" ]
1
2021-11-11T21:23:46.000Z
2021-11-11T21:23:46.000Z
landlab/components/overland_flow/generate_overland_flow_kinwave.py
clebouteiller/landlab
e6f47db76ea0814c4c5a24e695bbafb74c722ff7
[ "MIT" ]
2
2019-08-19T08:58:10.000Z
2022-01-07T02:36:01.000Z
# -*- coding: utf-8 -*- """Landlab component for overland flow using the kinematic-wave approximation. Created on Fri May 27 14:26:13 2016 @author: gtucker """ import numpy as np from landlab import Component class KinwaveOverlandFlowModel(Component): """Calculate water flow over topography. Landlab component that implements a two-dimensional kinematic wave model. This is an extremely simple, unsophisticated model, originally built simply to demonstrate the component creation process. Limitations to the present version include: infiltration is handled very crudely, the called is responsible for picking a stable time step size (no adaptive time stepping is used in the `run_one_step` method), precipitation rate is constant for a given duration (then zero), and all parameters are uniform in space. Also, the terrain is assumed to be stable over time. Caveat emptor! Examples -------- >>> from landlab import RasterModelGrid >>> rg = RasterModelGrid((4, 5), xy_spacing=10.0) >>> z = rg.add_zeros("topographic__elevation", at="node") >>> s = rg.add_zeros("topographic__gradient", at="link") >>> kw = KinwaveOverlandFlowModel(rg) >>> kw.vel_coef 100.0 >>> rg.at_node['surface_water__depth'] array([ 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.]) References ---------- **Required Software Citation(s) Specific to this Component** None Listed **Additional References** None Listed """ _name = "KinwaveOverlandFlowModel" _unit_agnostic = False _info = { "surface_water__depth": { "dtype": float, "intent": "out", "optional": False, "units": "m", "mapping": "node", "doc": "Depth of water on the surface", }, "topographic__elevation": { "dtype": float, "intent": "in", "optional": False, "units": "m", "mapping": "node", "doc": "Land surface topographic elevation", }, "topographic__gradient": { "dtype": float, "intent": "in", "optional": False, "units": "m/m", "mapping": "link", "doc": "Gradient of the ground surface", }, "water__specific_discharge": { "dtype": float, "intent": "out", "optional": False, "units": "m2/s", "mapping": "link", "doc": "flow discharge component in the direction of the link", }, "water__velocity": { "dtype": float, "intent": "out", "optional": False, "units": "m/s", "mapping": "link", "doc": "flow velocity component in the direction of the link", }, } def __init__( self, grid, precip_rate=1.0, precip_duration=1.0, infilt_rate=0.0, roughness=0.01, ): """Initialize the KinwaveOverlandFlowModel. Parameters ---------- grid : ModelGrid Landlab ModelGrid object precip_rate : float, optional (defaults to 1 mm/hr) Precipitation rate, mm/hr precip_duration : float, optional (defaults to 1 hour) Duration of precipitation, hours infilt_rate : float, optional (defaults to 0) Maximum rate of infiltration, mm/hr roughness : float, defaults to 0.01 Manning roughness coefficient, s/m^1/3 """ super().__init__(grid) # Store parameters and do unit conversion self._current_time = 0 self._precip = precip_rate / 3600000.0 # convert to m/s self._precip_duration = precip_duration * 3600.0 # h->s self._infilt = infilt_rate / 3600000.0 # convert to m/s self._vel_coef = 1.0 / roughness # do division now to save time # Create fields... 
# Elevation self._elev = grid.at_node["topographic__elevation"] # Slope self._slope = grid.at_link["topographic__gradient"] self.initialize_output_fields() self._depth = grid.at_node["surface_water__depth"] self._vel = grid.at_link["water__velocity"] self._disch = grid.at_link["water__specific_discharge"] # Calculate the ground-surface slope (assume it won't change) self._slope[self._grid.active_links] = self._grid.calc_grad_at_link(self._elev)[ self._grid.active_links ] self._sqrt_slope = np.sqrt(self._slope) self._sign_slope = np.sign(self._slope) @property def vel_coef(self): """Velocity coefficient. (1/roughness) """ return self._vel_coef def run_one_step(self, dt): """Calculate water flow for a time period `dt`. Default units for dt are *seconds*. """ # Calculate water depth at links. This implements an "upwind" scheme # in which water depth at the links is the depth at the higher of the # two nodes. H_link = self._grid.map_value_at_max_node_to_link( "topographic__elevation", "surface_water__depth" ) # Calculate velocity using the Manning equation. self._vel = ( -self._sign_slope * self._vel_coef * H_link**0.66667 * self._sqrt_slope ) # Calculate discharge self._disch[:] = H_link * self._vel # Flux divergence dqda = self._grid.calc_flux_div_at_node(self._disch) # Rate of change of water depth if self._current_time < self._precip_duration: ppt = self._precip else: ppt = 0.0 dHdt = ppt - self._infilt - dqda # Update water depth: simple forward Euler scheme self._depth[self._grid.core_nodes] += dHdt[self._grid.core_nodes] * dt # Very crude numerical hack: prevent negative water depth self._depth[np.where(self._depth < 0.0)[0]] = 0.0 self._current_time += dt if __name__ == "__main__": import doctest doctest.testmod()
30.788177
88
0.58272
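A hedged usage sketch for KinwaveOverlandFlowModel above, continuing the doctest setup from the class docstring (the sloped elevation field, the precipitation values, and the 10 s step are arbitrary choices; as the docstring notes, picking a stable dt is the caller's responsibility):

from landlab import RasterModelGrid

grid = RasterModelGrid((4, 5), xy_spacing=10.0)
z = grid.add_zeros("topographic__elevation", at="node")
z[:] = 0.01 * grid.x_of_node              # gentle slope so water actually flows
grid.add_zeros("topographic__gradient", at="link")

kw = KinwaveOverlandFlowModel(grid, precip_rate=20.0, precip_duration=0.5)
for _ in range(10):
    kw.run_one_step(10.0)                 # dt in seconds
print(grid.at_node["surface_water__depth"].max())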
f56eb631baf139d1c40630ec7187042c0acf8976
10,266
py
Python
python_modules/dagster/dagster_tests/cli_tests/command_tests/test_schedule_commands.py
emilyhawkins-drizly/dagster
bfb90e8b0b442f657e5256082d3116aefa8c330b
[ "Apache-2.0" ]
1
2021-07-03T09:05:58.000Z
2021-07-03T09:05:58.000Z
python_modules/dagster/dagster_tests/cli_tests/command_tests/test_schedule_commands.py
emilyhawkins-drizly/dagster
bfb90e8b0b442f657e5256082d3116aefa8c330b
[ "Apache-2.0" ]
null
null
null
python_modules/dagster/dagster_tests/cli_tests/command_tests/test_schedule_commands.py
emilyhawkins-drizly/dagster
bfb90e8b0b442f657e5256082d3116aefa8c330b
[ "Apache-2.0" ]
null
null
null
import re import click import mock import pytest from click.testing import CliRunner from dagster.cli.schedule import ( check_repo_and_scheduler, schedule_list_command, schedule_logs_command, schedule_restart_command, schedule_start_command, schedule_stop_command, schedule_up_command, schedule_wipe_command, ) from dagster.core.host_representation import ExternalRepository from dagster.core.instance import DagsterInstance from dagster.core.test_utils import environ from .test_cli_commands import schedule_command_contexts @pytest.mark.parametrize("gen_schedule_args", schedule_command_contexts()) def test_schedules_list(gen_schedule_args): with gen_schedule_args as (cli_args, instance): runner = CliRunner() with mock.patch("dagster.core.instance.DagsterInstance.get") as _instance: _instance.return_value = instance result = runner.invoke(schedule_list_command, cli_args) if result.exception: raise result.exception assert result.exit_code == 0 assert result.output == ("Repository bar\n" "**************\n") @pytest.mark.parametrize("gen_schedule_args", schedule_command_contexts()) def test_schedules_up(gen_schedule_args): with gen_schedule_args as (cli_args, instance): runner = CliRunner() with mock.patch("dagster.core.instance.DagsterInstance.get") as _instance: _instance.return_value = instance result = runner.invoke( schedule_up_command, cli_args, ) assert result.exit_code == 0 assert "Changes:\n" in result.output assert " + foo_schedule (add)" in result.output assert " + partitioned_schedule (add)" in result.output @pytest.mark.parametrize("gen_schedule_args", schedule_command_contexts()) def test_schedules_up_and_list(gen_schedule_args): with gen_schedule_args as (cli_args, instance): runner = CliRunner() with mock.patch("dagster.core.instance.DagsterInstance.get") as _instance: _instance.return_value = instance result = runner.invoke(schedule_up_command, cli_args) result = runner.invoke(schedule_list_command, cli_args) assert result.exit_code == 0 assert ( result.output == "Repository bar\n" "**************\n" "Schedule: foo_schedule [STOPPED]\n" "Cron Schedule: * * * * *\n" "****************************************\n" "Schedule: partitioned_schedule [STOPPED]\n" "Cron Schedule: * * * * *\n" ) @pytest.mark.parametrize("gen_schedule_args", schedule_command_contexts()) def test_schedules_start_and_stop(gen_schedule_args): with gen_schedule_args as (cli_args, instance): with mock.patch("dagster.core.instance.DagsterInstance.get") as _instance: _instance.return_value = instance runner = CliRunner() result = runner.invoke( schedule_up_command, cli_args, ) result = runner.invoke( schedule_start_command, cli_args + ["foo_schedule"], ) assert result.exit_code == 0 assert "Started schedule foo_schedule\n" == result.output result = runner.invoke( schedule_stop_command, cli_args + ["foo_schedule"], ) assert result.exit_code == 0 assert "Stopped schedule foo_schedule\n" == result.output @pytest.mark.parametrize("gen_schedule_args", schedule_command_contexts()) def test_schedules_start_empty(gen_schedule_args): with gen_schedule_args as (cli_args, instance): runner = CliRunner() with mock.patch("dagster.core.instance.DagsterInstance.get") as _instance: _instance.return_value = instance result = runner.invoke( schedule_start_command, cli_args, ) assert result.exit_code == 0 assert "Noop: dagster schedule start was called without any arguments" in result.output @pytest.mark.parametrize("gen_schedule_args", schedule_command_contexts()) def test_schedules_start_all(gen_schedule_args): with gen_schedule_args 
as (cli_args, instance): runner = CliRunner() with mock.patch("dagster.core.instance.DagsterInstance.get") as _instance: _instance.return_value = instance result = runner.invoke(schedule_up_command, cli_args) result = runner.invoke( schedule_start_command, cli_args + ["--start-all"], ) assert result.exit_code == 0 assert result.output == "Started all schedules for repository bar\n" @pytest.mark.parametrize("gen_schedule_args", schedule_command_contexts()) def test_schedules_wipe_correct_delete_message(gen_schedule_args): with gen_schedule_args as (cli_args, instance): runner = CliRunner() with mock.patch("dagster.core.instance.DagsterInstance.get") as _instance: _instance.return_value = instance result = runner.invoke(schedule_up_command, cli_args) result = runner.invoke( schedule_wipe_command, cli_args, input="DELETE\n", ) if result.exception: raise result.exception assert result.exit_code == 0 assert "Turned off all schedules and deleted all schedule history" in result.output result = runner.invoke( schedule_up_command, cli_args + ["--preview"], ) # Verify schedules were wiped assert result.exit_code == 0 assert "Planned Schedule Changes:\n" in result.output assert " + partitioned_schedule (add)" in result.output assert " + foo_schedule (add)" in result.output @pytest.mark.parametrize("gen_schedule_args", schedule_command_contexts()) def test_schedules_wipe_incorrect_delete_message(gen_schedule_args): with gen_schedule_args as (cli_args, instance): runner = CliRunner() with mock.patch("dagster.core.instance.DagsterInstance.get") as _instance: _instance.return_value = instance result = runner.invoke(schedule_up_command, cli_args) result = runner.invoke( schedule_wipe_command, cli_args, input="WRONG\n", ) assert result.exit_code == 0 assert ( "Exiting without turning off schedules or deleting schedule history" in result.output ) result = runner.invoke( schedule_up_command, cli_args + ["--preview"], ) # Verify schedules were not wiped assert result.exit_code == 0 assert ( result.output == "No planned changes to schedules.\n2 schedules will remain unchanged\n" ) @pytest.mark.parametrize("gen_schedule_args", schedule_command_contexts()) def test_schedules_restart(gen_schedule_args): with gen_schedule_args as (cli_args, instance): runner = CliRunner() with mock.patch("dagster.core.instance.DagsterInstance.get") as _instance: _instance.return_value = instance result = runner.invoke(schedule_up_command, cli_args) result = runner.invoke( schedule_start_command, cli_args + ["foo_schedule"], ) result = runner.invoke( schedule_restart_command, cli_args + ["foo_schedule"], ) assert result.exit_code == 0 assert "Restarted schedule foo_schedule" in result.output @pytest.mark.parametrize("gen_schedule_args", schedule_command_contexts()) def test_schedules_restart_all(gen_schedule_args): with gen_schedule_args as (cli_args, instance): runner = CliRunner() with mock.patch("dagster.core.instance.DagsterInstance.get") as _instance: _instance.return_value = instance result = runner.invoke(schedule_up_command, cli_args) result = runner.invoke( schedule_start_command, cli_args + ["foo_schedule"], ) result = runner.invoke( schedule_restart_command, cli_args + ["foo_schedule", "--restart-all-running"], ) assert result.exit_code == 0 assert result.output == "Restarted all running schedules for repository bar\n" @pytest.mark.parametrize("gen_schedule_args", schedule_command_contexts()) def test_schedules_logs(gen_schedule_args): with gen_schedule_args as (cli_args, instance): with 
mock.patch("dagster.core.instance.DagsterInstance.get") as _instance: _instance.return_value = instance runner = CliRunner() result = runner.invoke(schedule_logs_command, cli_args + ["foo_schedule"]) assert result.exit_code == 0 assert "scheduler.log" in result.output def test_check_repo_and_scheduler_no_external_schedules(): repository = mock.MagicMock(spec=ExternalRepository) repository.get_external_schedules.return_value = [] instance = mock.MagicMock(spec=DagsterInstance) with pytest.raises(click.UsageError, match="There are no schedules defined for repository"): check_repo_and_scheduler(repository, instance) def test_check_repo_and_scheduler_dagster_home_not_set(): with environ({"DAGSTER_HOME": ""}): repository = mock.MagicMock(spec=ExternalRepository) repository.get_external_schedules.return_value = [mock.MagicMock()] instance = mock.MagicMock(spec=DagsterInstance) with pytest.raises( click.UsageError, match=re.escape("The environment variable $DAGSTER_HOME is not set.") ): check_repo_and_scheduler(repository, instance)
36.664286
99
0.635301
d786ed7c4890f77528f3f4e6ad0221678f72655f
5,595
py
Python
TeradataVantageFunctionsPlugin/python-lib/inputtableinfo.py
Teradata/vantage-dss-plugin-mle-functions
de226b0a5431439d264557e289d571edca579d31
[ "MIT" ]
1
2019-11-13T01:28:46.000Z
2019-11-13T01:28:46.000Z
TeradataVantageFunctionsPlugin/python-lib/inputtableinfo.py
Teradata/vantage-dss-plugin-mle-functions
de226b0a5431439d264557e289d571edca579d31
[ "MIT" ]
1
2019-10-08T23:28:10.000Z
2019-10-08T23:28:10.000Z
TeradataVantageFunctionsPlugin/python-lib/inputtableinfo.py
Teradata/vantage-dss-plugin-mle-functions
de226b0a5431439d264557e289d571edca579d31
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*- ''' Copyright © 2019 by Teradata. Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ''' import tableinfo from pseudoconstantgetters import * class inputtableinfo(tableinfo.tableinfo): def __init__(self, connectioninfo, datasetname, dss_function): super(inputtableinfo, self).__init__(connectioninfo, datasetname) self.__partitionKey = self.__getPartitionKeyFromFunctionDef(dss_function) self.__orderKey = self.__getOrderByKeyFromInputDef(dss_function) self.__dssfunction = dss_function self.__alias = '' @property def schemaname(self): return self._schemaname @property def tablenamewithoutschema(self): return self._tablename @property def partitionKey(self): return self.__partitionKey @property def orderKey(self): return self.__orderKey @property def alias(self): return self.__alias # Added useCoprocessor at end to get if coprocessor def setPropertiesFromDef(self, inputdef, useCoprocessor): tablealias = inputdef.get('name', '') tmpAlias = tablealias tmpAlias = '' if 'Dimension' == tablealias else tablealias # Test Add new alias if has alternateNames # print('Testing alternateNames') alternateNames = inputdef.get('alternateNames', []) # print(alternateNames) # print(alternateNames[0]) # print(alternateNames[0].encode("utf-8")) # if alternate tables # if tmpAlias = '' if 'input' == tablealias else tablealias tmpAlias = alternateNames[0].encode("utf-8") if alternateNames != [] else tmpAlias self.__alias = tmpAlias self.__partitionKey = self.__getPartitionClauseFromAliasedInputDef( inputdef.get('kind', 'DSSOTHERS'), inputdef) self.__orderKey = self.__getOrderByKeyFromInputDef(inputdef) def __getPartitionClauseFromAliasedInputDef(self, kind, inputdef): partitionbycolumn = inputdef.get('partitionAttributes', '') if isinstance(partitionbycolumn, (list, tuple)): return getPartitionKind(kind) +\ (', '.join(partitionbycolumn) if 'PartitionByKey' == kind else '') return getPartitionKind(kind) +\ (partitionbycolumn if 'PartitionByKey' == kind else '') def __getPartitionAttributes(self, inputdef): return ', '.join(inputdef.get('partitionAttributes', [])) def __getPartitionClauseFromInputDef(self, kind, inputdef): return getPartitionKind(kind) +\ (self.__getPartitionAttributes(inputdef) if 'PartitionByKey' == kind else '') def __getPartitionKeyFromFunctionDef(self, dss_function): # partition kind = next(iter(dss_function.get("partitionInputKind",[])),'DSSOTHERS') return self.__getPartitionClauseFromInputDef(kind, dss_function) def __getOrderByKeyFromInputDef(self, inputdef): #no empty string checking for orderByColumn since this is 
mandatory if isOrdered is true orderKeyFromInputDef = inputdef.get("orderByColumn", ['']) orderKeyDirectionFromInputDef = inputdef.get("orderByColumnDirection", ['']) # print('Order type') # print(orderKeyFromInputDef) # print(orderKeyDirectionFromInputDef) # print(orderKeyDirectionFromInputDef) # if orderKeyFromInputDef != [] or orderKeyFromInputDef == [''] or (len(orderKeyFromInputDef) == 0 and orderKeyFromInputDef[0].encode('ascii','ignore') != '' ): # return ', '.join([a + b for a,b in zip(orderKeyFromInputDef,orderKeyDirectionFromInputDef)]) if \ # isinstance(orderKeyFromInputDef, (list, tuple)) else orderKeyFromInputDef #Add DIRECTION # print('isinstance?') # print(isinstance(orderKeyFromInputDef, (list, tuple))) # print(isinstance(orderKeyDirectionFromInputDef, (list, tuple))) if isinstance(orderKeyFromInputDef, (list, tuple)) and orderKeyFromInputDef != [''] and orderKeyFromInputDef != [None]: returnValue = ', '.join([a + " " + b for a,b in zip(orderKeyFromInputDef,orderKeyDirectionFromInputDef)]) #print(returnValue) return ', '.join([a + " " + b for a,b in zip(orderKeyFromInputDef,orderKeyDirectionFromInputDef)]) else: return orderKeyFromInputDef # return ', '.join(orderKeyFromInputDef) if \ # isinstance(orderKeyFromInputDef, (list, tuple)) else orderKeyFromInputDef #Add DIRECTION # else: # return ['']
46.239669
168
0.687399
5c77fcb56d7888534ff0a4765795a1ae9a164106
962
py
Python
DjangoBookWebApp/views.py
tsteternlieb/BookRecHeroku
389e25639f031e93d04e02340394c71dbe68b156
[ "MIT" ]
null
null
null
DjangoBookWebApp/views.py
tsteternlieb/BookRecHeroku
389e25639f031e93d04e02340394c71dbe68b156
[ "MIT" ]
null
null
null
DjangoBookWebApp/views.py
tsteternlieb/BookRecHeroku
389e25639f031e93d04e02340394c71dbe68b156
[ "MIT" ]
null
null
null
from django.shortcuts import render from DjangoBookWebApp.book_search_model import FinalWrapper model = FinalWrapper() # our home page view def home(request): return render(request, 'index.html') # custom method for generating predictions def getPredictions(pclass): #model = pickle.load(open("titanic_survival_ml_model.sav", "rb")) #scaled = pickle.load(open("scaler.sav", "rb")) #prediction = model.predict(sc.transform([[pclass, sex, age, sibsp, parch, fare, C, Q, S]])) #model = FinalWrapper() if pclass[0] == '"': pclass = pclass[1:] if pclass[-1] == '"': pclass = pclass[:-1] predictions = model.GetRecs(pclass) final = '' for book in predictions: final += book + "\n" return predictions # our result page view def result(request): pclass = str(request.GET['pclass']) result = getPredictions(pclass) return render(request, 'result.html', {'result':result})
28.294118
96
0.652807
c0e835b5b80a18891f5c0fb0f2af5671d958103f
1,590
py
Python
VirusServer/vyterm/vythug/kernel.py
Vyterm/VytHug
22b4c6708a23898d41fc49d604c54790b4f8c965
[ "MIT" ]
null
null
null
VirusServer/vyterm/vythug/kernel.py
Vyterm/VytHug
22b4c6708a23898d41fc49d604c54790b4f8c965
[ "MIT" ]
null
null
null
VirusServer/vyterm/vythug/kernel.py
Vyterm/VytHug
22b4c6708a23898d41fc49d604c54790b4f8c965
[ "MIT" ]
null
null
null
from abc import abstractmethod from enum import Enum from vyterm.sqladapter import SqlAdapter from vyterm.utils import singleton, get_instance from vyterm.cryptography import md5str class OpCommand(Enum): Kernel = 0 Virus = 1 class KernelCommand(Enum): Version = 0 class VirusCommand(Enum): Check = 0 Over = 1 Submit = 2 @get_instance @singleton class Caches(object): def __init__(self): self.mysql = SqlAdapter(db="virus_db", password="1JXahbu230x1Zehim88t") self.viruses = {} for virus in self.mysql.select("select `virus_tag`, `virus_name` from `md5_virus_table`;"): tag, name = virus self.viruses[tag] = name def is_virus(self, virus_md5: str) -> bool: return virus_md5 in self.viruses def get_name(self, virus_md5: str) -> str: return self.viruses[virus_md5] def submit(self, virus_md5: str): self.mysql.execute("insert into `md5_virus_table`(`virus_tag`) values ('%s');" % virus_md5) self.viruses[virus_md5] = "Trojan" class Handler(object): @property @abstractmethod def handlers(self): pass def execute(self, client, command, packet): if command in self.handlers: self.handlers[command](client, packet) return True else: return False @abstractmethod def logout(self, client): pass if __name__ == '__main__': assert id(Caches()) == id(Caches()) assert id(Caches.get()) == id(Caches.get()) print("All tests of VytHug.kernel passed!") pass
23.731343
99
0.642767
57738c519f4f0130b4e13f231cc3471d1e18be37
1,178
py
Python
runtests.py
jpleger/django-analystnotes
2e2905a1a86ef8069f9381f33d95b645f2e37f77
[ "BSD-3-Clause" ]
null
null
null
runtests.py
jpleger/django-analystnotes
2e2905a1a86ef8069f9381f33d95b645f2e37f77
[ "BSD-3-Clause" ]
1
2021-03-19T22:04:03.000Z
2021-03-19T22:04:03.000Z
runtests.py
jpleger/django-analystnotes
2e2905a1a86ef8069f9381f33d95b645f2e37f77
[ "BSD-3-Clause" ]
null
null
null
import sys try: from django.conf import settings settings.configure( DEBUG=True, USE_TZ=True, DATABASES={ "default": { "ENGINE": "django.db.backends.sqlite3", } }, ROOT_URLCONF="analystnotes.urls", INSTALLED_APPS=[ "django.contrib.auth", "django.contrib.contenttypes", "django.contrib.sites", "analystnotes", ], SITE_ID=1, NOSE_ARGS=['-s'], MIDDLEWARE_CLASSES=(), ) try: import django setup = django.setup except AttributeError: pass else: setup() from django_nose import NoseTestSuiteRunner except ImportError: import traceback traceback.print_exc() raise ImportError("To fix this error, run: pip install -r requirements-test.txt") def run_tests(*test_args): if not test_args: test_args = ['tests'] # Run tests test_runner = NoseTestSuiteRunner(verbosity=1) failures = test_runner.run_tests(test_args) if failures: sys.exit(failures) if __name__ == '__main__': run_tests(*sys.argv[1:])
21.035714
85
0.578947
564626efcde7f1496268eee26093e7a2f5272b66
7,675
py
Python
stubs.min/System/Windows/Forms/__init___parts/TrackBarRenderer.py
ricardyn/ironpython-stubs
4d2b405eda3ceed186e8adca55dd97c332c6f49d
[ "MIT" ]
1
2021-02-02T13:39:16.000Z
2021-02-02T13:39:16.000Z
stubs.min/System/Windows/Forms/__init___parts/TrackBarRenderer.py
hdm-dt-fb/ironpython-stubs
4d2b405eda3ceed186e8adca55dd97c332c6f49d
[ "MIT" ]
null
null
null
stubs.min/System/Windows/Forms/__init___parts/TrackBarRenderer.py
hdm-dt-fb/ironpython-stubs
4d2b405eda3ceed186e8adca55dd97c332c6f49d
[ "MIT" ]
null
null
null
class TrackBarRenderer(object): """ Provides methods used to render a track bar control with visual styles. This class cannot be inherited. """ @staticmethod def DrawBottomPointingThumb(g,bounds,state): """ DrawBottomPointingThumb(g: Graphics,bounds: Rectangle,state: TrackBarThumbState) Draws a downward-pointing track bar slider (also known as the thumb) with visual styles. g: The System.Drawing.Graphics used to draw the track bar slider. bounds: The System.Drawing.Rectangle that specifies the bounds of the track bar slider. state: One of the System.Windows.Forms.VisualStyles.TrackBarThumbState values that specifies the visual state of the track bar slider. """ pass @staticmethod def DrawHorizontalThumb(g,bounds,state): """ DrawHorizontalThumb(g: Graphics,bounds: Rectangle,state: TrackBarThumbState) Draws a horizontal track bar slider (also known as the thumb) with visual styles. g: The System.Drawing.Graphics used to draw the track bar slider. bounds: The System.Drawing.Rectangle that specifies the bounds of the track bar slider. state: One of the System.Windows.Forms.VisualStyles.TrackBarThumbState values that specifies the visual state of the track bar slider. """ pass @staticmethod def DrawHorizontalTicks(g,bounds,numTicks,edgeStyle): """ DrawHorizontalTicks(g: Graphics,bounds: Rectangle,numTicks: int,edgeStyle: EdgeStyle) Draws the specified number of horizontal track bar ticks with visual styles. g: The System.Drawing.Graphics used to draw the ticks. bounds: The System.Drawing.Rectangle that specifies the bounds of the ticks. numTicks: The number of ticks to draw. edgeStyle: One of the System.Windows.Forms.VisualStyles.EdgeStyle values. """ pass @staticmethod def DrawHorizontalTrack(g,bounds): """ DrawHorizontalTrack(g: Graphics,bounds: Rectangle) Draws the track for a horizontal track bar with visual styles. g: The System.Drawing.Graphics used to draw the track. bounds: The System.Drawing.Rectangle that specifies the bounds of the track. """ pass @staticmethod def DrawLeftPointingThumb(g,bounds,state): """ DrawLeftPointingThumb(g: Graphics,bounds: Rectangle,state: TrackBarThumbState) Draws a left-pointing track bar slider (also known as the thumb) with visual styles. g: The System.Drawing.Graphics used to draw the track bar slider. bounds: The System.Drawing.Rectangle that specifies the bounds of the track bar slider. state: One of the System.Windows.Forms.VisualStyles.TrackBarThumbState values that specifies the visual state of the track bar slider. """ pass @staticmethod def DrawRightPointingThumb(g,bounds,state): """ DrawRightPointingThumb(g: Graphics,bounds: Rectangle,state: TrackBarThumbState) Draws a right-pointing track bar slider (also known as the thumb) with visual styles. g: The System.Drawing.Graphics used to draw the track bar slider. bounds: The System.Drawing.Rectangle that specifies the bounds of the track bar slider. state: One of the System.Windows.Forms.VisualStyles.TrackBarThumbState values that specifies the visual state of the track bar slider. """ pass @staticmethod def DrawTopPointingThumb(g,bounds,state): """ DrawTopPointingThumb(g: Graphics,bounds: Rectangle,state: TrackBarThumbState) Draws an upward-pointing track bar slider (also known as the thumb) with visual styles. g: The System.Drawing.Graphics used to draw the track bar slider. bounds: The System.Drawing.Rectangle that specifies the bounds of the track bar slider. state: One of the System.Windows.Forms.VisualStyles.TrackBarThumbState values that specifies the visual state of the track bar slider. 
""" pass @staticmethod def DrawVerticalThumb(g,bounds,state): """ DrawVerticalThumb(g: Graphics,bounds: Rectangle,state: TrackBarThumbState) Draws a vertical track bar slider (also known as the thumb) with visual styles. g: The System.Drawing.Graphics used to draw the track bar slider. bounds: The System.Drawing.Rectangle that specifies the bounds of the track bar slider. state: One of the System.Windows.Forms.VisualStyles.TrackBarThumbState values that specifies the visual state of the track bar slider. """ pass @staticmethod def DrawVerticalTicks(g,bounds,numTicks,edgeStyle): """ DrawVerticalTicks(g: Graphics,bounds: Rectangle,numTicks: int,edgeStyle: EdgeStyle) Draws the specified number of vertical track bar ticks with visual styles. g: The System.Drawing.Graphics used to draw the ticks. bounds: The System.Drawing.Rectangle that specifies the bounds of the ticks. numTicks: The number of ticks to draw. edgeStyle: One of the System.Windows.Forms.VisualStyles.EdgeStyle values. """ pass @staticmethod def DrawVerticalTrack(g,bounds): """ DrawVerticalTrack(g: Graphics,bounds: Rectangle) Draws the track for a vertical track bar with visual styles. g: The System.Drawing.Graphics used to draw the track. bounds: The System.Drawing.Rectangle that specifies the bounds of the track. """ pass @staticmethod def GetBottomPointingThumbSize(g,state): """ GetBottomPointingThumbSize(g: Graphics,state: TrackBarThumbState) -> Size Returns the size,in pixels,of the track bar slider (also known as the thumb) that points down. g: The System.Drawing.Graphics this operation will use. state: One of the System.Windows.Forms.VisualStyles.TrackBarThumbState values that specifies the visual state of the track bar slider. Returns: A System.Drawing.Size that specifies the size,in pixels,of the slider. """ pass @staticmethod def GetLeftPointingThumbSize(g,state): """ GetLeftPointingThumbSize(g: Graphics,state: TrackBarThumbState) -> Size Returns the size,in pixels,of the track bar slider (also known as the thumb) that points to the left. g: The System.Drawing.Graphics this operation will use. state: One of the System.Windows.Forms.VisualStyles.TrackBarThumbState values that specifies the visual state of the slider. Returns: A System.Drawing.Size that specifies the size,in pixels,of the slider. """ pass @staticmethod def GetRightPointingThumbSize(g,state): """ GetRightPointingThumbSize(g: Graphics,state: TrackBarThumbState) -> Size Returns the size,in pixels,of the track bar slider (also known as the thumb) that points to the right. g: The System.Drawing.Graphics this operation will use. state: One of the System.Windows.Forms.VisualStyles.TrackBarThumbState values that specifies the visual state of the slider. Returns: A System.Drawing.Size that specifies the size,in pixels,of the slider. """ pass @staticmethod def GetTopPointingThumbSize(g,state): """ GetTopPointingThumbSize(g: Graphics,state: TrackBarThumbState) -> Size Returns the size,in pixels,of the track bar slider (also known as the thumb) that points up. g: The System.Drawing.Graphics this operation will use. state: One of the System.Windows.Forms.VisualStyles.TrackBarThumbState values that specifies the visual state of the slider. Returns: A System.Drawing.Size that specifies the size,in pixels,of the slider. """ pass IsSupported=True
39.358974
113
0.722345
a54c214ad8c6c89ce9fe6e932ad366bda6265c70
1,655
py
Python
data-generation/openml-datasets/cp-to-hdfs.py
VIDA-NYU/prida
cb2af13704506abc73d10f5c346ea21f70dd6e65
[ "BSD-3-Clause" ]
1
2021-06-12T02:03:54.000Z
2021-06-12T02:03:54.000Z
data-generation/openml-datasets/cp-to-hdfs.py
VIDA-NYU/prida
cb2af13704506abc73d10f5c346ea21f70dd6e65
[ "BSD-3-Clause" ]
null
null
null
data-generation/openml-datasets/cp-to-hdfs.py
VIDA-NYU/prida
cb2af13704506abc73d10f5c346ea21f70dd6e65
[ "BSD-3-Clause" ]
null
null
null
import os import subprocess import sys if __name__ == '__main__': original_data_dir = sys.argv[1] hdfs_data_dir = sys.argv[2] if hdfs_data_dir.endswith('/'): hdfs_data_dir = hdfs_data_dir[:-1] if hdfs_data_dir.strip() == '': sys.exit(0) # Making sure HDFS dir is empty subprocess.call('hdfs dfs -rm -r %s/*' % hdfs_data_dir, shell=True) # Copying files datasets_ = os.listdir(original_data_dir) datasets = list() for dataset in datasets_: data_path = os.path.join(original_data_dir, dataset, dataset + '_dataset', 'tables', 'learningData.csv') data_size = os.stat(data_path).st_size/float(1073741824) if data_size <= 0.2: datasets.append(dataset) count = 1 for dataset in datasets: print("Uploading %s ... (%.4f%%)" % (dataset, (float(count)*100)/len(datasets))) dataset_doc = os.path.join(original_data_dir, dataset, dataset + '_dataset', 'datasetDoc.json') dataset_path = os.path.join(original_data_dir, dataset, dataset + '_dataset', 'tables', 'learningData.csv') problem_doc = os.path.join(original_data_dir, dataset, dataset + '_problem', 'problemDoc.json') subprocess.call('hdfs dfs -mkdir \'%s/%s\'' % (hdfs_data_dir, dataset), shell=True) subprocess.call('hdfs dfs -put \'%s\' \'%s/%s/\'' % (dataset_doc, hdfs_data_dir, dataset), shell=True) subprocess.call('hdfs dfs -put \'%s\' \'%s/%s/\'' % (problem_doc, hdfs_data_dir, dataset), shell=True) subprocess.call('hdfs dfs -put \'%s\' \'%s/%s/\'' % (dataset_path, hdfs_data_dir, dataset), shell=True) count += 1
38.488372
115
0.636858
ee84ab255439f4134ea4836d8988738c823d5280
683
py
Python
src/Data Generation/preprocess_embeddings.py
AlexJonesNLP/XLAnalysis5K
6cda1ad9d3f8133943cf736a554a646c865ebb4b
[ "MIT" ]
null
null
null
src/Data Generation/preprocess_embeddings.py
AlexJonesNLP/XLAnalysis5K
6cda1ad9d3f8133943cf736a554a646c865ebb4b
[ "MIT" ]
null
null
null
src/Data Generation/preprocess_embeddings.py
AlexJonesNLP/XLAnalysis5K
6cda1ad9d3f8133943cf736a554a646c865ebb4b
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*- import numpy as np from sklearn.preprocessing import normalize from typing import List def preprocess_embeddings(vecs: np.ndarray) -> np.ndarray: ''' Preprocess embeddings before performing isomorphism computations Procedure consists of normalization, mean-centering, and re-normalization ''' npvecs = np.vstack(vecs) # Step 1: Length normalize npvecs = normalize(npvecs, axis=1, norm='l2') # Step 2: Mean centering npvecs = npvecs - npvecs.mean(0) # Step 3: Length normalize again npvecs = normalize(npvecs, axis=1, norm='l2') # Return double-normalized and mean-centered vectors return npvecs
29.695652
77
0.696925
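A quick check of preprocess_embeddings above (a throwaway sketch, not from the repo): each returned row has unit norm, while the mean is only approximately zero because the final re-normalization comes after mean-centering.

import numpy as np

vecs = np.random.randn(100, 300)
out = preprocess_embeddings(vecs)
print(np.allclose(np.linalg.norm(out, axis=1), 1.0))  # True
print(np.abs(out.mean(0)).max())                      # small, but not exactly 0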
4b6dbee952b0430b330a8b4e06255284b31f39d0
882
py
Python
tests/mocks/test_tables.py
keboola/sapi-python-client
276bcebfc0224f89d8ec5e9dd0b94cc15a6596b9
[ "MIT" ]
3
2017-07-13T13:17:37.000Z
2017-11-08T19:01:29.000Z
tests/mocks/test_tables.py
keboola/sapi-python-client
276bcebfc0224f89d8ec5e9dd0b94cc15a6596b9
[ "MIT" ]
45
2017-07-13T07:49:51.000Z
2021-08-19T15:43:39.000Z
tests/mocks/test_tables.py
keboola/sapi-python-client
276bcebfc0224f89d8ec5e9dd0b94cc15a6596b9
[ "MIT" ]
5
2017-07-14T05:50:49.000Z
2021-12-28T17:01:29.000Z
""" Test basic functionality of the Tables endpoint """ import unittest import responses from kbcstorage.tables import Tables from .table_responses import list_response class TestTablesEndpointWithMocks(unittest.TestCase): """ Test the methods of a Tables endpoint instance with mock HTTP responses """ def setUp(self): token = 'dummy_token' base_url = 'https://connection.keboola.com/' self.tables = Tables(base_url, token) @responses.activate def test_list(self): """ Tables mocks list correctly """ responses.add( responses.Response( method='GET', url='https://connection.keboola.com/v2/storage/tables', json=list_response ) ) tables_list = self.tables.list() assert isinstance(tables_list, list)
24.5
75
0.622449
8296bfbc47a8391b9a7b4f5fb4c37dbb9b4667dc
899
py
Python
setup.py
riquedev/SSLProxies24Feed
93ab23a6794ae7f40002eb464a9c443afe44db86
[ "MIT" ]
null
null
null
setup.py
riquedev/SSLProxies24Feed
93ab23a6794ae7f40002eb464a9c443afe44db86
[ "MIT" ]
1
2017-09-15T13:27:09.000Z
2017-09-15T14:43:28.000Z
setup.py
riquedev/SSLProxies24Feed
93ab23a6794ae7f40002eb464a9c443afe44db86
[ "MIT" ]
null
null
null
from os import path from setuptools import setup here = path.abspath(path.dirname(__file__)) with open(path.join(here, 'README'), encoding='utf-8') as f: long_description = f.read() package_name = "SSLProxies24" package_version = "1.3.6" setup( name=package_name, version=package_version, packages=[package_name], description='Get and validate multiple proxies daily with this package. (Thread).', long_description=long_description, requires=["requests", "defusedxml"], python_requires='>=3.6', license='MIT', author="Henrique da Silva Santos (rique_dev)", author_email="[email protected]", url="https://github.com/riquedev/SSLProxies24Feed", download_url='https://github.com/riquedev/SSLProxies24Feed/archive/master.zip', keywords=['proxy', 'ssl', '24', 'feed', 'blog', 'anonymous'], install_requires=["requests", "defusedxml"] )
33.296296
88
0.708565
9fe451e52c5429aeca2687e0d5531cd43507b9ce
20,525
py
Python
qiskit/aqua/utils/run_circuits.py
Nick-Singstock/qiskit-aqua
8c2bc57b78dec447faec3adbc966471a3206c2ef
[ "Apache-2.0" ]
1
2020-11-06T01:09:28.000Z
2020-11-06T01:09:28.000Z
qiskit/aqua/utils/run_circuits.py
Nick-Singstock/qiskit-aqua
8c2bc57b78dec447faec3adbc966471a3206c2ef
[ "Apache-2.0" ]
null
null
null
qiskit/aqua/utils/run_circuits.py
Nick-Singstock/qiskit-aqua
8c2bc57b78dec447faec3adbc966471a3206c2ef
[ "Apache-2.0" ]
1
2020-11-06T01:09:43.000Z
2020-11-06T01:09:43.000Z
# -*- coding: utf-8 -*- # This code is part of Qiskit. # # (C) Copyright IBM Corp. 2017 and later. # # This code is licensed under the Apache License, Version 2.0. You may # obtain a copy of this license in the LICENSE.txt file in the root directory # of this source tree or at http://www.apache.org/licenses/LICENSE-2.0. # # Any modifications or derivative works of this code must retain this # copyright notice, and modified files need to carry a notice indicating # that they have been altered from the originals. import sys import logging import time import copy import os import uuid import numpy as np from qiskit import compiler from qiskit.compiler.assembler import assemble_circuits from qiskit.providers import BaseBackend, JobStatus, JobError from qiskit.providers.basicaer import BasicAerJob from qiskit.qobj import QobjHeader from qiskit.aqua.aqua_error import AquaError from qiskit.aqua.utils import summarize_circuits from qiskit.aqua.utils.backend_utils import (is_aer_provider, is_basicaer_provider, is_ibmq_provider, is_simulator_backend, is_local_backend) MAX_CIRCUITS_PER_JOB = os.environ.get('QISKIT_AQUA_MAX_CIRCUITS_PER_JOB', None) logger = logging.getLogger(__name__) def find_regs_by_name(circuit, name, qreg=True): """Find the registers in the circuits. Args: circuit (QuantumCircuit): the quantum circuit. name (str): name of register qreg (bool): quantum or classical register Returns: QuantumRegister or ClassicalRegister or None: if not found, return None. """ found_reg = None regs = circuit.qregs if qreg else circuit.cregs for reg in regs: if reg.name == name: found_reg = reg break return found_reg def _avoid_empty_circuits(circuits): new_circuits = [] for qc in circuits: if len(qc) == 0: tmp_q = None for q in qc.qregs: tmp_q = q break if tmp_q is None: raise NameError("A QASM without any quantum register is invalid.") qc.iden(tmp_q[0]) new_circuits.append(qc) return new_circuits def _reuse_shared_circuits(circuits, backend, backend_config, compile_config, run_config, qjob_config=None, backend_options=None, show_circuit_summary=False): """Reuse the circuits with the shared head. We assume the 0-th circuit is the shared_circuit, so we execute it first and then use it as initial state for simulation. Note that all circuits should have the exact the same shared parts. """ qjob_config = qjob_config or {} backend_options = backend_options or {} shared_circuit = circuits[0] shared_result = compile_and_run_circuits(shared_circuit, backend, backend_config, compile_config, run_config, qjob_config, show_circuit_summary=show_circuit_summary) if len(circuits) == 1: return shared_result shared_quantum_state = np.asarray(shared_result.get_statevector(shared_circuit)) # extract different of circuits for circuit in circuits[1:]: circuit.data = circuit.data[len(shared_circuit):] temp_backend_options = copy.deepcopy(backend_options) if 'backend_options' not in temp_backend_options: temp_backend_options['backend_options'] = {} temp_backend_options['backend_options']['initial_statevector'] = shared_quantum_state diff_result = compile_and_run_circuits(circuits[1:], backend, backend_config, compile_config, run_config, qjob_config, backend_options=temp_backend_options, show_circuit_summary=show_circuit_summary) result = _combine_result_objects([shared_result, diff_result]) return result def _combine_result_objects(results): """Tempoary helper function. TODO: This function would be removed after Terra supports job with infinite circuits. 
""" if len(results) == 1: return results[0] new_result = copy.deepcopy(results[0]) for idx in range(1, len(results)): new_result.results.extend(results[idx].results) return new_result def _maybe_add_aer_expectation_instruction(qobj, options): if 'expectation' in options: from qiskit.providers.aer.utils.qobj_utils import snapshot_instr, append_instr, get_instr_pos # add others, how to derive the correct used number of qubits? # the compiled qobj could be wrong if coupling map is used. params = options['expectation']['params'] num_qubits = options['expectation']['num_qubits'] for idx in range(len(qobj.experiments)): # if mulitple params are provided, we assume that each circuit is corresponding one param # otherwise, params are used for all circuits. param_idx = idx if len(params) > 1 else 0 snapshot_pos = get_instr_pos(qobj, idx, 'snapshot') if len(snapshot_pos) == 0: # does not append the instruction yet. new_ins = snapshot_instr('expectation_value_pauli', 'test', range(num_qubits), params=params[param_idx]) qobj = append_instr(qobj, idx, new_ins) else: for i in snapshot_pos: # update all expectation_value_snapshot if qobj.experiments[idx].instructions[i].type == 'expectation_value_pauli': qobj.experiments[idx].instructions[i].params = params[param_idx] return qobj def _compile_wrapper(circuits, backend, backend_config, compile_config, run_config): transpiled_circuits = compiler.transpile(circuits, backend, **backend_config, **compile_config) if not isinstance(transpiled_circuits, list): transpiled_circuits = [transpiled_circuits] qobj = assemble_circuits(transpiled_circuits, qobj_id=str(uuid.uuid4()), qobj_header=QobjHeader(), run_config=run_config) return qobj, transpiled_circuits def compile_and_run_circuits(circuits, backend, backend_config=None, compile_config=None, run_config=None, qjob_config=None, backend_options=None, noise_config=None, show_circuit_summary=False, has_shared_circuits=False, circuit_cache=None, skip_qobj_validation=False, **kwargs): """ An execution wrapper with Qiskit-Terra, with job auto recover capability. The autorecovery feature is only applied for non-simulator backend. This wraper will try to get the result no matter how long it costs. Args: circuits (QuantumCircuit or list[QuantumCircuit]): circuits to execute backend (BaseBackend): backend instance backend_config (dict, optional): configuration for backend compile_config (dict, optional): configuration for compilation run_config (RunConfig, optional): configuration for running a circuit qjob_config (dict, optional): configuration for quantum job object backend_options (dict, optional): configuration for simulator noise_config (dict, optional): configuration for noise model show_circuit_summary (bool, optional): showing the summary of submitted circuits. has_shared_circuits (bool, optional): use the 0-th circuits as initial state for other circuits. 
circuit_cache (CircuitCache, optional): A CircuitCache to use when calling compile_and_run_circuits skip_qobj_validation (bool, optional): Bypass Qobj validation to decrease submission time Returns: Result: Result object Raises: AquaError: Any error except for JobError raised by Qiskit Terra """ backend_config = backend_config or {} compile_config = compile_config or {} run_config = run_config or {} qjob_config = qjob_config or {} backend_options = backend_options or {} noise_config = noise_config or {} if backend is None or not isinstance(backend, BaseBackend): raise ValueError('Backend is missing or not an instance of BaseBackend') if not isinstance(circuits, list): circuits = [circuits] if is_simulator_backend(backend): circuits = _avoid_empty_circuits(circuits) if has_shared_circuits: return _reuse_shared_circuits(circuits, backend, backend_config, compile_config, run_config, qjob_config, backend_options) with_autorecover = False if is_simulator_backend(backend) else True if MAX_CIRCUITS_PER_JOB is not None: max_circuits_per_job = int(MAX_CIRCUITS_PER_JOB) else: if is_local_backend(backend): max_circuits_per_job = sys.maxsize else: max_circuits_per_job = backend.configuration().max_experiments if circuit_cache is not None and circuit_cache.try_reusing_qobjs: # Check if all circuits are the same length. # If not, don't try to use the same qobj.experiment for all of them. if len(set([len(circ.data) for circ in circuits])) > 1: circuit_cache.try_reusing_qobjs = False else: # Try setting up the reusable qobj # Compile and cache first circuit if cache is empty. The load method will try to reuse it if circuit_cache.qobjs is None: qobj, _ = _compile_wrapper([circuits[0]], backend, backend_config, compile_config, run_config) if is_aer_provider(backend): qobj = _maybe_add_aer_expectation_instruction(qobj, kwargs) circuit_cache.cache_circuit(qobj, [circuits[0]], 0) qobjs = [] jobs = [] job_ids = [] transpiled_circuits = [] chunks = int(np.ceil(len(circuits) / max_circuits_per_job)) for i in range(chunks): sub_circuits = circuits[i * max_circuits_per_job:(i + 1) * max_circuits_per_job] if circuit_cache is not None and circuit_cache.misses < circuit_cache.allowed_misses: try: if circuit_cache.cache_transpiled_circuits: transpiled_sub_circuits = compiler.transpile(sub_circuits, backend, **backend_config, **compile_config) qobj = circuit_cache.load_qobj_from_cache(transpiled_sub_circuits, i, run_config=run_config) else: qobj = circuit_cache.load_qobj_from_cache(sub_circuits, i, run_config=run_config) if is_aer_provider(backend): qobj = _maybe_add_aer_expectation_instruction(qobj, kwargs) # cache miss, fail gracefully except (TypeError, IndexError, FileNotFoundError, EOFError, AquaError, AttributeError) as e: circuit_cache.try_reusing_qobjs = False # Reusing Qobj didn't work if len(circuit_cache.qobjs) > 0: logger.info('Circuit cache miss, recompiling. 
Cache miss reason: ' + repr(e)) circuit_cache.misses += 1 else: logger.info('Circuit cache is empty, compiling from scratch.') circuit_cache.clear_cache() qobj, transpiled_sub_circuits = _compile_wrapper(sub_circuits, backend, backend_config, compile_config, run_config) transpiled_circuits.extend(transpiled_sub_circuits) if is_aer_provider(backend): qobj = _maybe_add_aer_expectation_instruction(qobj, kwargs) try: circuit_cache.cache_circuit(qobj, sub_circuits, i) except (TypeError, IndexError, AquaError, AttributeError, KeyError) as e: try: circuit_cache.cache_transpiled_circuits = True circuit_cache.cache_circuit(qobj, transpiled_sub_circuits, i) except (TypeError, IndexError, AquaError, AttributeError, KeyError) as e: logger.info('Circuit could not be cached for reason: ' + repr(e)) logger.info('Transpilation may be too aggressive. Try skipping transpiler.') else: qobj, transpiled_sub_circuits = _compile_wrapper(sub_circuits, backend, backend_config, compile_config, run_config) transpiled_circuits.extend(transpiled_sub_circuits) if is_aer_provider(backend): qobj = _maybe_add_aer_expectation_instruction(qobj, kwargs) # assure get job ids while True: job = run_on_backend(backend, qobj, backend_options=backend_options, noise_config=noise_config, skip_qobj_validation=skip_qobj_validation) try: job_id = job.job_id() break except JobError as e: logger.warning("FAILURE: the {}-th chunk of circuits, can not get job id, " "Resubmit the qobj to get job id. " "Terra job error: {} ".format(i, e)) except Exception as e: logger.warning("FAILURE: the {}-th chunk of circuits, can not get job id, " "Resubmit the qobj to get job id. " "Error: {} ".format(i, e)) job_ids.append(job_id) jobs.append(job) qobjs.append(qobj) if logger.isEnabledFor(logging.DEBUG) and show_circuit_summary: logger.debug("==== Before transpiler ====") logger.debug(summarize_circuits(circuits)) logger.debug("==== After transpiler ====") logger.debug(summarize_circuits(transpiled_circuits)) results = [] if with_autorecover: logger.info("Backend status: {}".format(backend.status())) logger.info("There are {} circuits and they are chunked into {} chunks, " "each with {} circutis (max.).".format(len(circuits), chunks, max_circuits_per_job)) logger.info("All job ids:\n{}".format(job_ids)) for idx in range(len(jobs)): while True: job = jobs[idx] job_id = job_ids[idx] logger.info("Running {}-th chunk circuits, job id: {}".format(idx, job_id)) # try to get result if possible try: result = job.result(**qjob_config) if result.success: results.append(result) logger.info("COMPLETED the {}-th chunk of circuits, " "job id: {}".format(idx, job_id)) break else: logger.warning("FAILURE: the {}-th chunk of circuits, " "job id: {}".format(idx, job_id)) except JobError as e: # if terra raise any error, which means something wrong, re-run it logger.warning("FAILURE: the {}-th chunk of circuits, job id: {} " "Terra job error: {} ".format(idx, job_id, e)) except Exception as e: raise AquaError("FAILURE: the {}-th chunk of circuits, job id: {} " "Unknown error: {} ".format(idx, job_id, e)) from e # something wrong here, querying the status to check how to handle it. # keep qeurying it until getting the status. 
while True: try: job_status = job.status() break except JobError as e: logger.warning("FAILURE: job id: {}, " "status: 'FAIL_TO_GET_STATUS' " "Terra job error: {}".format(job_id, e)) time.sleep(5) except Exception as e: raise AquaError("FAILURE: job id: {}, " "status: 'FAIL_TO_GET_STATUS' " "Unknown error: ({})".format(job_id, e)) from e logger.info("Job status: {}".format(job_status)) # handle the failure job based on job status if job_status == JobStatus.DONE: logger.info("Job ({}) is completed anyway, retrieve result " "from backend.".format(job_id)) job = backend.retrieve_job(job_id) elif job_status == JobStatus.RUNNING or job_status == JobStatus.QUEUED: logger.info("Job ({}) is {}, but encounter an exception, " "recover it from backend.".format(job_id, job_status)) job = backend.retrieve_job(job_id) else: logger.info("Fail to run Job ({}), resubmit it.".format(job_id)) qobj = qobjs[idx] # assure job get its id while True: job = run_on_backend(backend, qobj, backend_options=backend_options, noise_config=noise_config, skip_qobj_validation=skip_qobj_validation) try: job_id = job.job_id() break except JobError as e: logger.warning("FAILURE: the {}-th chunk of circuits, " "can not get job id. Resubmit the qobj to get job id. " "Terra job error: {} ".format(idx, e)) except Exception as e: logger.warning("FAILURE: the {}-th chunk of circuits, " "can not get job id, Resubmit the qobj to get job id. " "Unknown error: {} ".format(idx, e)) jobs[idx] = job job_ids[idx] = job_id else: results = [] for job in jobs: results.append(job.result(**qjob_config)) result = _combine_result_objects(results) if len(results) != 0 else None return result # skip_qobj_validation = True does what backend.run and aerjob.submit do, but without qobj validation. def run_on_backend(backend, qobj, backend_options=None, noise_config=None, skip_qobj_validation=False): if skip_qobj_validation: job_id = str(uuid.uuid4()) if is_aer_provider(backend): from qiskit.providers.aer.aerjob import AerJob temp_backend_options = backend_options['backend_options'] if backend_options != {} else None temp_noise_config = noise_config['noise_model'] if noise_config != {} else None job = AerJob(backend, job_id, backend._run_job, qobj, temp_backend_options, temp_noise_config, False) job._future = job._executor.submit(job._fn, job._job_id, job._qobj, *job._args) elif is_basicaer_provider(backend): backend._set_options(qobj_config=qobj.config, **backend_options) job = BasicAerJob(backend, job_id, backend._run_job, qobj) job._future = job._executor.submit(job._fn, job._job_id, job._qobj) elif is_ibmq_provider(backend): # TODO: IBMQJob performs validation during the constructor. the following lines does not # skip validation but run as is. from qiskit.providers.ibmq.ibmqjob import IBMQJob job = IBMQJob(backend, None, backend._api, qobj=qobj) job._future = job._executor.submit(job._submit_callback) else: logger.info("Can't skip qobj validation for the third-party provider.") job = backend.run(qobj, **backend_options, **noise_config) return job else: job = backend.run(qobj, **backend_options, **noise_config) return job
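For orientation, the sketch below shows one way the helper above could be driven from user code. It is a minimal sketch, not the module's documented API: it assumes compile_and_run_circuits is importable from this module, that empty backend/compile configs are acceptable, and that run_config tolerates a plain dict with a shots entry.

# Minimal usage sketch (assumptions noted above): run a Bell-state circuit
# through compile_and_run_circuits() on the local BasicAer simulator.
from qiskit import QuantumCircuit, BasicAer

bell = QuantumCircuit(2, 2)
bell.h(0)
bell.cx(0, 1)
bell.measure([0, 1], [0, 1])

backend = BasicAer.get_backend('qasm_simulator')

result = compile_and_run_circuits(
    [bell],                          # a single chunk, well under max_circuits_per_job
    backend,
    run_config={'shots': 1024},      # assumed to be accepted as a plain dict
    skip_qobj_validation=True,       # takes the fast path in run_on_backend()
)
print(result.get_counts(0))

Because BasicAer is a local simulator, is_simulator_backend() is true, so the autorecovery loop is skipped and the per-chunk results are simply collected and combined.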
47.955607
131
0.6
e4e87102f270697c64c5c88748f883e182bab83b
41,803
py
Python
Lib/woffTools/__init__.py
typesupply/woffTools
964fe6363ede77bc5faec52d82766286c926c94d
[ "MIT" ]
32
2015-02-15T04:39:02.000Z
2021-12-04T15:05:57.000Z
Lib/woffTools/__init__.py
typesupply/woffTools
964fe6363ede77bc5faec52d82766286c926c94d
[ "MIT" ]
7
2015-06-24T22:58:51.000Z
2018-08-20T21:36:29.000Z
Lib/woffTools/__init__.py
typesupply/woffTools
964fe6363ede77bc5faec52d82766286c926c94d
[ "MIT" ]
11
2015-01-29T22:11:34.000Z
2022-01-21T07:31:45.000Z
from __future__ import print_function from fontTools.misc.py23 import * """ This implements the WOFF specification dated September 16, 2009. The main object is the WOFFFont. It is a subclass for the FontTools TTFont object, so it has very similar functionality. The WOFFReader and WOFFWriter are also available for use outside of this module. Those objects are much faster than WOFFFont, but they require much more care. """ import zlib import struct from fontTools.misc import sstruct from xml.etree import ElementTree from fontTools.ttLib import TTFont, debugmsg, sortedTagList from fontTools.ttLib import getSearchRange from fontTools.ttLib.sfnt import calcChecksum, SFNTDirectoryEntry, \ sfntDirectoryFormat, sfntDirectorySize, sfntDirectoryEntryFormat, sfntDirectoryEntrySize # ----------- # Main Object # ----------- class WOFFFont(TTFont): """ This object represents a WOFF file. It is a subclass of the FontTools TTFont object, so the same API applies. For information about the arguments in __init__, refer to the TTFont documentation. This object has two special attributes: metadata and privateData. The metadata attribute returns an ElementTree Element object representing the metadata stored in the font. To set new metadata in the font, you must use this object. The privateData attribute returns the private data stored in the font. To set private data, set a string to font.privateData. """ def __init__(self, file=None, flavor=b"\000\001\000\000", checkChecksums=0, verbose=False, recalcBBoxes=True, allowVID=False, ignoreDecompileErrors=False): # can't use the TTFont __init__ because it goes directly to the SFNTReader. # see that method for details about all of this. self.verbose = verbose self.recalcBBoxes = recalcBBoxes self.tables = {} self.reader = None self.last_vid = 0xFFFE self.reverseVIDDict = {} self.VIDDict = {} self.allowVID = allowVID self.ignoreDecompileErrors = ignoreDecompileErrors self.flavor = flavor self.majorVersion = 0 self.minorVersion = 0 self._metadata = None self._tableOrder = None self._tableCache = None if file is not None: if not hasattr(file, "read"): file = open(file, "rb") self.reader = WOFFReader(file, checkChecksums=checkChecksums) self.flavor = self.reader.flavor self.majorVersion = self.reader.majorVersion self.minorVersion = self.reader.minorVersion self._tableOrder = self.reader.keys() else: self._metadata = ElementTree.Element("metadata", version="1.0") self.privateData = None def __getattr__(self, attr): if attr not in ("privateData", "metadata", "lazy"): raise AttributeError(attr) # metadata if attr == "metadata": if self._metadata is not None: return self._metadata if self.reader is not None: text = self.reader.metadata if text: metadata = ElementTree.fromstring(text) else: metadata = ElementTree.Element("metadata", version="1.0") self._metadata = metadata return self._metadata return None # private data elif attr == "privateData": if not hasattr(self, "privateData"): privateData = None if self.reader is not None: privateData = self.reader.privateData self.privateData = privateData return self.privateData elif attr == "lazy": return False # fallback to None return None def keys(self): """ Return a list of all tables in the font. If a table order has been set manually or as the result of opening an existing WOFF file, the set table order will be in the list first. Tables not defined in an existing order will be sorted following the suggested ordering in the OTF/OFF specification. The first table listed in all cases is the GlyphOrder pseudo table. 
""" tags = set(self.tables.keys()) if self.reader is not None: tags = tags | set(self.reader.keys()) tags = list(tags) if "GlyphOrder" in tags: tags.remove("GlyphOrder") return ["GlyphOrder"] + sortedTagList(tags, self._tableOrder) def setTableOrder(self, order): """ Set the order in which tables should be written into the font. This is required if a DSIG table is in the font. """ self._tableOrder = order def save(self, file, compressionLevel=9, recompressTables=False, reorderTables=True, recalculateHeadChecksum=True): """ Save a WOFF into file a file object specifified by the file argument.. Optionally, file can be a path and a new file will be created at that location. compressionLevel is the compression level to be used with zlib. This must be an int between 1 and 9. The default is 9, the highest compression, but slowest compression time. Set recompressTables to True if you want any already compressed tables to be decompressed and then recompressed using the level specified by compressionLevel. If you want the tables in the WOFF reordered following the suggested optimal table orderings described in the OTF/OFF sepecification, set reorderTables to True. Tables cannot be reordered if a DSIG table is in the font. If you change any of the SFNT data or reorder the tables, the head table checkSumAdjustment must be recalculated. If you are not changing any of the SFNT data, you can set recalculateHeadChecksum to False to prevent the recalculation. This must be set to False if the font contains a DSIG table. """ # if DSIG is to be written, the table order # must be completely specified. otherwise the # DSIG may not be valid after decoding the WOFF. tags = self.keys() if "GlyphOrder" in tags: tags.remove("GlyphOrder") if "DSIG" in tags: if self._tableOrder is None or (set(self._tableOrder) != set(tags)): raise WOFFLibError("A complete table order must be supplied when saving a font with a 'DSIG' table.") elif reorderTables: raise WOFFLibError("Tables can not be reordered when a 'DSIG' table is in the font. 
Set reorderTables to False.") elif recalculateHeadChecksum: raise WOFFLibError("The 'head' table checkSumAdjustment can not be recalculated when a 'DSIG' table is in the font.") # sort the tags if necessary if reorderTables: tags = sortedTagList(tags) # open a file if necessary closeStream = False if not hasattr(file, "write"): closeStream = True file = open(file, "wb") # write the table data if "GlyphOrder" in tags: tags.remove("GlyphOrder") numTables = len(tags) writer = WOFFWriter(file, numTables, flavor=self.flavor, majorVersion=self.majorVersion, minorVersion=self.minorVersion, compressionLevel=compressionLevel, recalculateHeadChecksum=recalculateHeadChecksum, verbose=self.verbose) for tag in tags: origData = None origLength = None origChecksum = None compLength = None # table is loaded if self.isLoaded(tag): origData = self.getTableData(tag) # table is in reader elif self.reader is not None: if recompressTables: origData = self.getTableData(tag) else: if self.verbose: debugmsg("Reading '%s' table from disk" % tag) origData, origLength, origChecksum, compLength = self.reader.getCompressedTableData(tag) # add to writer writer.setTable(tag, origData, origLength=origLength, origChecksum=origChecksum, compLength=compLength) # write the metadata metadata = None metaOrigLength = None metaLength = None if hasattr(self, "metadata"): declaration = "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n" tree = ElementTree.ElementTree(self.metadata) f = StringIO() tree.write(f, encoding="utf-8") metadata = f.getvalue() # make sure the metadata starts with the declaration if not metadata.startswith(declaration): metadata = declaration + metadata del f elif self.reader is not None: if recompressTables: metadata = self.reader.metadata else: metadata, metaOrigLength, metaLength = self.reader.getCompressedMetadata() if metadata: writer.setMetadata(metadata, metaOrigLength=metaOrigLength, metaLength=metaLength) # write the private data privData = self.privateData if privData: writer.setPrivateData(privData) # close the writer writer.close() # close the file if closeStream: file.close() def saveXML(self): raise NotImplementedError def importXML(self): raise NotImplementedError # ------ # Reader # ------ woffHeaderFormat = """ > # big endian signature: 4s flavor: 4s length: L numTables: H reserved: H totalSFNTSize: L majorVersion: H minorVersion: H metaOffset: L metaLength: L metaOrigLength: L privOffset: L privLength: L """ woffHeaderSize = sstruct.calcsize(woffHeaderFormat) class WOFFReader(object): def __init__(self, file, checkChecksums=1): self.file = file self.checkChecksums = checkChecksums # unpack the header self.file.seek(0) bytes = self.file.read(woffHeaderSize) if len(bytes) != woffHeaderSize: raise WOFFLibError("Not a properly formatted WOFF file.") sstruct.unpack(woffHeaderFormat, bytes, self) if self.signature != "wOFF": raise WOFFLibError("Not a properly formatted WOFF file.") # unpack the directory self.tables = {} for i in range(self.numTables): entry = WOFFDirectoryEntry() entry.fromFile(self.file) self.tables[entry.tag] = entry def close(self): self.file.close() def __contains__(self, tag): return tag in self.tables has_key = __contains__ def keys(self): """ This returns a list of all tables in the WOFF sorted in ascending order based on the offset of each table. 
""" sorter = [] for tag, entry in self.tables.items(): sorter.append((entry.offset, tag)) order = [tag for offset, tag in sorted(sorter)] return order def __getitem__(self, tag): entry = self.tables[tag] self.file.seek(entry.offset) data = self.file.read(entry.compLength) # decompress if necessary if entry.compLength < entry.origLength: data = zlib.decompress(data) else: data = data[:entry.origLength] # compare the checksums if self.checkChecksums: checksum = calcTableChecksum(tag, data) if self.checkChecksums > 1: assert checksum == entry.origChecksum, "bad checksum for '%s' table" % tag elif checksum != entry.origChecksum: print("bad checksum for '%s' table" % tag) print() return data def getCompressedTableData(self, tag): entry = self.tables[tag] self.file.seek(entry.offset) data = self.file.read(entry.compLength) return data, entry.origLength, entry.origChecksum, entry.compLength def getCompressedMetadata(self): self.file.seek(self.metaOffset) data = self.file.read(self.metaLength) return data, self.metaOrigLength, self.metaLength def __getattr__(self, attr): if attr not in ("privateData", "metadata"): raise AttributeError(attr) if attr == "privateData": self.file.seek(self.privOffset) return self.file.read(self.privLength) if attr == "metadata": self.file.seek(self.metaOffset) data = self.file.read(self.metaLength) if self.metaLength: data = zlib.decompress(data) assert len(data) == self.metaOrigLength return data def __delitem__(self, tag): del self.tables[tag] # ------ # Writer # ------ class WOFFWriter(object): def __init__(self, file, numTables, flavor="\000\001\000\000", majorVersion=0, minorVersion=0, compressionLevel=9, recalculateHeadChecksum=True, verbose=False): self.signature = "wOFF" self.flavor = flavor self.length = woffHeaderSize + (numTables * woffDirectoryEntrySize) self.totalSFNTSize = sfntDirectorySize + (numTables * sfntDirectoryEntrySize) self.numTables = numTables self.majorVersion = majorVersion self.minorVersion = minorVersion self.metaOffset = 0 self.metaOrigLength = 0 self.metaLength = 0 self.privOffset = 0 self.privLength = 0 self.reserved = 0 self.file = file self.compressionLevel = compressionLevel self.recalculateHeadChecksum = recalculateHeadChecksum self.verbose = verbose # the data is held to facilitate the # head checkSumAdjustment calculation. self.tables = {} self.metadata = None self.privateData = None self.tableDataEnd = 0 self.metadataEnd = 0 def _tableOrder(self): return [entry.tag for index, entry, data in sorted(self.tables.values())] def setTable(self, tag, data, origLength=None, origChecksum=None, compLength=None): # don't compress the head if the checkSumAdjustment needs to be recalculated # the compression will be handled later. 
if self.recalculateHeadChecksum and tag == "head": # decompress if compLength is not None and compLength < origLength: data = zlib.decompress(data) entry = self._prepTable(tag, data, origLength=len(data), entryOnly=True) # compress else: entry, data = self._prepTable(tag, data=data, origLength=origLength, origChecksum=origChecksum, compLength=compLength) # store self.tables[tag] = (len(self.tables), entry, data) def setMetadata(self, data, metaOrigLength=None, metaLength=None): if not data: return if metaLength is None: if self.verbose: debugmsg("compressing metadata") metaOrigLength = len(data) data = zlib.compress(data, self.compressionLevel) metaLength = len(data) # set the header values self.metaOrigLength = metaOrigLength self.metaLength = metaLength # store self.metadata = data def setPrivateData(self, data): if not data: return privLength = len(data) # set the header value self.privLength = privLength # store self.privateData = data def close(self): if self.numTables != len(self.tables): raise WOFFLibError("wrong number of tables; expected %d, found %d" % (self.numTables, len(self.tables))) # first, handle the checkSumAdjustment if self.recalculateHeadChecksum and "head" in self.tables: self._handleHeadChecksum() # check the table directory conformance for tag, (index, entry, data) in sorted(self.tables.items()): self._checkTableConformance(entry, data) # write the header header = sstruct.pack(woffHeaderFormat, self) self.file.seek(0) self.file.write(header) # update the directory offsets offset = woffHeaderSize + (woffDirectoryEntrySize * self.numTables) order = self._tableOrder() for tag in order: index, entry, data = self.tables[tag] entry.offset = offset offset += calc4BytePaddedLength(entry.compLength) # ensure byte alignment # write the directory self._writeTableDirectory() # write the table data self._writeTableData() # write the metadata self._writeMetadata() # write the private data self._writePrivateData() # write the header self._writeHeader() # go to the beginning of the file self.file.seek(0) # header support def _writeHeader(self): header = sstruct.pack(woffHeaderFormat, self) self.file.seek(0) self.file.write(header) # sfnt support def _prepTable(self, tag, data, origLength=None, origChecksum=None, compLength=None, entryOnly=False): # skip data prep if entryOnly: origLength = origLength origChecksum = calcTableChecksum(tag, data) compLength = 0 # prep the data else: # compress if compLength is None: origData = data origLength = len(origData) origChecksum = calcTableChecksum(tag, data) if self.verbose: debugmsg("compressing '%s' table" % tag) compData = zlib.compress(origData, self.compressionLevel) compLength = len(compData) if origLength <= compLength: data = origData compLength = origLength else: data = compData # make the directory entry entry = WOFFDirectoryEntry() entry.tag = tag entry.offset = 0 entry.origLength = origLength entry.origChecksum = origChecksum entry.compLength = compLength # return if entryOnly: return entry return entry, data def _checkTableConformance(self, entry, data): """ Check the conformance of the table directory entries. These must be checked because the origChecksum, origLength and compLength can be set by an outside caller. """ if self.verbose: debugmsg("checking conformance of '%s' table" % entry.tag) # origLength must be less than or equal to compLength if entry.origLength < entry.compLength: raise WOFFLibError("origLength and compLength are not correct in the '%s' table entry." 
% entry.tag) # unpack the data as needed if entry.origLength > entry.compLength: origData = zlib.decompress(data) compData = data else: origData = data compData = data # the origLength entry must match the actual length if entry.origLength != len(origData): raise WOFFLibError("origLength is not correct in the '%s' table entry." % entry.tag) # the checksum must be correct if entry.origChecksum != calcTableChecksum(entry.tag, origData): raise WOFFLibError("origChecksum is not correct in the '%s' table entry." % entry.tag) # the compLength must be correct if entry.compLength != len(compData): raise WOFFLibError("compLength is not correct in the '%s' table entry." % entry.tag) def _handleHeadChecksum(self): if self.verbose: debugmsg("updating head checkSumAdjustment") # get the value tables = {} offset = sfntDirectorySize + (sfntDirectoryEntrySize * self.numTables) for (index, entry, data) in sorted(self.tables.values()): tables[entry.tag] = dict(offset=offset, length=entry.origLength, checkSum=entry.origChecksum) offset += calc4BytePaddedLength(entry.origLength) checkSumAdjustment = calcHeadCheckSumAdjustment(self.flavor, tables) # set the value in the head table index, entry, data = self.tables["head"] data = data[:8] + struct.pack(">L", checkSumAdjustment) + data[12:] # compress the data newEntry, data = self._prepTable("head", data) # update the entry data assert entry.origChecksum == newEntry.origChecksum entry.origLength = newEntry.origLength entry.compLength = newEntry.compLength # store self.tables["head"] = (index, entry, data) def _writeTableDirectory(self): if self.verbose: debugmsg("writing table directory") self.file.seek(woffHeaderSize) for tag, (index, entry, data) in sorted(self.tables.items()): entry = sstruct.pack(woffDirectoryEntryFormat, entry) self.file.write(entry) def _writeTableData(self): d = woffHeaderSize + (woffDirectoryEntrySize * self.numTables) offset = woffHeaderSize + (woffDirectoryEntrySize * self.numTables) self.file.seek(offset) for tag in self._tableOrder(): if self.verbose: debugmsg("writing '%s' table" % tag) index, entry, data = self.tables[tag] data += "\0" * (calc4BytePaddedLength(entry.compLength) - entry.compLength ) # ensure byte alignment self.file.write(data) self.length += calc4BytePaddedLength(entry.compLength) # ensure byte alignment self.totalSFNTSize += calc4BytePaddedLength(entry.origLength) # ensure byte alignment # store the end for use by metadata or private data self.tableDataEnd = self.length # metadata support def _writeMetadata(self): if self.metadata is None: return if self.verbose: debugmsg("writing metadata") self.length += self.metaLength self.metaOffset = self.tableDataEnd self.file.seek(self.metaOffset) self.file.write(self.metadata) # store the end for use by private data self.metadataEnd = self.metaOffset + self.metaLength # if private data exists, pad to a four byte boundary if self.privateData is not None: padding = calc4BytePaddedLength(self.metaLength) - self.metaLength self.metadataEnd += padding self.length += padding padding = "\0" * padding if padding: self.file.write(padding) # private data support def _writePrivateData(self): if self.privateData is None: return if self.verbose: debugmsg("writing private data") if self.metadata is not None: self.privOffset = self.metadataEnd else: self.privOffset = self.tableDataEnd self.length += self.privLength self.file.seek(self.privOffset) self.file.write(self.privateData) # --------- # Directory # --------- woffDirectoryEntryFormat = """ > # big endian tag: 4s offset: L 
compLength: L origLength: L origChecksum: L """ woffDirectoryEntrySize = sstruct.calcsize(woffDirectoryEntryFormat) class WOFFDirectoryEntry(object): def fromFile(self, file): sstruct.unpack(woffDirectoryEntryFormat, file.read(woffDirectoryEntrySize), self) def fromString(self, str): sstruct.unpack(woffDirectoryEntryFormat, str, self) def toString(self): return sstruct.pack(woffDirectoryEntryFormat, self) def __repr__(self): if hasattr(self, "tag"): return "<WOFFDirectoryEntry '%s' at %x>" % (self.tag, id(self)) else: return "<WOFFDirectoryEntry at %x>" % id(self) # ------- # Helpers # ------- class WOFFLibError(Exception): pass def calc4BytePaddedLength(length): return (length + 3) & ~3 def calcTableChecksum(tag, data): if tag == "head": checksum = calcChecksum(data[:8] + '\0\0\0\0' + data[12:]) else: checksum = calcChecksum(data) checksum = checksum & 0xffffffff return checksum def calcHeadCheckSumAdjustment(flavor, tables): numTables = len(tables) # build the sfnt header searchRange, entrySelector, rangeShift = getSearchRange(numTables) sfntDirectoryData = dict( sfntVersion=flavor, numTables=numTables, searchRange=searchRange, entrySelector=entrySelector, rangeShift=rangeShift ) # build the sfnt directory directory = sstruct.pack(sfntDirectoryFormat, sfntDirectoryData) for tag, entry in sorted(tables.items()): entry = tables[tag] sfntEntry = SFNTDirectoryEntry() sfntEntry.tag = tag sfntEntry.checkSum = entry["checkSum"] sfntEntry.offset = entry["offset"] sfntEntry.length = entry["length"] directory += sfntEntry.toString() # calculate the checkSumAdjustment checkSums = [entry["checkSum"] for entry in tables.values()] checkSums.append(calcChecksum(directory)) checkSumAdjustment = sum(checkSums) checkSumAdjustment = (0xB1B0AFBA - checkSumAdjustment) & 0xffffffff # done return checkSumAdjustment # ---------------- # SFNT Conformance # ---------------- def checkSFNTConformance(file): """ This function checks a SFNT file to see if it meets the conformance recomendations in the WOFF specification. This includes: - searchRange must be correct. - entrySelector must be correct. - rangeShift must be correct. - offset to each table must be after the table directory and before the end of the file. - offset + length of each table must not extend past the end of the file. - the table directory must be in ascending order. - tables must be padded to 4 byte boundaries. - the final table must be padded to a 4 byte boundary. - the gaps between table data blocks must not be more than necessary to pad the table to a 4 byte boundary. - the gap between the end of the final table and the end of the file must not be more than necessary to pad the table to a four byte boundary. - the checksums for each table in the table directory must be correct. - the head checkSumAdjustment must be correct. - the padding bytes must be null. The returned value of this function will be a list. If any errors were found, they will be represented as strings in the list. 
""" # load the data closeFile = False if not hasattr(file, "read"): file = open(file, "rb") closeFile = True data = file.read() if closeFile: file.close() # storage errors = [] # unpack the header headerData = data[:sfntDirectorySize] header = sstruct.unpack(sfntDirectoryFormat, headerData) # unpack the table directory numTables = header["numTables"] directoryData = data[sfntDirectorySize : sfntDirectorySize + (sfntDirectoryEntrySize * numTables)] tableDirectory = [] for index in range(numTables): entry = sstruct.unpack(sfntDirectoryEntryFormat, directoryData[:sfntDirectoryEntrySize]) tableDirectory.append(entry) directoryData = directoryData[sfntDirectoryEntrySize:] # sanity testing errors += _testOffsetBoundaryValidity(len(data), tableDirectory) errors += _testLengthBoundaryValidity(len(data), tableDirectory) # if one or more errors have already been found, something # is very wrong and this should come to a screeching halt. if errors: return errors # junk at the beginning of the file errors += _testJunkAtTheBeginningOfTheFile(header) # test directory order errors += _testDirectoryOrder(tableDirectory) # load the table data for entry in tableDirectory: offset = entry["offset"] length = entry["length"] entry["data"] = data[offset:offset+length] # test for overlaps errors += _testOverlaps(tableDirectory) # test for padding errors += _testOffsets(tableDirectory) # test the final table padding errors += _testFinalTablePadding(len(data), numTables, tableDirectory[-1]["tag"]) # test for gaps errors += _testGaps(tableDirectory) # test for a gap at the end of the file errors += _testGapAfterFinalTable(len(data), tableDirectory) # test padding value errors += _testPaddingValue(tableDirectory, data) # validate checksums errors += _testCheckSums(tableDirectory) errors += _testHeadCheckSum(header, tableDirectory) # done. return errors def _testOffsetBoundaryValidity(dataLength, tableDirectory): """ >>> test = [ ... dict(tag="test", offset=44) ... ] >>> bool(_testOffsetBoundaryValidity(45, test)) False >>> test = [ ... dict(tag="test", offset=1) ... ] >>> bool(_testOffsetBoundaryValidity(45, test)) True >>> test = [ ... dict(tag="test", offset=46) ... ] >>> bool(_testOffsetBoundaryValidity(45, test)) True """ errors = [] numTables = len(tableDirectory) minOffset = sfntDirectorySize + (sfntDirectoryEntrySize * numTables) for entry in tableDirectory: offset = entry["offset"] tag = entry["tag"] if offset < minOffset: errors.append("The offset to the %s table is not valid." % tag) if offset > dataLength: errors.append("The offset to the %s table is not valid." % tag) return errors def _testLengthBoundaryValidity(dataLength, tableDirectory): """ >>> test = [ ... dict(tag="test", offset=44, length=1) ... ] >>> bool(_testLengthBoundaryValidity(45, test)) False >>> test = [ ... dict(tag="test", offset=44, length=2) ... ] >>> bool(_testLengthBoundaryValidity(45, test)) True """ errors = [] entries = [(entry["offset"], entry) for entry in tableDirectory] for o, entry in sorted(entries): offset = entry["offset"] length = entry["length"] tag = entry["tag"] end = offset + length if end > dataLength: errors.append("The length of the %s table is not valid." 
% tag) return errors def _testJunkAtTheBeginningOfTheFile(header): """ >>> test = dict(numTables=5, searchRange=64, entrySelector=2, rangeShift=16) >>> bool(_testJunkAtTheBeginningOfTheFile(test)) False >>> test = dict(numTables=5, searchRange=0, entrySelector=2, rangeShift=16) >>> bool(_testJunkAtTheBeginningOfTheFile(test)) True >>> test = dict(numTables=5, searchRange=64, entrySelector=0, rangeShift=16) >>> bool(_testJunkAtTheBeginningOfTheFile(test)) True >>> test = dict(numTables=5, searchRange=64, entrySelector=2, rangeShift=0) >>> bool(_testJunkAtTheBeginningOfTheFile(test)) True """ errors = [] numTables = header["numTables"] searchRange, entrySelector, rangeShift = getSearchRange(numTables) if header["searchRange"] != searchRange: errors.append("The searchRange value is incorrect.") if header["entrySelector"] != entrySelector: errors.append("The entrySelector value is incorrect.") if header["rangeShift"] != rangeShift: errors.append("The rangeShift value is incorrect.") return errors def _testDirectoryOrder(tableDirectory): """ >>> test = [ ... dict(tag="aaaa"), ... dict(tag="bbbb") ... ] >>> bool(_testDirectoryOrder(test)) False >>> test = [ ... dict(tag="bbbb"), ... dict(tag="aaaa") ... ] >>> bool(_testDirectoryOrder(test)) True """ order = [entry["tag"] for entry in tableDirectory] if order != list(sorted(order)): return ["The table directory is not in ascending order."] return [] def _testOverlaps(tableDirectory): """ >>> test = [ ... dict(tag="aaaa", offset=0, length=100), ... dict(tag="bbbb", offset=1000, length=100), ... ] >>> bool(_testOverlaps(test)) False >>> test = [ ... dict(tag="aaaa", offset=0, length=100), ... dict(tag="bbbb", offset=50, length=100), ... ] >>> bool(_testOverlaps(test)) True >>> test = [ ... dict(tag="aaaa", offset=0, length=100), ... dict(tag="bbbb", offset=0, length=100), ... ] >>> bool(_testOverlaps(test)) True >>> test = [ ... dict(tag="aaaa", offset=0, length=100), ... dict(tag="bbbb", offset=0, length=150), ... ] >>> bool(_testOverlaps(test)) True """ # gather the edges edges = {} for entry in tableDirectory: start = entry["offset"] end = start + entry["length"] edges[entry["tag"]] = (start, end) # look for overlaps overlaps = set() for tag, (start, end) in edges.items(): for otherTag, (otherStart, otherEnd) in edges.items(): tag = tag.strip() otherTag = otherTag.strip() if tag == otherTag: continue if start >= otherStart and start < otherEnd: l = sorted((tag, otherTag)) overlaps.add(tuple(l)) if end > otherStart and end <= otherEnd: l = sorted((tag, otherTag)) overlaps.add(tuple(l)) # report errors = [] if overlaps: for t1, t2 in sorted(overlaps): errors.append("The tables %s and %s overlap." % (t1, t2)) return errors def _testOffsets(tableDirectory): """ >>> test = [ ... dict(tag="test", offset=1) ... ] >>> bool(_testOffsets(test)) True >>> test = [ ... dict(tag="test", offset=2) ... ] >>> bool(_testOffsets(test)) True >>> test = [ ... dict(tag="test", offset=3) ... ] >>> bool(_testOffsets(test)) True >>> test = [ ... dict(tag="test", offset=4) ... ] >>> bool(_testOffsets(test)) False """ errors = [] # make the entries sortable entries = [(entry["offset"], entry) for entry in tableDirectory] for o, entry in sorted(entries): offset = entry["offset"] if offset % 4: errors.append("The %s table does not begin on a 4-byte boundary." % entry["tag"].strip()) return errors def _testFinalTablePadding(dataLength, numTables, finalTableTag): """ >>> bool(_testFinalTablePadding( ... sfntDirectorySize + sfntDirectoryEntrySize + 1, ... 1, ... "test" ... 
)) True >>> bool(_testFinalTablePadding( ... sfntDirectorySize + sfntDirectoryEntrySize + 2, ... 1, ... "test" ... )) True >>> bool(_testFinalTablePadding( ... sfntDirectorySize + sfntDirectoryEntrySize + 3, ... 1, ... "test" ... )) True >>> bool(_testFinalTablePadding( ... sfntDirectorySize + sfntDirectoryEntrySize + 4, ... 1, ... "test" ... )) False """ errors = [] if (dataLength - (sfntDirectorySize + (sfntDirectoryEntrySize * numTables))) % 4: errors.append("The final table (%s) is not properly padded." % finalTableTag) return errors def _testGaps(tableDirectory): """ >>> start = sfntDirectorySize + (sfntDirectoryEntrySize * 2) >>> test = [ ... dict(offset=start, length=4, tag="test1"), ... dict(offset=start+4, length=4, tag="test2"), ... ] >>> bool(_testGaps(test)) False >>> test = [ ... dict(offset=start, length=4, tag="test1"), ... dict(offset=start+5, length=4, tag="test2"), ... ] >>> bool(_testGaps(test)) True >>> test = [ ... dict(offset=start, length=4, tag="test1"), ... dict(offset=start+8, length=4, tag="test2"), ... ] >>> bool(_testGaps(test)) True """ errors = [] sorter = [] for entry in tableDirectory: sorter.append((entry["offset"], entry)) prevTag = None prevEnd = None for offset, entry in sorted(sorter): length = entry["length"] length = calc4BytePaddedLength(length) tag = entry["tag"] if prevEnd is None: prevEnd = offset + length prevTag = tag else: if offset - prevEnd != 0: errors.append("Improper padding between the %s and %s tables." % (prevTag, tag)) prevEnd = offset + length prevTag = tag return errors def _testGapAfterFinalTable(dataLength, tableDirectory): """ >>> start = sfntDirectorySize + (sfntDirectoryEntrySize * 2) >>> test = [ ... dict(offset=start, length=1, tag="test") ... ] >>> bool(_testGapAfterFinalTable(start + 4, test)) False >>> test = [ ... dict(offset=start, length=1, tag="test") ... ] >>> bool(_testGapAfterFinalTable(start + 5, test)) True >>> test = [ ... dict(offset=start, length=1, tag="test") ... ] >>> bool(_testGapAfterFinalTable(start + 8, test)) True """ errors = [] sorter = [] for entry in tableDirectory: sorter.append((entry["offset"], entry)) entry = sorted(sorter)[-1] offset = entry[-1]["offset"] length = entry[-1]["length"] length = calc4BytePaddedLength(length) lastPosition = offset + length if dataLength - lastPosition > 0: errors.append("Improper padding at the end of the file.") return errors def _testCheckSums(tableDirectory): """ >>> data = "0" * 44 >>> checkSum = calcTableChecksum("test", data) >>> test = [ ... dict(data=data, checkSum=checkSum, tag="test") ... ] >>> bool(_testCheckSums(test)) False >>> test = [ ... dict(data=data, checkSum=checkSum+1, tag="test") ... ] >>> bool(_testCheckSums(test)) True """ errors = [] for entry in tableDirectory: tag = entry["tag"] checkSum = entry["checkSum"] data = entry["data"] shouldBe = calcTableChecksum(tag, data) if checkSum != shouldBe: errors.append("Invalid checksum for the %s table." % tag) return errors def _testHeadCheckSum(header, tableDirectory): """ >>> header = dict(sfntVersion="OTTO") >>> tableDirectory = [ ... dict(tag="head", offset=100, length=100, checkSum=123, data="00000000"+struct.pack(">L", 925903070)), ... dict(tag="aaab", offset=200, length=100, checkSum=456), ... dict(tag="aaac", offset=300, length=100, checkSum=789), ... 
] >>> bool(_testHeadCheckSum(header, tableDirectory)) """ flavor = header["sfntVersion"] tables = {} for entry in tableDirectory: tables[entry["tag"]] = entry data = tables["head"]["data"][8:12] checkSumAdjustment = struct.unpack(">L", data)[0] shouldBe = calcHeadCheckSumAdjustment(flavor, tables) if checkSumAdjustment != shouldBe: return ["The head checkSumAdjustment value is incorrect."] return [] def _testPaddingValue(tableDirectory, data): """ # before first table >>> testDirectory = [dict(tag="aaaa", offset=28, length=4)] >>> bool(_testPaddingValue(testDirectory, "\x01" * 32)) False >>> testDirectory = [dict(tag="aaaa", offset=32, length=4)] >>> bool(_testPaddingValue(testDirectory, "\x01" * 36)) True # between tables >>> testDirectory = [dict(tag="aaaa", offset=44, length=4), dict(tag="bbbb", offset=48, length=4)] >>> bool(_testPaddingValue(testDirectory, "\x01" * 52)) False >>> testDirectory = [dict(tag="aaaa", offset=44, length=4), dict(tag="bbbb", offset=52, length=4)] >>> bool(_testPaddingValue(testDirectory, "\x01" * 56)) True # after final table >>> testDirectory = [dict(tag="aaaa", offset=28, length=4)] >>> bool(_testPaddingValue(testDirectory, "\x01" * 32)) False >>> testDirectory = [dict(tag="aaaa", offset=28, length=4)] >>> bool(_testPaddingValue(testDirectory, "\x01" * 36)) True """ errors = [] # check between directory and first table # check between all tables entries = [(entry["offset"], entry) for entry in tableDirectory] prev = "table directory" prevEnd = sfntDirectorySize + (sfntDirectoryEntrySize * len(tableDirectory)) for o, entry in sorted(entries): tag = entry["tag"] offset = entry["offset"] length = entry["length"] # slice the bytes between the previous and the current if offset > prevEnd: bytes = data[prevEnd:offset] # replace \0 with nothing bytes = bytes.replace("\0", "") if bytes: errors.append("Bytes between %s and %s are not null." % (prev, tag)) # shift for teh next table prev = tag prevEnd = offset + length # check last table entry = sorted(entries)[-1][1] end = entry["offset"] + entry["length"] bytes = data[end:] bytes = bytes.replace("\0", "") if bytes: errors.append("Bytes after final table (%s) are not null." % entry["tag"]) return errors if __name__ == "__main__": import doctest doctest.testmod(verbose=False)
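For orientation, here is a small usage sketch of the two public entry points defined in this module, WOFFFont and checkSFNTConformance. The font file names are placeholders, and nothing beyond what the module itself exports is assumed.

# Illustrative sketch: round-trip a WOFF file and lint a plain SFNT.
# "MyFont.woff" and "MyFont.ttf" are placeholder paths.
from woffTools import WOFFFont, checkSFNTConformance

# Reading: metadata comes back as an ElementTree Element (or a fresh empty
# <metadata> element) and privateData as a byte string or None.
font = WOFFFont("MyFont.woff")
print(font.keys())                  # table tags, "GlyphOrder" always first
if font.metadata is not None:
    print(font.metadata.tag, font.metadata.attrib)

# Writing: recompress any already-compressed tables at the highest level.
font.save("MyFont-resaved.woff", compressionLevel=9, recompressTables=True)

# Conformance checking works on a regular SFNT (TTF/OTF) file and returns a
# list of human-readable error strings; an empty list means no issues found.
for error in checkSFNTConformance("MyFont.ttf"):
    print(error)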
35.366328
133
0.608641
c04f8c4263173b47a2ca5eb3e250aff1011fa486
382
py
Python
posts/migrations/0009_auto_20201104_1432.py
Joe-Sin7h/melodiam
756996464a79a1e2066004d2bcc05c10fbbf3e8e
[ "MIT" ]
null
null
null
posts/migrations/0009_auto_20201104_1432.py
Joe-Sin7h/melodiam
756996464a79a1e2066004d2bcc05c10fbbf3e8e
[ "MIT" ]
null
null
null
posts/migrations/0009_auto_20201104_1432.py
Joe-Sin7h/melodiam
756996464a79a1e2066004d2bcc05c10fbbf3e8e
[ "MIT" ]
null
null
null
# Generated by Django 3.0.7 on 2020-11-04 09:02 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('posts', '0008_auto_20201104_1400'), ] operations = [ migrations.AlterField( model_name='post', name='artist', field=models.CharField(max_length=50), ), ]
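For context, the AlterField operation above corresponds to a model definition along the following lines. Only the artist field is implied by the migration itself; the other fields and the surrounding module are hypothetical placeholders.

# posts/models.py -- hypothetical reconstruction; only `artist` is implied by
# migration 0009, the remaining fields are placeholders for illustration.
from django.db import models

class Post(models.Model):
    artist = models.CharField(max_length=50)            # field altered in 0009
    title = models.CharField(max_length=100)            # placeholder
    created = models.DateTimeField(auto_now_add=True)   # placeholder

A migration like this is normally generated with "python manage.py makemigrations posts" after editing the model, and applied with "python manage.py migrate posts".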
20.105263
50
0.594241
8fc0c2a59058c1956092751787638acb32f22e04
22,615
py
Python
sdk/network/azure-mgmt-network/azure/mgmt/network/v2020_05_01/aio/operations/_express_route_gateways_operations.py
praveenkuttappan/azure-sdk-for-python
4b79413667b7539750a6c7dde15737013a3d4bd5
[ "MIT" ]
2,728
2015-01-09T10:19:32.000Z
2022-03-31T14:50:33.000Z
sdk/network/azure-mgmt-network/azure/mgmt/network/v2020_05_01/aio/operations/_express_route_gateways_operations.py
v-xuto/azure-sdk-for-python
9c6296d22094c5ede410bc83749e8df8694ccacc
[ "MIT" ]
17,773
2015-01-05T15:57:17.000Z
2022-03-31T23:50:25.000Z
sdk/network/azure-mgmt-network/azure/mgmt/network/v2020_05_01/aio/operations/_express_route_gateways_operations.py
v-xuto/azure-sdk-for-python
9c6296d22094c5ede410bc83749e8df8694ccacc
[ "MIT" ]
1,916
2015-01-19T05:05:41.000Z
2022-03-31T19:36:44.000Z
# coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for license information. # Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- from typing import Any, Callable, Dict, Generic, Optional, TypeVar, Union import warnings from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error from azure.core.pipeline import PipelineResponse from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod from azure.mgmt.core.exceptions import ARMErrorFormat from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling from ... import models as _models T = TypeVar('T') ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]] class ExpressRouteGatewaysOperations: """ExpressRouteGatewaysOperations async operations. You should not instantiate this class directly. Instead, you should create a Client instance that instantiates it for you and attaches it as an attribute. :ivar models: Alias to model classes used in this operation group. :type models: ~azure.mgmt.network.v2020_05_01.models :param client: Client for service requests. :param config: Configuration of service client. :param serializer: An object model serializer. :param deserializer: An object model deserializer. """ models = _models def __init__(self, client, config, serializer, deserializer) -> None: self._client = client self._serialize = serializer self._deserialize = deserializer self._config = config async def list_by_subscription( self, **kwargs: Any ) -> "_models.ExpressRouteGatewayList": """Lists ExpressRoute gateways under a given subscription. 
:keyword callable cls: A custom type or function that will be passed the direct response :return: ExpressRouteGatewayList, or the result of cls(response) :rtype: ~azure.mgmt.network.v2020_05_01.models.ExpressRouteGatewayList :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType["_models.ExpressRouteGatewayList"] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) api_version = "2020-05-01" accept = "application/json" # Construct URL url = self.list_by_subscription.metadata['url'] # type: ignore path_format_arguments = { 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} # type: Dict[str, Any] query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') # Construct headers header_parameters = {} # type: Dict[str, Any] header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') request = self._client.get(url, query_parameters, header_parameters) pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, error_format=ARMErrorFormat) deserialized = self._deserialize('ExpressRouteGatewayList', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized list_by_subscription.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/expressRouteGateways'} # type: ignore async def list_by_resource_group( self, resource_group_name: str, **kwargs: Any ) -> "_models.ExpressRouteGatewayList": """Lists ExpressRoute gateways in a given resource group. :param resource_group_name: The name of the resource group. 
:type resource_group_name: str :keyword callable cls: A custom type or function that will be passed the direct response :return: ExpressRouteGatewayList, or the result of cls(response) :rtype: ~azure.mgmt.network.v2020_05_01.models.ExpressRouteGatewayList :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType["_models.ExpressRouteGatewayList"] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) api_version = "2020-05-01" accept = "application/json" # Construct URL url = self.list_by_resource_group.metadata['url'] # type: ignore path_format_arguments = { 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} # type: Dict[str, Any] query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') # Construct headers header_parameters = {} # type: Dict[str, Any] header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') request = self._client.get(url, query_parameters, header_parameters) pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, error_format=ARMErrorFormat) deserialized = self._deserialize('ExpressRouteGatewayList', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized list_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteGateways'} # type: ignore async def _create_or_update_initial( self, resource_group_name: str, express_route_gateway_name: str, put_express_route_gateway_parameters: "_models.ExpressRouteGateway", **kwargs: Any ) -> "_models.ExpressRouteGateway": cls = kwargs.pop('cls', None) # type: ClsType["_models.ExpressRouteGateway"] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) api_version = "2020-05-01" content_type = kwargs.pop("content_type", "application/json") accept = "application/json" # Construct URL url = self._create_or_update_initial.metadata['url'] # type: ignore path_format_arguments = { 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), 'expressRouteGatewayName': self._serialize.url("express_route_gateway_name", express_route_gateway_name, 'str'), 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} # type: Dict[str, Any] query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') # Construct headers header_parameters = {} # type: Dict[str, Any] header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') body_content_kwargs = {} # type: Dict[str, Any] body_content = 
self._serialize.body(put_express_route_gateway_parameters, 'ExpressRouteGateway') body_content_kwargs['content'] = body_content request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs) pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200, 201]: map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, error_format=ARMErrorFormat) if response.status_code == 200: deserialized = self._deserialize('ExpressRouteGateway', pipeline_response) if response.status_code == 201: deserialized = self._deserialize('ExpressRouteGateway', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized _create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteGateways/{expressRouteGatewayName}'} # type: ignore async def begin_create_or_update( self, resource_group_name: str, express_route_gateway_name: str, put_express_route_gateway_parameters: "_models.ExpressRouteGateway", **kwargs: Any ) -> AsyncLROPoller["_models.ExpressRouteGateway"]: """Creates or updates a ExpressRoute gateway in a specified resource group. :param resource_group_name: The name of the resource group. :type resource_group_name: str :param express_route_gateway_name: The name of the ExpressRoute gateway. :type express_route_gateway_name: str :param put_express_route_gateway_parameters: Parameters required in an ExpressRoute gateway PUT operation. :type put_express_route_gateway_parameters: ~azure.mgmt.network.v2020_05_01.models.ExpressRouteGateway :keyword callable cls: A custom type or function that will be passed the direct response :keyword str continuation_token: A continuation token to restart a poller from a saved state. :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy. :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present. 
:return: An instance of AsyncLROPoller that returns either ExpressRouteGateway or the result of cls(response) :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2020_05_01.models.ExpressRouteGateway] :raises ~azure.core.exceptions.HttpResponseError: """ polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod] cls = kwargs.pop('cls', None) # type: ClsType["_models.ExpressRouteGateway"] lro_delay = kwargs.pop( 'polling_interval', self._config.polling_interval ) cont_token = kwargs.pop('continuation_token', None) # type: Optional[str] if cont_token is None: raw_result = await self._create_or_update_initial( resource_group_name=resource_group_name, express_route_gateway_name=express_route_gateway_name, put_express_route_gateway_parameters=put_express_route_gateway_parameters, cls=lambda x,y,z: x, **kwargs ) kwargs.pop('error_map', None) kwargs.pop('content_type', None) def get_long_running_output(pipeline_response): deserialized = self._deserialize('ExpressRouteGateway', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized path_format_arguments = { 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), 'expressRouteGatewayName': self._serialize.url("express_route_gateway_name", express_route_gateway_name, 'str'), 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), } if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs) elif polling is False: polling_method = AsyncNoPolling() else: polling_method = polling if cont_token: return AsyncLROPoller.from_continuation_token( polling_method=polling_method, continuation_token=cont_token, client=self._client, deserialization_callback=get_long_running_output ) else: return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method) begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteGateways/{expressRouteGatewayName}'} # type: ignore async def get( self, resource_group_name: str, express_route_gateway_name: str, **kwargs: Any ) -> "_models.ExpressRouteGateway": """Fetches the details of a ExpressRoute gateway in a resource group. :param resource_group_name: The name of the resource group. :type resource_group_name: str :param express_route_gateway_name: The name of the ExpressRoute gateway. 
:type express_route_gateway_name: str :keyword callable cls: A custom type or function that will be passed the direct response :return: ExpressRouteGateway, or the result of cls(response) :rtype: ~azure.mgmt.network.v2020_05_01.models.ExpressRouteGateway :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType["_models.ExpressRouteGateway"] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) api_version = "2020-05-01" accept = "application/json" # Construct URL url = self.get.metadata['url'] # type: ignore path_format_arguments = { 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), 'expressRouteGatewayName': self._serialize.url("express_route_gateway_name", express_route_gateway_name, 'str'), 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} # type: Dict[str, Any] query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') # Construct headers header_parameters = {} # type: Dict[str, Any] header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') request = self._client.get(url, query_parameters, header_parameters) pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, error_format=ARMErrorFormat) deserialized = self._deserialize('ExpressRouteGateway', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteGateways/{expressRouteGatewayName}'} # type: ignore async def _delete_initial( self, resource_group_name: str, express_route_gateway_name: str, **kwargs: Any ) -> None: cls = kwargs.pop('cls', None) # type: ClsType[None] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) api_version = "2020-05-01" accept = "application/json" # Construct URL url = self._delete_initial.metadata['url'] # type: ignore path_format_arguments = { 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), 'expressRouteGatewayName': self._serialize.url("express_route_gateway_name", express_route_gateway_name, 'str'), 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} # type: Dict[str, Any] query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') # Construct headers header_parameters = {} # type: Dict[str, Any] header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') request = self._client.delete(url, query_parameters, header_parameters) pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200, 202, 204]: map_error(status_code=response.status_code, response=response, 
error_map=error_map) raise HttpResponseError(response=response, error_format=ARMErrorFormat) if cls: return cls(pipeline_response, None, {}) _delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteGateways/{expressRouteGatewayName}'} # type: ignore async def begin_delete( self, resource_group_name: str, express_route_gateway_name: str, **kwargs: Any ) -> AsyncLROPoller[None]: """Deletes the specified ExpressRoute gateway in a resource group. An ExpressRoute gateway resource can only be deleted when there are no connection subresources. :param resource_group_name: The name of the resource group. :type resource_group_name: str :param express_route_gateway_name: The name of the ExpressRoute gateway. :type express_route_gateway_name: str :keyword callable cls: A custom type or function that will be passed the direct response :keyword str continuation_token: A continuation token to restart a poller from a saved state. :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy. :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present. :return: An instance of AsyncLROPoller that returns either None or the result of cls(response) :rtype: ~azure.core.polling.AsyncLROPoller[None] :raises ~azure.core.exceptions.HttpResponseError: """ polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod] cls = kwargs.pop('cls', None) # type: ClsType[None] lro_delay = kwargs.pop( 'polling_interval', self._config.polling_interval ) cont_token = kwargs.pop('continuation_token', None) # type: Optional[str] if cont_token is None: raw_result = await self._delete_initial( resource_group_name=resource_group_name, express_route_gateway_name=express_route_gateway_name, cls=lambda x,y,z: x, **kwargs ) kwargs.pop('error_map', None) kwargs.pop('content_type', None) def get_long_running_output(pipeline_response): if cls: return cls(pipeline_response, None, {}) path_format_arguments = { 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), 'expressRouteGatewayName': self._serialize.url("express_route_gateway_name", express_route_gateway_name, 'str'), 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), } if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs) elif polling is False: polling_method = AsyncNoPolling() else: polling_method = polling if cont_token: return AsyncLROPoller.from_continuation_token( polling_method=polling_method, continuation_token=cont_token, client=self._client, deserialization_callback=get_long_running_output ) else: return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method) begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteGateways/{expressRouteGatewayName}'} # type: ignore
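The operations class above is not meant to be instantiated directly; it is surfaced as an operations group on the versioned network management client. The sketch below assumes the usual azure-identity credential flow and the conventional express_route_gateways attribute name on the client; the subscription id and resource group name are placeholders.

# Illustrative sketch: list ExpressRoute gateways in one resource group with
# the async client. "<subscription-id>" and "my-rg" are placeholders.
import asyncio
from azure.identity.aio import DefaultAzureCredential
from azure.mgmt.network.v2020_05_01.aio import NetworkManagementClient

async def main():
    credential = DefaultAzureCredential()
    client = NetworkManagementClient(credential, "<subscription-id>")
    try:
        # Dispatches to ExpressRouteGatewaysOperations.list_by_resource_group above.
        gateways = await client.express_route_gateways.list_by_resource_group("my-rg")
        for gateway in gateways.value or []:
            print(gateway.name, gateway.location)
    finally:
        await client.close()
        await credential.close()

asyncio.run(main())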
50.934685
209
0.688658
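A brief usage sketch for the async operations class in the record above. It assumes an already-authenticated azure.mgmt.network.aio.NetworkManagementClient (API version 2020-05-01) exposing these operations as client.express_route_gateways; the resource-group and gateway names are placeholders, not values from the source.

# Sketch only: `client` is assumed to be an authenticated
# azure.mgmt.network.aio.NetworkManagementClient; the names below are placeholders.
async def delete_gateway(client):
    poller = await client.express_route_gateways.begin_delete(
        resource_group_name="example-rg",
        express_route_gateway_name="example-er-gw",
    )
    await poller.result()  # resolves once the long-running delete reaches a terminal state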
42c5e4369a055840e1498be787b1f36246967cb9
19,329
py
Python
tools/pylora/loraopts.py
ricaun/basicmac
69e55e953b652ef26e52819ab77559e4a81baf70
[ "BSD-3-Clause" ]
80
2019-04-30T23:21:12.000Z
2021-08-30T11:01:34.000Z
tools/pylora/loraopts.py
ricaun/basicmac
69e55e953b652ef26e52819ab77559e4a81baf70
[ "BSD-3-Clause" ]
33
2020-04-15T23:09:26.000Z
2022-02-11T20:50:41.000Z
tools/pylora/loraopts.py
ricaun/basicmac
69e55e953b652ef26e52819ab77559e4a81baf70
[ "BSD-3-Clause" ]
42
2019-04-30T17:45:30.000Z
2021-09-23T01:08:28.000Z
# Copyright (C) 2016-2019 Semtech (International) AG. All rights reserved. # # This file is subject to the terms and conditions defined in file 'LICENSE', # which is part of this source code package. from typing import Callable,Dict,List,Tuple,Union import re class BF(): """Bitfield - might also cover entire bytes.""" def __init__(self, name:str='', bits:Union[int,Tuple[int,int]]=(0,0), signed=False) -> None: self.name = name self.brange = (bits,bits) if isinstance(bits,int) else bits self.signed = signed self.value = 0 def decode(self, v:int) -> int: eb = 2<<self.brange[1] x = (v & (eb-1)) >>self.brange[0] if self.signed and x >= (eb>>1): x = x-eb self.value = x return x def encode(self) -> int: eb = 2<<self.brange[1] lo = self.brange[0] return (self.value << lo) & (eb-1) def value_as_int(self) -> int: return self.value def value_as_str(self, fmt:str='s') -> str: return '{:d}'.format(self.value_as_int()) def bits_as_str(self) -> str: if self.brange[0] != self.brange[1]: return '%d-%d' % self.brange return '%d' % self.brange[0] def __repr__(self) -> str: return '%s:%s=%s' % (self.name, self.bits_as_str(), self.value_as_str('s')) def __str__(self) -> str: return '%s=%s' % (self.name, self.value_as_str('r')) def format(self, fmt:str) -> str: if 'S' in fmt: # structure spec return '%s:%s' % (self.name, self.bits_as_str()) if 'D' in fmt: # details return repr(self) return str(self) class Ux(): def __init__(self, n, fields:Tuple[BF,...]) -> None: self.n = n self.fields = fields for f in fields: self.__setattr__(f.name, f) self.value = 0 def decode(self, ba:bytes, off:int) -> int: v = 0 n = self.n for i in range(n): v |= ba[off+i]<<(i*8) self.value = v for f in self.fields: f.decode(v) return off+n def encode(self, ba:bytearray, off:int) -> int: v = 0 for f in self.fields: v |= f.encode() n = self.n for i in range(n): ba[off+i] = (v>>(i*8)) & 0xFF return off+n def __repr__(self) -> str: return 'u%d(%s)' % (self.n, ','.join(map(repr, self.fields))) def __str__(self) -> str: return ' '.join(map(str, filter(lambda a: not isinstance(a,RFU), self.fields))) def format(self, fmt:str) -> str: """Format element U disables rendering of Ux elements. 
Element u/text defines the separator between Ux elements (default is comma).""" sep = ',' m = re.search(r'[Uu]/([^/]*)/', fmt) if m: sep = m.group(1) f = sep.join(filter(lambda f:f, map(lambda f:f.format(fmt), self.fields))) if 'U' in fmt: return f return 'u%d(%s)' % (self.n, f) class U8(Ux): def __init__(self, *fields:BF) -> None: super().__init__(1, fields) class U16(Ux): def __init__(self, *fields:BF) -> None: super().__init__(2, fields) class U24(Ux): def __init__(self, *fields:BF) -> None: super().__init__(3, fields) class U32(Ux): def __init__(self, *fields:BF) -> None: super().__init__(4, fields) class BFx(BF): def __init__(self, name:str='', bits:Union[int,Tuple[int,int]]=(0,0), signed=False) -> None: super().__init__(name,bits,signed) def value_as_str(self, fmt:str='s') -> str: return 'x{:X}'.format(self.value_as_int()) class RFU(BFx): def __init__(self, from_bit=0, to_bit=None, ext:str='') -> None: super().__init__('RFU'+ext,bits=(from_bit, to_bit or from_bit)) def value_as_str(self, fmt:str='s') -> str: v = self.value_as_int() if v < 10: return str(v) return 'x{:X}'.format(v) def format(self, fmt:str) -> str: if 'R' in fmt: return '' return super().format(fmt) class Freq(BF): def __init__(self, name:str='Freq') -> None: super().__init__(name, (0,23)) def value_as_str(self, fmt:str='s') -> str: v = self.value_as_int() if v<1000000: return '%d' % v return '%.1fkHz' % (v/10) if (v % 10) else '%.3fMHz' % (v/1e4) class Ack(BF): def __init__(self, name:str, bit:int) -> None: super().__init__(name, bit) def value_as_str(self, fmt:str='s') -> str: v = self.value_as_int() return '1/ACK' if v else '0/NACK' class Opt(): CMD = 0 NAME = '' def __init__(self, *args:Ux, **kwargs) -> None: self.args = args for a in args: for f in a.fields: self.__setattr__(f.name, f) self.set(**kwargs) def __repr__(self) -> str: a = ' '.join(map(repr, self.args)) if a: a=' '+a return '%s<x%02X%s>' % (self.NAME, self.CMD & 0xFF, a) def __str__(self) -> str: return '%s<%s>' % (self.NAME, ' '.join(map(str, self.args))) def set(self, **kwargs) -> None: for k,v in kwargs.items(): getattr(self,k).value = v def format(self, fmt:str) -> str: """Format element A print arguments only. a/text/ print command and arguments and separates arguments with text (default: space). Char 'C' suppresses the hex command code.""" sep = ' ' m = re.search(r'[aA]/([^/]*)/', fmt) if m: sep = m.group(1) a = sep.join(map(lambda a:a.format(fmt), self.args)) if 'A' in fmt: return a chex = 'x%02X ' % (self.CMD & 0xFF) if 'C' not in fmt else '' return '%s<%s%s>' % (self.NAME, chex, a) def decode(self, ba:bytes, off:int) -> int: assert ba[off] == self.CMD & 0xFF off += 1 for a in self.args: off = a.decode(ba,off) return off def encode(self, ba:bytearray, off:int) -> int: ba[off] = self.CMD & 0xFF off += 1 for a in self.args: off = a.encode(ba,off) return off class OptUp(Opt): pass class OptDn(Opt): pass class Unknown(Opt): CMD = 0 NAME = '?' 
def __init__(self, n:int) -> None: self.n = n self.un = b'' def __repr__(self) -> str: return '%s<x%s>' % (self.NAME, self.un.hex().upper()) def __str__(self) -> str: return self.__repr__() def format(self, fmt:str) -> str: return self.__repr__() def decode(self, ba:bytes, off:int) -> int: n = self.n self.un = ba[off:off+n] return off+n def encode(self, ba:bytearray, off:int) -> int: n = self.n ba[off:off+n] = self.un return off+n # ================================================================================ # Reset Conf/Ind --- ABP only --- # ================================================================================ class ResetInd(OptUp): CMD = 1 def __init__(self, **kwargs) -> None: super().__init__( U8(BF('Minor',(0,3)), RFU(4,7)), **kwargs) class ResetConf(OptDn): CMD = 1 def __init__(self, **kwargs) -> None: super().__init__( U8(BF('Minor',(0,3)), RFU(4,7)), **kwargs) # ================================================================================ # LinkCheck Req/Ans # ================================================================================ class LinkCheckReq(OptUp): CMD = 2 class LinkCheckAns(OptDn): CMD = 2 def __init__(self, **kwargs) -> None: super().__init__( U8(BF('Margin',(0,7))), U8(BF('GwCnt', (0,7))), **kwargs) # ================================================================================ # LinkADR Req/Ans # ================================================================================ class LinkADRReq(OptDn): CMD = 3 def __init__(self, **kwargs) -> None: super().__init__( U8 ( BF ('TXPow',(0,3)), BF('DR',(4,7)) ), U16( BFx('ChMask',(0,15)) ), U8 ( BF ('NbTrans', (0,3)), BF('ChMaskCntl', (4,6)), RFU(7) ), **kwargs) class LinkADRAns(OptUp): CMD = 3 def __init__(self, **kwargs) -> None: super().__init__( U8(Ack('ChAck',0), Ack('DRAck',1), Ack('TXPowAck',2), RFU(3,7)), **kwargs) # ================================================================================ # DutyCycle Req/Ans # ================================================================================ class DutyCycleReq(OptDn): CMD = 4 def __init__(self, **kwargs) -> None: super().__init__( # dc = 2^-MaxDC (aggr. 
over all channels) U8( BF('MaxDC',(0,3), RFU(4,7)) ), **kwargs) class DutyCycleAns(OptUp): CMD = 4 # ================================================================================ # RXParamSetup Req/Ans # ================================================================================ class RXParamSetupReq(OptDn): CMD = 5 def __init__(self, **kwargs) -> None: super().__init__( U8 ( BF('RX2DR',(0,3)), BF('RX1DRoff',(4,6)), RFU(7) ), U24( Freq() ), **kwargs) class RXParamSetupAns(OptUp): CMD = 5 def __init__(self, **kwargs) -> None: super().__init__( U8(Ack('FreqAck',0), Ack('RX2DRAck',1), Ack('RX1DRoffAck',2), RFU(3,7)), **kwargs) # ================================================================================ # DevStatus Req/Ans # ================================================================================ class DevStatusReq(OptDn): CMD = 6 class DevStatusAns(OptUp): CMD = 6 def __init__(self, **kwargs) -> None: super().__init__( U8(BF('Batt', (0,7))), U8(BF('Margin',(0,5), signed=True), RFU(6,7)), **kwargs) # ================================================================================ # NewChannel Req/Ans # ================================================================================ class NewChannelReq(OptDn): CMD = 7 def __init__(self, **kwargs) -> None: super().__init__( U8 ( BF('Chnl',(0,7)) ), U24( Freq() ), U8 ( BF('MinDR',(0,3)), BF('MaxDR',(4,7)) ), **kwargs) class NewChannelAns(OptUp): CMD = 7 def __init__(self, **kwargs) -> None: super().__init__( U8(Ack('ChnlAck',0), Ack('DRAck',1), RFU(2,7)), **kwargs) # ================================================================================ # RXTimingSetup Req/Ans # ================================================================================ class RXTimingSetupReq(OptDn): CMD = 8 def __init__(self, **kwargs) -> None: super().__init__(U8( BF('Delay',(0,3)), RFU(4,7) ), **kwargs) class RXTimingSetupAns(OptUp): CMD = 8 # ================================================================================ # TXParamSetup Req/Ans # ================================================================================ class MaxEIRP(BF): def __init__(self, name:str='MaxEIRP', bits:Union[int,Tuple[int,int]]=(0,3)) -> None: super().__init__(name,bits) def value_as_str(self, fmt:str='s') -> str: v = self.value_as_int() return '%d(%ddBm)' % (v,[8,10,12,13,14,16,18,20,21,24,26,27,29,30,33,36][v]) class TXParamSetupReq(OptDn): CMD = 9 def __init__(self, **kwargs) -> None: super().__init__(U8( MaxEIRP(), BF('UpDwell',4), BF('DnDwell',5), RFU(6,7) ), **kwargs) class TXParamSetupAns(OptUp): CMD = 9 # ================================================================================ # DlChannel Req/Ans # ================================================================================ class DlChannelReq(OptDn): CMD = 10 def __init__(self, **kwargs) -> None: super().__init__( U8( BF('Chnl',(0,8)) ), U24(Freq()), **kwargs ) class DlChannelAns(OptUp): CMD = 10 def __init__(self, **kwargs) -> None: super().__init__( U8(Ack('FreqAck',0), Ack('ChnlAck',1), RFU(2,7)), **kwargs) # ================================================================================ # Rekey Ind/Conf --- OTAA only --- # ================================================================================ class RekeyIndOLD(OptUp): """Old TrackNet impl of RekeyInd based on an intermediate spec.""" CMD = 0x10B # disambiguate from correct option SPECIAL = b'\x0B\x11\x00\x03' def __init__(self, **kwargs) -> None: super().__init__( U8(BF('Minor',(0,3)), BF('Major',(4,7))), 
U8(BF('Nonce',(0,7))), U8(BF('DUOFCNT',0), BF('FCNT32',1), BF('RFU',(2,3)), BF('Extra',(4,7))), **kwargs) class RekeyInd(OptUp): CMD = 11 def __init__(self, **kwargs) -> None: super().__init__(U8( BF('Minor',(0,3)), RFU(4,7) ), **kwargs) class RekeyConf(OptDn): CMD = 11 def __init__(self, **kwargs) -> None: super().__init__(U8( BF('Minor',(0,3)), RFU(4,7) ), **kwargs) # ================================================================================ # ADRParamSetup Req/Ans # ================================================================================ class ADRParamSetupReq(OptDn): CMD = 12 def __init__(self, **kwargs) -> None: super().__init__(U8( BF('Delay',(0,3)), BF('Limit',(4,7)) ), **kwargs) class ADRParamSetupAns(OptUp): CMD = 12 # ================================================================================ # DeviceTime Req/Ans # ================================================================================ class DeviceTimeReq(OptUp): CMD = 13 class DeviceTimeAns(OptDn): CMD = 13 def __init__(self, **kwargs) -> None: # time = epoch_secs + frac/2^8 super().__init__(U32( BF('epoch_secs',(0,31)) ), U8( BF('frac',(0,7)) ), **kwargs) # ================================================================================ # ForceRejoin Req # ================================================================================ class ForceRejoinReq(OptDn): CMD = 14 def __init__(self, **kwargs) -> None: super().__init__(U16( BF('DR',(0,3)), BF('RejoinType',(4,6)), BF('MaxRetries',(8,10)), BF('Period',(11,13)), RFU(14,15) ), **kwargs) # ================================================================================ # RejoinParam Req/Ans # ================================================================================ class RejoinParamReq(OptDn): CMD = 15 def __init__(self, **kwargs) -> None: super().__init__(U8( BF('MaxCountN',(0,3)), BF('MaxTimeN',(4,7)) ), **kwargs) class RejoinParamAns(OptUp): CMD = 15 def __init__(self, **kwargs) -> None: super().__init__( U8(Ack('TimeAck',0), RFU(1,7)), **kwargs) # ================================================================================ # PingSlotInfo Req/Ans # ================================================================================ class Intv(BF): def __init__(self, name:str='Intv', bits:Union[int,Tuple[int,int]]=(0,2)) -> None: super().__init__(name,bits) def value_as_str(self, fmt:str='s') -> str: v = self.value_as_int() return '%d(%ds)' % (v,1<<v) class PingSlotInfoReq(OptUp): CMD = 16 def __init__(self, **kwargs) -> None: # periodicity: 2^Intv secs super().__init__(U8( Intv(bits=(0,2)), RFU(3,7) ), **kwargs) class PingSlotInfoAns(OptDn): CMD = 16 # ================================================================================ # PingSlotChnl Req/Ans # ================================================================================ class PingSlotChnlReq(OptDn): CMD = 17 def __init__(self, **kwargs) -> None: super().__init__( U24( Freq() ), U8 ( BF('DR',(0,3)), RFU(4,7) ), **kwargs) class PingSlotChnlAns(OptUp): CMD = 17 def __init__(self, **kwargs) -> None: super().__init__( U8(Ack('FreqAck',0), Ack('DRAck',1), RFU(2,7)), **kwargs) # ================================================================================ # BeaconTiming Req/Ans # ================================================================================ class BeaconTimingReq(OptUp): CMD = 18 class BeaconTimingAns(OptDn): CMD = 18 def __init__(self, **kwargs) -> None: # deprecated as of 1.1 super().__init__(U16( BF('Delay',(0,15)) ), U8( BF('Chnl',(0,7)) ), **kwargs) # 
================================================================================ # BeaconFreq Req/Ans # ================================================================================ class BeaconFreqReq(OptDn): CMD = 19 def __init__(self, **kwargs) -> None: super().__init__(U24( Freq() ), **kwargs) class BeaconFreqAns(OptUp): CMD = 19 def __init__(self, **kwargs) -> None: super().__init__( U8(Ack('FreqAck',0), RFU(1,7)), **kwargs) # ================================================================================ # DeviceMode Ind/Conf # ================================================================================ class DeviceModeInd(OptUp): CMD = 32 def __init__(self, **kwargs) -> None: super().__init__(U8( BF('DevClass',(0,7)) ), **kwargs) class DeviceModeConf(OptDn): CMD = 32 def __init__(self, **kwargs) -> None: super().__init__(U8( BF('DevClass',(0,7)) ), **kwargs) # ================================================================================ # ----- common lookup tables ----- # ================================================================================ T_CtorOpt = Callable[[],Opt] T_CtorOptMap = Dict[int,T_CtorOpt] def _scan_classes(refcls:T_CtorOpt) -> T_CtorOptMap: d = {} # type: T_CtorOptMap type_refcls = type(refcls) for name,cls in globals().items(): if type(cls) is not type_refcls or cls is refcls or not issubclass(cls,refcls): # type: ignore continue if not cls.NAME: # pragma:nobranch cls.NAME = name d[cls.CMD] = cls return d OPTSUP = _scan_classes(OptUp) OPTSDN = _scan_classes(OptDn) def unpack_opts(opts:Union[str,bytes], D:T_CtorOptMap) -> List[Opt]: if isinstance(opts,str): binopts = bytes.fromhex(opts) else: binopts = opts res = [] # type: List[Opt] off = 0 n = len(binopts) while off < n: offini = off cmd = binopts[off] optcls = D.get(cmd) if optcls is None: opt = Unknown(n-off) # type: Opt else: if optcls is RekeyInd and binopts[off:off+4] == RekeyIndOLD.SPECIAL: opt = RekeyIndOLD() else: opt = optcls() off = opt.decode(binopts,off) res.append(opt) return res def unpack_optsup(opts:Union[str,bytes]) -> List[Opt]: return unpack_opts(opts, OPTSUP) def unpack_optsdn(opts:Union[str,bytes]) -> List[Opt]: return unpack_opts(opts, OPTSDN) def pack_opts(opts:List[Opt]) -> bytes: ba = bytearray(256) off = 0 for o in opts: off = o.encode(ba,off) return bytes(ba[0:off])
30.827751
108
0.450929
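A minimal round-trip sketch for the MAC-option codec in the record above, assuming the file is importable as loraopts; the hex payload is an arbitrary LinkADRReq used purely for illustration.

# Assumes the module above is importable as `loraopts`; the payload is illustrative.
import loraopts

opts = loraopts.unpack_optsdn('0350FF0001')   # 0x03 = LinkADRReq, then DR/TXPow, ChMask, redundancy
for o in opts:
    print(o)                                  # LinkADRReq<TXPow=0 DR=5 ChMask=xFF NbTrans=1 ChMaskCntl=0>
print(loraopts.pack_opts(opts).hex())         # '0350ff0001' -- encode() round-trips decode()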
06d6a1b7f238dddd5f65520e3f34d19454275011
1,864
py
Python
data/src/generate_translation_sheet.py
Shinjjanggu/majsoul-plus-korean
f2efaeb2e98ea18978f1e3e686380078f8186f57
[ "MIT" ]
19
2020-06-22T01:24:41.000Z
2022-03-28T08:25:26.000Z
data/src/generate_translation_sheet.py
saltymocha1/MajakPlusKorean
5aea3d5715424fa2912a6e8524b3c26459e413e4
[ "MIT" ]
4
2020-08-26T04:06:01.000Z
2022-03-20T03:23:51.000Z
data/src/generate_translation_sheet.py
saltymocha1/MajakPlusKorean
5aea3d5715424fa2912a6e8524b3c26459e413e4
[ "MIT" ]
44
2022-03-15T12:29:36.000Z
2022-03-31T11:36:37.000Z
#! /usr/bin/python import csv from pathlib import Path import os lang = os.getenv('MAJSOUL_LANG', 'jp') def main(translation_path, temp_path): translate_sheet_rows = [] csv_dir_path = Path(temp_path) / 'csv' for csv_path in sorted(csv_dir_path.glob('*.csv')): with open(csv_path, 'r', encoding='utf-8-sig') as csvfile: csv_reader = csv.reader(csvfile) is_header = True header = [] header_to_translate = [] for row in csv_reader: if is_header: header = row for i, col in enumerate(header): if col == lang or col.endswith(f'_{lang}'): header_to_translate.append(i) if not header_to_translate: break is_header = False continue for col_index in header_to_translate: translate_sheet_rows.append('|'.join([ csv_path.name, header[col_index], row[col_index].replace('\\', '\\\\').replace('\n', '\\n') ])) # unique element translate_sheet_rows = list(dict.fromkeys(translate_sheet_rows)) templates_path = Path(translation_path) / 'templates' templates_path.mkdir(parents=True, exist_ok=True) with open(templates_path / 'translate_sheet.csv', 'w', encoding='utf-8-sig', newline='') as csvfile: csv_writer = csv.writer(csvfile) csv_writer.writerow(['location', 'context', 'source', 'target']) for row in translate_sheet_rows: r = row.split('|') csv_writer.writerow(r + [r[2]]) if __name__ == '__main__': main( str(Path('./translation')), str(Path('./temp')) )
35.169811
104
0.531116
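A usage sketch mirroring the __main__ guard in the record above. Note that MAJSOUL_LANG is read at module import time, so it has to be set before the import; the importable module name is an assumption.

# Sketch: MAJSOUL_LANG is consumed at import time, so set it first; module name assumed.
import os
os.environ.setdefault('MAJSOUL_LANG', 'jp')
from generate_translation_sheet import main

# Reads ./temp/csv/*.csv and writes ./translation/templates/translate_sheet.csv
# with columns location, context, source, target (target pre-filled with the source text).
main('./translation', './temp')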
e36c849acba04767f265609b39da54dbae94f7ea
7,081
py
Python
discovery-infra/test_infra/helper_classes/nodes.py
RazRegev/assisted-test-infra
7a9a0eecba0dc3bf149618e3a57dd6d588673dc1
[ "Apache-2.0" ]
null
null
null
discovery-infra/test_infra/helper_classes/nodes.py
RazRegev/assisted-test-infra
7a9a0eecba0dc3bf149618e3a57dd6d588673dc1
[ "Apache-2.0" ]
null
null
null
discovery-infra/test_infra/helper_classes/nodes.py
RazRegev/assisted-test-infra
7a9a0eecba0dc3bf149618e3a57dd6d588673dc1
[ "Apache-2.0" ]
null
null
null
import json import logging import random from typing import Dict, Iterator, List from munch import Munch from test_infra import utils from test_infra.controllers.node_controllers.node import Node from test_infra.controllers.node_controllers.node_controller import NodeController from test_infra.tools.concurrently import run_concurrently from tests.conftest import env_variables class NodeMapping: def __init__(self, node, cluster_host): self.name = node.name self.node = node self.cluster_host = cluster_host class Nodes: def __init__(self, node_controller: NodeController, private_ssh_key_path): self.controller = node_controller self.private_ssh_key_path = private_ssh_key_path self._nodes = None self._nodes_as_dict = None @property def nodes(self) -> List[Node]: if not self._nodes: self._nodes = self._list() return self._nodes def __getitem__(self, i): return self.nodes[i] def __len__(self): return len(self.nodes) def __iter__(self) -> Iterator[Node]: for n in self.nodes: yield n def drop_cache(self): self._nodes = None self._nodes_as_dict = None def get_masters(self): return [node for node in self.nodes if node.is_master_in_name()] def get_workers(self): return [node for node in self.nodes if node.is_worker_in_name()] @property def nodes_as_dict(self): if not self._nodes_as_dict: self._nodes_as_dict = {node.name: node for node in self.nodes} return self._nodes_as_dict def _list(self): # TODO list_nodes return type is Dict[str, Node] but returns list nodes = self.controller.list_nodes() return [Node(node.name(), self.controller, self.private_ssh_key_path) for node in nodes] @property def setup_time(self): return self.controller.setup_time def get_random_node(self): return random.choice(self.nodes) def shutdown_all(self): self.run_for_all_nodes("shutdown") def start_all(self): static_ips_config = env_variables.get('static_ips_config') if static_ips_config: skip_ips = False else: skip_ips = True self.run_for_all_nodes("start", skip_ips) def start_given(self, nodes): self.run_for_given_nodes(nodes, "start") def shutdown_given(self, nodes): self.run_for_given_nodes(nodes, "shutdown") def format_all_disks(self): self.run_for_all_nodes("format_disk") def destroy_all(self): self.run_for_all_nodes("shutdown") def destroy_all_nodes(self): self.controller.destroy_all_nodes() def prepare_nodes(self): self.controller.prepare_nodes() def reboot_all(self): self.run_for_all_nodes("restart") def reboot_given(self, nodes): self.run_for_given_nodes(nodes, "restart") def get_cluster_network(self): return self.controller.get_cluster_network() def set_correct_boot_order(self, nodes=None, start_nodes=False): nodes = nodes or self.nodes logging.info("Going to set correct boot order to nodes: %s", nodes) self.run_for_given_nodes(nodes, "set_boot_order_flow", False, start_nodes) def run_for_all_nodes(self, func_name, *args): return self.run_for_given_nodes(self.nodes, func_name, *args) @staticmethod def run_for_given_nodes(nodes, func_name, *args): logging.info("Running %s on nodes : %s", func_name, nodes) return run_concurrently([(getattr(node, func_name), *args) for node in nodes]) def run_for_given_nodes_by_cluster_hosts(self, cluster_hosts, func_name, *args): return self.run_for_given_nodes([self.get_node_from_cluster_host(host) for host in cluster_hosts], func_name, *args) @staticmethod def run_ssh_command_on_given_nodes(nodes, command) -> Dict: return run_concurrently({node.name: (node.run_command, command) for node in nodes}) def set_wrong_boot_order(self, nodes=None, start_nodes=True): nodes = nodes or 
self.nodes logging.info("Setting wrong boot order for %s", self.nodes_as_dict.keys()) self.run_for_given_nodes(nodes, "set_boot_order_flow", True, start_nodes) def get_bootstrap_node(self, cluster) -> Node: for cluster_host_object in cluster.get_hosts(): if cluster_host_object.get("bootstrap", False): node = self.get_node_from_cluster_host(cluster_host_object) logging.info("Bootstrap node is %s", node.name) return node def create_nodes_cluster_hosts_mapping(self, cluster): node_mapping_dict = {} for cluster_host_object in cluster.get_hosts(): name = self.get_cluster_hostname(cluster_host_object) node_mapping_dict[name] = NodeMapping(self.nodes_as_dict[name], Munch.fromDict(cluster_host_object)) return node_mapping_dict def get_node_from_cluster_host(self, cluster_host_object): hostname = self.get_cluster_hostname(cluster_host_object) return self.get_node_by_hostname(hostname) def get_node_by_hostname(self, get_node_by_hostname): return self.nodes_as_dict[get_node_by_hostname] def get_cluster_host_obj_from_node(self, cluster, node): mapping = self.create_nodes_cluster_hosts_mapping(cluster=cluster) return mapping[node.name].cluster_host def get_cluster_hostname(self, cluster_host_object): inventory = json.loads(cluster_host_object["inventory"]) return inventory["hostname"] def set_hostnames(self, cluster): ipv6 = env_variables.get('ipv6') static_ips_config = env_variables.get('static_ips_config') if ipv6 or static_ips_config: # When using IPv6 with libvirt, hostnames are not set automatically by DHCP. Therefore, we must find out # the hostnames using terraform's tfstate file. In case of static ip, the hostname is localhost and must be # set to valid hostname # TODO - NodeController has no `params` and `tf` attributes network_name = self.controller.params.libvirt_network_name libvirt_nodes = utils.get_libvirt_nodes_from_tf_state(network_name, self.controller.tf.get_state()) nodes_count = env_variables.get('num_nodes') utils.update_hosts(cluster.api_client, cluster.id, libvirt_nodes, update_hostnames=True, update_roles=(nodes_count != 1)) def set_single_node_ip(self, cluster): self.controller.tf.change_variables({"single_node_ip": cluster.get_ip_for_single_node(cluster.api_client, cluster.id, env_variables[ 'machine_cidr'])})
38.483696
120
0.669115
8f25a61c58b41b256533304c38d95c1d2b9fd52e
257
py
Python
movies/urls.py
rngallen/beyond_basics
2cfb7d97699a733251e68357a70eada3d0278680
[ "MIT" ]
null
null
null
movies/urls.py
rngallen/beyond_basics
2cfb7d97699a733251e68357a70eada3d0278680
[ "MIT" ]
null
null
null
movies/urls.py
rngallen/beyond_basics
2cfb7d97699a733251e68357a70eada3d0278680
[ "MIT" ]
null
null
null
from django.urls import path from .views import AddMovieFormView, MovieSelectFormView app_name = "movies" urlpatterns = [ path("", MovieSelectFormView.as_view(), name="select_movie"), path("add/", AddMovieFormView.as_view(), name="add_movies"), ]
25.7
65
0.735409
fc057e33a1e31c774608a55e79d11f275ce5a779
8,488
py
Python
pogom/pgoapi/protos/POGOProtos/Data/Battle/BattleAction_pb2.py
tier4fusion/pogom-updated
31c4db3dfc85b19abb39c2e43f5efa530c65159e
[ "MIT" ]
463
2016-07-17T12:30:48.000Z
2021-12-25T11:25:12.000Z
pogom/pgoapi/protos/POGOProtos/Data/Battle/BattleAction_pb2.py
tier4fusion/pogom-updated
31c4db3dfc85b19abb39c2e43f5efa530c65159e
[ "MIT" ]
103
2016-07-17T15:22:07.000Z
2021-06-14T08:52:02.000Z
pogom/pgoapi/protos/POGOProtos/Data/Battle/BattleAction_pb2.py
tier4fusion/pogom-updated
31c4db3dfc85b19abb39c2e43f5efa530c65159e
[ "MIT" ]
226
2016-07-17T15:44:26.000Z
2021-10-03T02:26:32.000Z
# Generated by the protocol buffer compiler. DO NOT EDIT! # source: POGOProtos/Data/Battle/BattleAction.proto import sys _b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1')) from google.protobuf import descriptor as _descriptor from google.protobuf import message as _message from google.protobuf import reflection as _reflection from google.protobuf import symbol_database as _symbol_database from google.protobuf import descriptor_pb2 # @@protoc_insertion_point(imports) _sym_db = _symbol_database.Default() from POGOProtos.Data.Battle import BattleResults_pb2 as POGOProtos_dot_Data_dot_Battle_dot_BattleResults__pb2 from POGOProtos.Data.Battle import BattleActionType_pb2 as POGOProtos_dot_Data_dot_Battle_dot_BattleActionType__pb2 from POGOProtos.Data.Battle import BattleParticipant_pb2 as POGOProtos_dot_Data_dot_Battle_dot_BattleParticipant__pb2 DESCRIPTOR = _descriptor.FileDescriptor( name='POGOProtos/Data/Battle/BattleAction.proto', package='POGOProtos.Data.Battle', syntax='proto3', serialized_pb=_b('\n)POGOProtos/Data/Battle/BattleAction.proto\x12\x16POGOProtos.Data.Battle\x1a*POGOProtos/Data/Battle/BattleResults.proto\x1a-POGOProtos/Data/Battle/BattleActionType.proto\x1a.POGOProtos/Data/Battle/BattleParticipant.proto\"\x85\x04\n\x0c\x42\x61ttleAction\x12\x36\n\x04Type\x18\x01 \x01(\x0e\x32(.POGOProtos.Data.Battle.BattleActionType\x12\x17\n\x0f\x61\x63tion_start_ms\x18\x02 \x01(\x03\x12\x13\n\x0b\x64uration_ms\x18\x03 \x01(\x05\x12\x14\n\x0c\x65nergy_delta\x18\x05 \x01(\x05\x12\x16\n\x0e\x61ttacker_index\x18\x06 \x01(\x05\x12\x14\n\x0ctarget_index\x18\x07 \x01(\x05\x12\x19\n\x11\x61\x63tive_pokemon_id\x18\x08 \x01(\x06\x12@\n\rplayer_joined\x18\t \x01(\x0b\x32).POGOProtos.Data.Battle.BattleParticipant\x12=\n\x0e\x62\x61ttle_results\x18\n \x01(\x0b\x32%.POGOProtos.Data.Battle.BattleResults\x12*\n\"damage_windows_start_timestamp_mss\x18\x0b \x01(\x03\x12(\n damage_windows_end_timestamp_mss\x18\x0c \x01(\x03\x12>\n\x0bplayer_left\x18\r \x01(\x0b\x32).POGOProtos.Data.Battle.BattleParticipant\x12\x19\n\x11target_pokemon_id\x18\x0e \x01(\x06\x62\x06proto3') , dependencies=[POGOProtos_dot_Data_dot_Battle_dot_BattleResults__pb2.DESCRIPTOR,POGOProtos_dot_Data_dot_Battle_dot_BattleActionType__pb2.DESCRIPTOR,POGOProtos_dot_Data_dot_Battle_dot_BattleParticipant__pb2.DESCRIPTOR,]) _sym_db.RegisterFileDescriptor(DESCRIPTOR) _BATTLEACTION = _descriptor.Descriptor( name='BattleAction', full_name='POGOProtos.Data.Battle.BattleAction', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='Type', full_name='POGOProtos.Data.Battle.BattleAction.Type', index=0, number=1, type=14, cpp_type=8, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='action_start_ms', full_name='POGOProtos.Data.Battle.BattleAction.action_start_ms', index=1, number=2, type=3, cpp_type=2, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='duration_ms', full_name='POGOProtos.Data.Battle.BattleAction.duration_ms', index=2, number=3, type=5, cpp_type=1, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='energy_delta', 
full_name='POGOProtos.Data.Battle.BattleAction.energy_delta', index=3, number=5, type=5, cpp_type=1, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='attacker_index', full_name='POGOProtos.Data.Battle.BattleAction.attacker_index', index=4, number=6, type=5, cpp_type=1, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='target_index', full_name='POGOProtos.Data.Battle.BattleAction.target_index', index=5, number=7, type=5, cpp_type=1, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='active_pokemon_id', full_name='POGOProtos.Data.Battle.BattleAction.active_pokemon_id', index=6, number=8, type=6, cpp_type=4, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='player_joined', full_name='POGOProtos.Data.Battle.BattleAction.player_joined', index=7, number=9, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='battle_results', full_name='POGOProtos.Data.Battle.BattleAction.battle_results', index=8, number=10, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='damage_windows_start_timestamp_mss', full_name='POGOProtos.Data.Battle.BattleAction.damage_windows_start_timestamp_mss', index=9, number=11, type=3, cpp_type=2, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='damage_windows_end_timestamp_mss', full_name='POGOProtos.Data.Battle.BattleAction.damage_windows_end_timestamp_mss', index=10, number=12, type=3, cpp_type=2, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='player_left', full_name='POGOProtos.Data.Battle.BattleAction.player_left', index=11, number=13, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='target_pokemon_id', full_name='POGOProtos.Data.Battle.BattleAction.target_pokemon_id', index=12, number=14, type=6, cpp_type=4, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), ], extensions=[ ], nested_types=[], enum_types=[ ], options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=209, serialized_end=726, ) _BATTLEACTION.fields_by_name['Type'].enum_type = POGOProtos_dot_Data_dot_Battle_dot_BattleActionType__pb2._BATTLEACTIONTYPE 
_BATTLEACTION.fields_by_name['player_joined'].message_type = POGOProtos_dot_Data_dot_Battle_dot_BattleParticipant__pb2._BATTLEPARTICIPANT _BATTLEACTION.fields_by_name['battle_results'].message_type = POGOProtos_dot_Data_dot_Battle_dot_BattleResults__pb2._BATTLERESULTS _BATTLEACTION.fields_by_name['player_left'].message_type = POGOProtos_dot_Data_dot_Battle_dot_BattleParticipant__pb2._BATTLEPARTICIPANT DESCRIPTOR.message_types_by_name['BattleAction'] = _BATTLEACTION BattleAction = _reflection.GeneratedProtocolMessageType('BattleAction', (_message.Message,), dict( DESCRIPTOR = _BATTLEACTION, __module__ = 'POGOProtos.Data.Battle.BattleAction_pb2' # @@protoc_insertion_point(class_scope:POGOProtos.Data.Battle.BattleAction) )) _sym_db.RegisterMessage(BattleAction) # @@protoc_insertion_point(module_scope)
52.395062
1,092
0.776626
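An illustrative construction/serialization snippet for the generated message in the record above. It assumes the sibling generated module BattleActionType_pb2 is importable and that ACTION_ATTACK is one of its enum values; neither is shown in this record.

# Illustration only: the sibling generated module and the ACTION_ATTACK value are assumptions.
from POGOProtos.Data.Battle import BattleAction_pb2, BattleActionType_pb2

action = BattleAction_pb2.BattleAction(
    Type=BattleActionType_pb2.ACTION_ATTACK,
    action_start_ms=1000,
    duration_ms=500,
)
payload = action.SerializeToString()                       # wire-format bytes
same = BattleAction_pb2.BattleAction.FromString(payload)   # parses back to an equal message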
ae9a589a7aa493a39f4f2089db765b26406a71d4
3,681
py
Python
LPBv2/LPBv2/controller/actions/combat.py
DorskFR/LeaguePyBot
637aa1ba39f6b4dfea58b66e982589a8850b26f8
[ "MIT" ]
45
2020-11-28T04:45:45.000Z
2022-03-31T05:53:37.000Z
LPBv2/LPBv2/controller/actions/combat.py
DorskFR/LeaguePyBot
637aa1ba39f6b4dfea58b66e982589a8850b26f8
[ "MIT" ]
13
2021-01-15T00:50:10.000Z
2022-02-02T15:16:49.000Z
LPBv2/LPBv2/controller/actions/combat.py
DorskFR/LeaguePyBot
637aa1ba39f6b4dfea58b66e982589a8850b26f8
[ "MIT" ]
14
2020-12-21T10:03:31.000Z
2021-11-22T04:03:03.000Z
from . import Action from ...common import safest_position, average_position, debug_coro from ...logger import get_logger, Colors import asyncio logger = get_logger("LPBv2.Combat") class Combat(Action): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.offset_x = 50 self.offset_y = 50 @debug_coro async def cast_spell(self, key, x: int, y: int): self.mouse.set_position(x + self.offset_x, y + self.offset_y) self.keyboard.input_key(key) @debug_coro async def level_up_abilities(self): if self.game.player.info.level in [1, 4, 5, 7, 9]: self.keyboard.input_key("Ctrl" + self.hotkeys.first_ability) elif self.game.player.info.level in [2, 14, 15, 17, 18]: self.keyboard.input_key("Ctrl" + self.hotkeys.second_ability) elif self.game.player.info.level in [3, 8, 10, 12, 13]: self.keyboard.input_key("Ctrl" + self.hotkeys.third_ability) else: self.keyboard.input_key("Ctrl" + self.hotkeys.ultimate_ability) @debug_coro async def cast_spells(self, x: int, y: int, ultimate=False): if ultimate: await self.cast_spell(self.hotkeys.ultimate_ability, x, y) await self.cast_spell(self.hotkeys.first_ability, x, y) await self.cast_spell(self.hotkeys.second_ability, x, y) await self.cast_spell(self.hotkeys.third_ability, x, y) @debug_coro async def attack(self, x: int, y: int): await self.attack_move(x + self.offset_x, y + self.offset_y) @debug_coro async def get_closest_enemy_position(self): minions = self.game.game_units.units.enemy_minions if minions: return safest_position(minions) @debug_coro async def get_average_enemy_position(self): minions = self.game.game_units.units.enemy_minions if minions: return average_position(minions) @debug_coro async def attack_minions(self): await self.game.game_flow.update_current_action("Attacking minions") pos = await self.get_closest_enemy_position() if pos: await self.attack(*pos) pos = await self.get_average_enemy_position() if await self.game.player.has_more_than_50_percent_mana() and pos: await asyncio.sleep(1) await self.cast_spells(*pos) @debug_coro async def get_closest_enemy_champion_position(self): champions = self.game.game_units.units.enemy_champions if champions: return safest_position(champions) @debug_coro async def attack_champion(self): await self.game.game_flow.update_current_action("Attacking champion") pos = await self.get_closest_enemy_champion_position() if pos: await self.attack(*pos) if await self.game.player.has_more_than_25_percent_mana() and pos: await self.cast_spells(*pos, ultimate=True) @debug_coro async def get_closest_enemy_building_position(self): buildings = self.game.game_units.units.enemy_buildings if buildings: return safest_position(buildings) @debug_coro async def attack_building(self): await self.game.game_flow.update_current_action("Attacking building") pos = await self.get_closest_enemy_building_position() pos_ally = await self.get_riskiest_ally_position() if pos and pos_ally: distance = ((pos[0] + self.offset_x) - (pos[1] + self.offset_y * 2)) - ( pos_ally[0] - pos_ally[1] ) if distance < 500: await self.attack(*pos)
37.561224
84
0.657973
078e2b26be9788dfae1f960c596a422acf126a7c
1,193
py
Python
services/director-v2/tests/unit/test_models_comp_runs.py
colinRawlings/osparc-simcore
bf2f18d5bc1e574d5f4c238d08ad15156184c310
[ "MIT" ]
25
2018-04-13T12:44:12.000Z
2022-03-12T15:01:17.000Z
services/director-v2/tests/unit/test_models_comp_runs.py
colinRawlings/osparc-simcore
bf2f18d5bc1e574d5f4c238d08ad15156184c310
[ "MIT" ]
2,553
2018-01-18T17:11:55.000Z
2022-03-31T16:26:40.000Z
services/director-v2/tests/unit/test_models_comp_runs.py
mrnicegyu11/osparc-simcore
b6fa6c245dbfbc18cc74a387111a52de9b05d1f4
[ "MIT" ]
20
2018-01-18T19:45:33.000Z
2022-03-29T07:08:47.000Z
# pylint:disable=unused-variable # pylint:disable=unused-argument # pylint:disable=redefined-outer-name from pprint import pformat from typing import Any, Dict, Type import pytest from models_library.projects_state import RunningState from pydantic.main import BaseModel from simcore_service_director_v2.models.domains.comp_runs import CompRunsAtDB @pytest.mark.parametrize( "model_cls", (CompRunsAtDB,), ) def test_computation_run_model_examples( model_cls: Type[BaseModel], model_cls_examples: Dict[str, Dict[str, Any]] ): for name, example in model_cls_examples.items(): print(name, ":", pformat(example)) model_instance = model_cls(**example) assert model_instance, f"Failed with {name}" @pytest.mark.parametrize( "model_cls", (CompRunsAtDB,), ) def test_computation_run_model_with_run_result_value_field( model_cls: Type[BaseModel], model_cls_examples: Dict[str, Dict[str, Any]] ): for name, example in model_cls_examples.items(): example["result"] = RunningState.RETRY.value print(name, ":", pformat(example)) model_instance = model_cls(**example) assert model_instance, f"Failed with {name}"
30.589744
77
0.738474
bb6e45e259b14861c393f279e7ac415320b9bba7
1,337
py
Python
cosypose/evaluation/meters/base.py
ompugao/cosypose
4e471c16f19d5ee632668cd52eaa57b562f287d6
[ "MIT" ]
202
2020-08-19T19:28:03.000Z
2022-03-29T07:10:47.000Z
cosypose/evaluation/meters/base.py
ompugao/cosypose
4e471c16f19d5ee632668cd52eaa57b562f287d6
[ "MIT" ]
66
2020-08-24T09:28:05.000Z
2022-03-31T07:11:06.000Z
cosypose/evaluation/meters/base.py
ompugao/cosypose
4e471c16f19d5ee632668cd52eaa57b562f287d6
[ "MIT" ]
66
2020-08-19T19:28:05.000Z
2022-03-18T20:47:55.000Z
from pathlib import Path import torch from collections import defaultdict from cosypose.utils.distributed import get_world_size, get_rank class Meter: def __init__(self): self.reset() def reset(self): self.datas = defaultdict(list) def add(self, pred_data, gt_data): raise NotImplementedError def is_data_valid(self, data): raise NotImplementedError def gather_distributed(self, tmp_dir): tmp_dir = Path(tmp_dir) tmp_dir.mkdir(exist_ok=True, parents=True) rank, world_size = get_rank(), get_world_size() tmp_file_template = (tmp_dir / 'rank={rank}.pth.tar').as_posix() if rank > 0: tmp_file = tmp_file_template.format(rank=rank) torch.save(self.datas, tmp_file) if world_size > 1: torch.distributed.barrier() if rank == 0 and world_size > 1: all_datas = self.datas for n in range(1, world_size): tmp_file = tmp_file_template.format(rank=n) datas = torch.load(tmp_file) for k in all_datas.keys(): all_datas[k].extend(datas.get(k, [])) Path(tmp_file).unlink() self.datas = all_datas if world_size > 1: torch.distributed.barrier() return
29.065217
72
0.601346
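A hypothetical minimal subclass, added only to show the two extension points the Meter base class in the record above leaves abstract: add() accumulates per-sample values into self.datas, and is_data_valid() guards the inputs.

# Hypothetical subclass illustrating the abstract hooks of Meter above.
class AbsErrorMeter(Meter):
    def is_data_valid(self, data):
        return data is not None

    def add(self, pred_data, gt_data):
        if self.is_data_valid(pred_data) and self.is_data_valid(gt_data):
            # per-sample values accumulate in the defaultdict created by reset()
            self.datas['abs_error'].append(abs(pred_data - gt_data))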
94546353c4af068ecbe37f4741683f1760124d5a
330
py
Python
wsgi.py
scott2b/S3Thing
32be7447afd9d350fef94cacedc659e05e146fdf
[ "MIT" ]
null
null
null
wsgi.py
scott2b/S3Thing
32be7447afd9d350fef94cacedc659e05e146fdf
[ "MIT" ]
null
null
null
wsgi.py
scott2b/S3Thing
32be7447afd9d350fef94cacedc659e05e146fdf
[ "MIT" ]
null
null
null
""" WSGI config for juxtapose project. """ import os, sys sys.stdout = sys.stderr from app import app as application if os.environ.get('FLASK_DEBUG', '').lower() == 'true': from werkzeug.debug import DebuggedApplication application.config['DEBUG'] = True application = DebuggedApplication(application, evalex=True)
23.571429
63
0.727273
7f296e33fce490eb18f5f77e3877f18e9ea4c237
15,093
py
Python
src/pycropml/transpiler/antlr_py/grammars/Python3Visitor.py
brichet/PyCrop2ML
7177996f72a8d95fdbabb772a16f1fd87b1d033e
[ "MIT" ]
5
2020-06-21T18:58:04.000Z
2022-01-29T21:32:28.000Z
src/pycropml/transpiler/antlr_py/grammars/Python3Visitor.py
brichet/PyCrop2ML
7177996f72a8d95fdbabb772a16f1fd87b1d033e
[ "MIT" ]
27
2018-12-04T15:35:44.000Z
2022-03-11T08:25:03.000Z
src/pycropml/transpiler/antlr_py/grammars/Python3Visitor.py
brichet/PyCrop2ML
7177996f72a8d95fdbabb772a16f1fd87b1d033e
[ "MIT" ]
7
2019-04-20T02:25:22.000Z
2021-11-04T07:52:35.000Z
# Generated from Documents\THESE\pycropml_pheno\src\pycropml\antlr_grammarV4\python\python3-py\Python3.g4 by ANTLR 4.8 from antlr4 import * if __name__ is not None and "." in __name__: from .Python3Parser import Python3Parser else: from Python3Parser import Python3Parser # This class defines a complete generic visitor for a parse tree produced by Python3Parser. class Python3Visitor(ParseTreeVisitor): # Visit a parse tree produced by Python3Parser#single_input. def visitSingle_input(self, ctx:Python3Parser.Single_inputContext): return self.visitChildren(ctx) # Visit a parse tree produced by Python3Parser#file_input. def visitFile_input(self, ctx:Python3Parser.File_inputContext): return self.visitChildren(ctx) # Visit a parse tree produced by Python3Parser#eval_input. def visitEval_input(self, ctx:Python3Parser.Eval_inputContext): return self.visitChildren(ctx) # Visit a parse tree produced by Python3Parser#decorator. def visitDecorator(self, ctx:Python3Parser.DecoratorContext): return self.visitChildren(ctx) # Visit a parse tree produced by Python3Parser#decorators. def visitDecorators(self, ctx:Python3Parser.DecoratorsContext): return self.visitChildren(ctx) # Visit a parse tree produced by Python3Parser#decorated. def visitDecorated(self, ctx:Python3Parser.DecoratedContext): return self.visitChildren(ctx) # Visit a parse tree produced by Python3Parser#async_funcdef. def visitAsync_funcdef(self, ctx:Python3Parser.Async_funcdefContext): return self.visitChildren(ctx) # Visit a parse tree produced by Python3Parser#funcdef. def visitFuncdef(self, ctx:Python3Parser.FuncdefContext): return self.visitChildren(ctx) # Visit a parse tree produced by Python3Parser#parameters. def visitParameters(self, ctx:Python3Parser.ParametersContext): return self.visitChildren(ctx) # Visit a parse tree produced by Python3Parser#typedargslist. def visitTypedargslist(self, ctx:Python3Parser.TypedargslistContext): return self.visitChildren(ctx) # Visit a parse tree produced by Python3Parser#tfpdef. def visitTfpdef(self, ctx:Python3Parser.TfpdefContext): return self.visitChildren(ctx) # Visit a parse tree produced by Python3Parser#varargslist. def visitVarargslist(self, ctx:Python3Parser.VarargslistContext): return self.visitChildren(ctx) # Visit a parse tree produced by Python3Parser#vfpdef. def visitVfpdef(self, ctx:Python3Parser.VfpdefContext): return self.visitChildren(ctx) # Visit a parse tree produced by Python3Parser#stmt. def visitStmt(self, ctx:Python3Parser.StmtContext): return self.visitChildren(ctx) # Visit a parse tree produced by Python3Parser#simple_stmt. def visitSimple_stmt(self, ctx:Python3Parser.Simple_stmtContext): return self.visitChildren(ctx) # Visit a parse tree produced by Python3Parser#small_stmt. def visitSmall_stmt(self, ctx:Python3Parser.Small_stmtContext): return self.visitChildren(ctx) # Visit a parse tree produced by Python3Parser#expr_stmt. def visitExpr_stmt(self, ctx:Python3Parser.Expr_stmtContext): return self.visitChildren(ctx) # Visit a parse tree produced by Python3Parser#annassign. def visitAnnassign(self, ctx:Python3Parser.AnnassignContext): return self.visitChildren(ctx) # Visit a parse tree produced by Python3Parser#testlist_star_expr. def visitTestlist_star_expr(self, ctx:Python3Parser.Testlist_star_exprContext): return self.visitChildren(ctx) # Visit a parse tree produced by Python3Parser#augassign. def visitAugassign(self, ctx:Python3Parser.AugassignContext): return self.visitChildren(ctx) # Visit a parse tree produced by Python3Parser#del_stmt. 
def visitDel_stmt(self, ctx:Python3Parser.Del_stmtContext): return self.visitChildren(ctx) # Visit a parse tree produced by Python3Parser#pass_stmt. def visitPass_stmt(self, ctx:Python3Parser.Pass_stmtContext): return self.visitChildren(ctx) # Visit a parse tree produced by Python3Parser#flow_stmt. def visitFlow_stmt(self, ctx:Python3Parser.Flow_stmtContext): return self.visitChildren(ctx) # Visit a parse tree produced by Python3Parser#break_stmt. def visitBreak_stmt(self, ctx:Python3Parser.Break_stmtContext): return self.visitChildren(ctx) # Visit a parse tree produced by Python3Parser#continue_stmt. def visitContinue_stmt(self, ctx:Python3Parser.Continue_stmtContext): return self.visitChildren(ctx) # Visit a parse tree produced by Python3Parser#return_stmt. def visitReturn_stmt(self, ctx:Python3Parser.Return_stmtContext): return self.visitChildren(ctx) # Visit a parse tree produced by Python3Parser#yield_stmt. def visitYield_stmt(self, ctx:Python3Parser.Yield_stmtContext): return self.visitChildren(ctx) # Visit a parse tree produced by Python3Parser#raise_stmt. def visitRaise_stmt(self, ctx:Python3Parser.Raise_stmtContext): return self.visitChildren(ctx) # Visit a parse tree produced by Python3Parser#import_stmt. def visitImport_stmt(self, ctx:Python3Parser.Import_stmtContext): return self.visitChildren(ctx) # Visit a parse tree produced by Python3Parser#import_name. def visitImport_name(self, ctx:Python3Parser.Import_nameContext): return self.visitChildren(ctx) # Visit a parse tree produced by Python3Parser#import_from. def visitImport_from(self, ctx:Python3Parser.Import_fromContext): return self.visitChildren(ctx) # Visit a parse tree produced by Python3Parser#import_as_name. def visitImport_as_name(self, ctx:Python3Parser.Import_as_nameContext): return self.visitChildren(ctx) # Visit a parse tree produced by Python3Parser#dotted_as_name. def visitDotted_as_name(self, ctx:Python3Parser.Dotted_as_nameContext): return self.visitChildren(ctx) # Visit a parse tree produced by Python3Parser#import_as_names. def visitImport_as_names(self, ctx:Python3Parser.Import_as_namesContext): return self.visitChildren(ctx) # Visit a parse tree produced by Python3Parser#dotted_as_names. def visitDotted_as_names(self, ctx:Python3Parser.Dotted_as_namesContext): return self.visitChildren(ctx) # Visit a parse tree produced by Python3Parser#dotted_name. def visitDotted_name(self, ctx:Python3Parser.Dotted_nameContext): return self.visitChildren(ctx) # Visit a parse tree produced by Python3Parser#global_stmt. def visitGlobal_stmt(self, ctx:Python3Parser.Global_stmtContext): return self.visitChildren(ctx) # Visit a parse tree produced by Python3Parser#nonlocal_stmt. def visitNonlocal_stmt(self, ctx:Python3Parser.Nonlocal_stmtContext): return self.visitChildren(ctx) # Visit a parse tree produced by Python3Parser#assert_stmt. def visitAssert_stmt(self, ctx:Python3Parser.Assert_stmtContext): return self.visitChildren(ctx) # Visit a parse tree produced by Python3Parser#compound_stmt. def visitCompound_stmt(self, ctx:Python3Parser.Compound_stmtContext): return self.visitChildren(ctx) # Visit a parse tree produced by Python3Parser#async_stmt. def visitAsync_stmt(self, ctx:Python3Parser.Async_stmtContext): return self.visitChildren(ctx) # Visit a parse tree produced by Python3Parser#if_stmt. def visitIf_stmt(self, ctx:Python3Parser.If_stmtContext): return self.visitChildren(ctx) # Visit a parse tree produced by Python3Parser#while_stmt. 
def visitWhile_stmt(self, ctx:Python3Parser.While_stmtContext): return self.visitChildren(ctx) # Visit a parse tree produced by Python3Parser#for_stmt. def visitFor_stmt(self, ctx:Python3Parser.For_stmtContext): return self.visitChildren(ctx) # Visit a parse tree produced by Python3Parser#try_stmt. def visitTry_stmt(self, ctx:Python3Parser.Try_stmtContext): return self.visitChildren(ctx) # Visit a parse tree produced by Python3Parser#with_stmt. def visitWith_stmt(self, ctx:Python3Parser.With_stmtContext): return self.visitChildren(ctx) # Visit a parse tree produced by Python3Parser#with_item. def visitWith_item(self, ctx:Python3Parser.With_itemContext): return self.visitChildren(ctx) # Visit a parse tree produced by Python3Parser#except_clause. def visitExcept_clause(self, ctx:Python3Parser.Except_clauseContext): return self.visitChildren(ctx) # Visit a parse tree produced by Python3Parser#suite. def visitSuite(self, ctx:Python3Parser.SuiteContext): return self.visitChildren(ctx) # Visit a parse tree produced by Python3Parser#test. def visitTest(self, ctx:Python3Parser.TestContext): return self.visitChildren(ctx) # Visit a parse tree produced by Python3Parser#test_nocond. def visitTest_nocond(self, ctx:Python3Parser.Test_nocondContext): return self.visitChildren(ctx) # Visit a parse tree produced by Python3Parser#lambdef. def visitLambdef(self, ctx:Python3Parser.LambdefContext): return self.visitChildren(ctx) # Visit a parse tree produced by Python3Parser#lambdef_nocond. def visitLambdef_nocond(self, ctx:Python3Parser.Lambdef_nocondContext): return self.visitChildren(ctx) # Visit a parse tree produced by Python3Parser#or_test. def visitOr_test(self, ctx:Python3Parser.Or_testContext): return self.visitChildren(ctx) # Visit a parse tree produced by Python3Parser#and_test. def visitAnd_test(self, ctx:Python3Parser.And_testContext): return self.visitChildren(ctx) # Visit a parse tree produced by Python3Parser#not_test. def visitNot_test(self, ctx:Python3Parser.Not_testContext): return self.visitChildren(ctx) # Visit a parse tree produced by Python3Parser#comparison. def visitComparison(self, ctx:Python3Parser.ComparisonContext): return self.visitChildren(ctx) # Visit a parse tree produced by Python3Parser#comp_op. def visitComp_op(self, ctx:Python3Parser.Comp_opContext): return self.visitChildren(ctx) # Visit a parse tree produced by Python3Parser#star_expr. def visitStar_expr(self, ctx:Python3Parser.Star_exprContext): return self.visitChildren(ctx) # Visit a parse tree produced by Python3Parser#expr. def visitExpr(self, ctx:Python3Parser.ExprContext): return self.visitChildren(ctx) # Visit a parse tree produced by Python3Parser#xor_expr. def visitXor_expr(self, ctx:Python3Parser.Xor_exprContext): return self.visitChildren(ctx) # Visit a parse tree produced by Python3Parser#and_expr. def visitAnd_expr(self, ctx:Python3Parser.And_exprContext): return self.visitChildren(ctx) # Visit a parse tree produced by Python3Parser#shift_expr. def visitShift_expr(self, ctx:Python3Parser.Shift_exprContext): return self.visitChildren(ctx) # Visit a parse tree produced by Python3Parser#arith_expr. def visitArith_expr(self, ctx:Python3Parser.Arith_exprContext): return self.visitChildren(ctx) # Visit a parse tree produced by Python3Parser#term. def visitTerm(self, ctx:Python3Parser.TermContext): return self.visitChildren(ctx) # Visit a parse tree produced by Python3Parser#factor. 
def visitFactor(self, ctx:Python3Parser.FactorContext): return self.visitChildren(ctx) # Visit a parse tree produced by Python3Parser#power. def visitPower(self, ctx:Python3Parser.PowerContext): return self.visitChildren(ctx) # Visit a parse tree produced by Python3Parser#atom_expr. def visitAtom_expr(self, ctx:Python3Parser.Atom_exprContext): return self.visitChildren(ctx) # Visit a parse tree produced by Python3Parser#atom. def visitAtom(self, ctx:Python3Parser.AtomContext): return self.visitChildren(ctx) # Visit a parse tree produced by Python3Parser#testlist_comp. def visitTestlist_comp(self, ctx:Python3Parser.Testlist_compContext): return self.visitChildren(ctx) # Visit a parse tree produced by Python3Parser#trailer. def visitTrailer(self, ctx:Python3Parser.TrailerContext): return self.visitChildren(ctx) # Visit a parse tree produced by Python3Parser#subscriptlist. def visitSubscriptlist(self, ctx:Python3Parser.SubscriptlistContext): return self.visitChildren(ctx) # Visit a parse tree produced by Python3Parser#subscript. def visitSubscript(self, ctx:Python3Parser.SubscriptContext): return self.visitChildren(ctx) # Visit a parse tree produced by Python3Parser#sliceop. def visitSliceop(self, ctx:Python3Parser.SliceopContext): return self.visitChildren(ctx) # Visit a parse tree produced by Python3Parser#exprlist. def visitExprlist(self, ctx:Python3Parser.ExprlistContext): return self.visitChildren(ctx) # Visit a parse tree produced by Python3Parser#testlist. def visitTestlist(self, ctx:Python3Parser.TestlistContext): return self.visitChildren(ctx) # Visit a parse tree produced by Python3Parser#dictorsetmaker. def visitDictorsetmaker(self, ctx:Python3Parser.DictorsetmakerContext): return self.visitChildren(ctx) # Visit a parse tree produced by Python3Parser#classdef. def visitClassdef(self, ctx:Python3Parser.ClassdefContext): return self.visitChildren(ctx) # Visit a parse tree produced by Python3Parser#arglist. def visitArglist(self, ctx:Python3Parser.ArglistContext): return self.visitChildren(ctx) # Visit a parse tree produced by Python3Parser#argument. def visitArgument(self, ctx:Python3Parser.ArgumentContext): return self.visitChildren(ctx) # Visit a parse tree produced by Python3Parser#comp_iter. def visitComp_iter(self, ctx:Python3Parser.Comp_iterContext): return self.visitChildren(ctx) # Visit a parse tree produced by Python3Parser#comp_for. def visitComp_for(self, ctx:Python3Parser.Comp_forContext): return self.visitChildren(ctx) # Visit a parse tree produced by Python3Parser#comp_if. def visitComp_if(self, ctx:Python3Parser.Comp_ifContext): return self.visitChildren(ctx) # Visit a parse tree produced by Python3Parser#encoding_decl. def visitEncoding_decl(self, ctx:Python3Parser.Encoding_declContext): return self.visitChildren(ctx) # Visit a parse tree produced by Python3Parser#yield_expr. def visitYield_expr(self, ctx:Python3Parser.Yield_exprContext): return self.visitChildren(ctx) # Visit a parse tree produced by Python3Parser#yield_arg. def visitYield_arg(self, ctx:Python3Parser.Yield_argContext): return self.visitChildren(ctx) del Python3Parser
34.069977
118
0.749487
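A small sketch of how the generated base visitor in the record above is typically extended: a concrete visitor overrides one rule method and keeps delegating to visitChildren. It assumes the matching generated Python3Parser (imported at the top of that file) and that the funcdef rule exposes its identifier via NAME().

# Sketch: a concrete visitor overriding one rule method of the generated base class.
class FuncNameVisitor(Python3Visitor):
    """Collects the name of every funcdef encountered in a parse tree."""

    def __init__(self):
        self.names = []

    def visitFuncdef(self, ctx: Python3Parser.FuncdefContext):
        self.names.append(ctx.NAME().getText())   # NAME token of the funcdef rule (assumed accessor)
        return self.visitChildren(ctx)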
55c6111a0db462f587c01311a47ec561be0aadbc
3,448
py
Python
python/ray/tests/test_pydantic_serialization.py
77loopin/ray
9322f6aab53f4ca5baf5a3573e1ffde12feae519
[ "Apache-2.0" ]
21,382
2016-09-26T23:12:52.000Z
2022-03-31T21:47:45.000Z
python/ray/tests/test_pydantic_serialization.py
77loopin/ray
9322f6aab53f4ca5baf5a3573e1ffde12feae519
[ "Apache-2.0" ]
19,689
2016-09-17T08:21:25.000Z
2022-03-31T23:59:30.000Z
python/ray/tests/test_pydantic_serialization.py
gramhagen/ray
c18caa4db36d466718bdbcb2229aa0b2dc03da1f
[ "Apache-2.0" ]
4,114
2016-09-23T18:54:01.000Z
2022-03-31T15:07:32.000Z
from dataclasses import dataclass
from typing import Any, Dict, List, Optional

import pytest
from fastapi import FastAPI
from pydantic import BaseModel

import ray


@pytest.fixture(scope="session")
def start_ray():
    ray.init(ignore_reinit_error=True)


def test_serialize_cls(start_ray):
    class User(BaseModel):
        name: str

    ray.get(ray.put(User))


def test_serialize_instance(start_ray):
    class User(BaseModel):
        name: str

    ray.get(ray.put(User(name="a")))


def test_serialize_imported_cls(start_ray):
    from pydantic_module import User

    ray.get(ray.put(User))


def test_serialize_imported_instance(start_ray):
    from pydantic_module import user

    ray.get(ray.put(user))


def test_serialize_app_no_route(start_ray):
    app = FastAPI()

    ray.get(ray.put(app))


def test_serialize_app_no_validation(start_ray):
    app = FastAPI()

    @app.get("/")
    def hello() -> str:
        return "hi"

    ray.get(ray.put(app))


def test_serialize_app_primitive_type(start_ray):
    app = FastAPI()

    @app.get("/")
    def hello(v: str) -> str:
        return "hi"

    ray.get(ray.put(app))


def test_serialize_app_pydantic_type_imported(start_ray):
    from pydantic_module import User

    app = FastAPI()

    @app.get("/")
    def hello(v: str, u: User) -> str:
        return "hi"

    ray.get(ray.put(app))


def test_serialize_app_pydantic_type_inline(start_ray):
    class User(BaseModel):
        name: str

    app = FastAPI()

    @app.get("/")
    def hello(v: str, u: User) -> str:
        return "hi"

    ray.get(ray.put(app))


def test_serialize_app_imported(start_ray):
    from pydantic_module import app

    ray.get(ray.put(app))


def test_serialize_app_pydantic_type_closure_ref(start_ray):
    class User(BaseModel):
        name: str

    def make():
        app = FastAPI()

        @app.get("/")
        def hello(v: str, u: User) -> str:
            return "hi"

        return app

    ray.get(ray.put(make))


def test_serialize_app_pydantic_type_closure_ref_import(start_ray):
    from pydantic_module import User

    def make():
        app = FastAPI()

        @app.get("/")
        def hello(v: str, u: User) -> str:
            return "hi"

        return app

    ray.get(ray.put(make))


def test_serialize_app_pydantic_type_closure(start_ray):
    def make():
        class User(BaseModel):
            name: str

        app = FastAPI()

        @app.get("/")
        def hello(v: str, u: User) -> str:
            return "hi"

        return app

    ray.get(ray.put(make))


def test_serialize_app_imported_closure(start_ray):
    from pydantic_module import closure

    ray.get(ray.put(closure))


def test_serialize_serve_dataclass(start_ray):
    @dataclass
    class BackendMetadata:
        is_blocking: bool = True
        autoscaling_config: Optional[Dict[str, Any]] = None

    class BackendConfig(BaseModel):
        internal_metadata: BackendMetadata = BackendMetadata()

    ray.get(ray.put(BackendConfig()))

    @ray.remote
    def consume(f):
        pass

    ray.get(consume.remote(BackendConfig()))


def test_serialize_nested_field(start_ray):
    class B(BaseModel):
        v: List[int]

    # this shouldn't error
    B(v=[1])

    @ray.remote
    def func():
        # this shouldn't error
        return B(v=[1])

    ray.get(func.remote())


if __name__ == "__main__":
    import sys

    sys.exit(pytest.main(["-v", "-s", __file__]))
18.637838
67
0.637761
914126560c2e5822c71fc5b2c7e30452b586109a
963
py
Python
mcr_analyser/analyser.py
mknopp/mcr-analyser
50b106ede4a29c59851e0f5c11d42aaed5804b77
[ "MIT" ]
null
null
null
mcr_analyser/analyser.py
mknopp/mcr-analyser
50b106ede4a29c59851e0f5c11d42aaed5804b77
[ "MIT" ]
null
null
null
mcr_analyser/analyser.py
mknopp/mcr-analyser
50b106ede4a29c59851e0f5c11d42aaed5804b77
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*-
#
# MCR-Analyser
#
# Copyright (C) 2021 Martin Knopp, Technical University of Munich
#
# This program is free software, see the LICENSE file in the root of this
# repository for details

import sys

from qtpy import QtCore, QtWidgets

from mcr_analyser.i18n import setup_getext
from mcr_analyser.ui.mainwindow import MainWindow


class Analyser(QtWidgets.QApplication):
    def __init__(self, localedir):
        super().__init__(sys.argv)
        self.setOrganizationName("TranslaTUM")
        self.setOrganizationDomain("www.translatum.tum.de")
        self.setApplicationName("MCR-Analyzer")

        setup_getext(localedir)

        self.window = MainWindow()

    def run(self):
        self.window.show()
        res = self.exec_()
        self.exit()
        return res

    def exit(self):
        QtCore.QCoreApplication.processEvents()


def main(localedir=None):
    analyser = Analyser(localedir)
    sys.exit(analyser.run())
22.395349
73
0.685358
d02f63c0ed246030f1a7297f7a3f30e8d09fecfb
427
py
Python
earthquake_visualizer/models.py
ivanjedovnicki/dza
0e2df613f5e12eef65b9094312b40af96c11cab2
[ "MIT" ]
null
null
null
earthquake_visualizer/models.py
ivanjedovnicki/dza
0e2df613f5e12eef65b9094312b40af96c11cab2
[ "MIT" ]
null
null
null
earthquake_visualizer/models.py
ivanjedovnicki/dza
0e2df613f5e12eef65b9094312b40af96c11cab2
[ "MIT" ]
null
null
null
from django.db import models


class EarthQuakeFeed(models.Model):
    title = models.CharField(max_length=50)
    latitude = models.DecimalField(max_digits=4, decimal_places=2)
    longitude = models.DecimalField(max_digits=4, decimal_places=2)
    magnitude = models.DecimalField(max_digits=2, decimal_places=1)
    time = models.DateTimeField()

    def __str__(self):
        return f'ID = {self.id}, TITLE = {self.title}'
32.846154
67
0.725995
e56a2d207d1b8d8eb1f9e164905f72cd807ac053
371
py
Python
api/__init__.py
joseangel-sc/conducto
6b873defc9327d6f8b51a826dd7a7ef6c3e41396
[ "Apache-2.0" ]
null
null
null
api/__init__.py
joseangel-sc/conducto
6b873defc9327d6f8b51a826dd7a7ef6c3e41396
[ "Apache-2.0" ]
null
null
null
api/__init__.py
joseangel-sc/conducto
6b873defc9327d6f8b51a826dd7a7ef6c3e41396
[ "Apache-2.0" ]
null
null
null
from .config import Config, dirconfig_write, dirconfig_select
from .auth import Auth, AsyncAuth
from .dir import Dir, AsyncDir
from .pipeline import Pipeline, AsyncPipeline
from .manager import Manager, AsyncManager
from .secrets import Secrets, AsyncSecrets
from .misc import connect_to_pipeline
from .api_utils import InvalidResponse, get_auth_headers, is_conducto_url
41.222222
73
0.843666
13aad375b28f21aeb3d887abf8b7f7855494244a
1,439
py
Python
app.py
plorry/pairmaker
e64f4209abdd57968ae35e8269663aefbaf478b5
[ "MIT" ]
null
null
null
app.py
plorry/pairmaker
e64f4209abdd57968ae35e8269663aefbaf478b5
[ "MIT" ]
null
null
null
app.py
plorry/pairmaker
e64f4209abdd57968ae35e8269663aefbaf478b5
[ "MIT" ]
null
null
null
import pairmaker
import db_conn

from flask import Flask, abort, g, jsonify, request

app = Flask(__name__)


@app.route('/call', methods=['POST'])
def call():
    # Slack Webhook will just send a generic message body,
    # so we need to parse that and turn it into the
    # desired command + arguments
    r = {}
    with app.app_context():
        channel_name = request.form.get('channel_id')
        db = get_db(channel_name)
        args = request.form.get('text').split(' ')
        command = args[1]
        if command == 'add':
            username = args[2]
            office = args[3]
            r = pairmaker.add_user(db, username, office)
        elif command == 'remove':
            username = args[2]
            r = pairmaker.remove_user(db, username)
        elif command == 'list':
            r = pairmaker.user_list(db)
        elif command == 'pairup':
            r = pairmaker.pairup(db) if len(args) == 2 else pairmaker.pairup(db, args[2])

    return jsonify(r)


def get_db(channel_name):
    db = getattr(g, f'_database_{channel_name}', None)
    if db is None:
        db = db_conn.get_db(channel_name)
        setattr(g, f'_database_{channel_name}', db)
        db_conn.init_db(db)
    return db


if __name__ == '__main__':
    print('starting app')
    with app.app_context():
        g.db = db_conn.get_db()
        db_conn.init_db(g.db)
    app.run()
29.367347
90
0.57262
2e654d4608a577c822f1b95deef3df7cbf4fd2f6
13,663
py
Python
mne/io/fieldtrip/tests/test_fieldtrip.py
enricovara/mne-python
f6f2aa7a97c3ae7ae5276202805d2f45de7b64cc
[ "BSD-3-Clause" ]
1
2021-02-16T13:33:13.000Z
2021-02-16T13:33:13.000Z
mne/io/fieldtrip/tests/test_fieldtrip.py
enricovara/mne-python
f6f2aa7a97c3ae7ae5276202805d2f45de7b64cc
[ "BSD-3-Clause" ]
12
2020-07-23T15:41:38.000Z
2021-02-24T09:38:41.000Z
mne/io/fieldtrip/tests/test_fieldtrip.py
enricovara/mne-python
f6f2aa7a97c3ae7ae5276202805d2f45de7b64cc
[ "BSD-3-Clause" ]
1
2021-04-01T15:56:39.000Z
2021-04-01T15:56:39.000Z
# -*- coding: UTF-8 -*- # Authors: Thomas Hartmann <[email protected]> # Dirk Gütlin <[email protected]> # # License: BSD (3-clause) import mne import os.path import pytest import copy import itertools import numpy as np from mne.datasets import testing from mne.io.fieldtrip.utils import NOINFO_WARNING, _create_events from mne.utils import _check_pandas_installed, requires_h5py from mne.io.fieldtrip.tests.helpers import (check_info_fields, get_data_paths, get_raw_data, get_epochs, get_evoked, _has_h5py, pandas_not_found_warning_msg, get_raw_info, check_data, assert_warning_in_record) # missing: KIT: biggest problem here is that the channels do not have the same # names. # EGI: no calibration done in FT. so data is VERY different all_systems_raw = ['neuromag306', 'CTF', 'CNT', 'BTI', 'eximia'] all_systems_epochs = ['neuromag306', 'CTF', 'CNT'] all_versions = ['v7', 'v73'] use_info = [True, False] all_test_params_raw = list(itertools.product(all_systems_raw, all_versions, use_info)) all_test_params_epochs = list(itertools.product(all_systems_epochs, all_versions, use_info)) # just for speed we skip some slowest ones -- the coverage should still # be sufficient for key in [('CTF', 'v73', True), ('neuromag306', 'v73', False)]: all_test_params_epochs.pop(all_test_params_epochs.index(key)) all_test_params_raw.pop(all_test_params_raw.index(key)) no_info_warning = {'expected_warning': RuntimeWarning, 'match': NOINFO_WARNING} @pytest.mark.slowtest @testing.requires_testing_data # Reading the sample CNT data results in a RuntimeWarning because it cannot # parse the measurement date. We need to ignore that warning. @pytest.mark.filterwarnings('ignore:.*parse meas date.*:RuntimeWarning') @pytest.mark.filterwarnings('ignore:.*number of bytes.*:RuntimeWarning') @pytest.mark.parametrize('cur_system, version, use_info', all_test_params_epochs) def test_read_evoked(cur_system, version, use_info): """Test comparing reading an Evoked object and the FieldTrip version.""" test_data_folder_ft = get_data_paths(cur_system) mne_avg = get_evoked(cur_system) if use_info: info = get_raw_info(cur_system) pytestwarning = {'expected_warning': None} else: info = None pytestwarning = no_info_warning cur_fname = os.path.join(test_data_folder_ft, 'averaged_%s.mat' % (version,)) if version == 'v73' and not _has_h5py(): with pytest.raises(ImportError): mne.io.read_evoked_fieldtrip(cur_fname, info) return with pytest.warns(**pytestwarning): avg_ft = mne.io.read_evoked_fieldtrip(cur_fname, info) mne_data = mne_avg.data[:, :-1] ft_data = avg_ft.data check_data(mne_data, ft_data, cur_system) check_info_fields(mne_avg, avg_ft, use_info) @testing.requires_testing_data # Reading the sample CNT data results in a RuntimeWarning because it cannot # parse the measurement date. We need to ignore that warning. 
@pytest.mark.filterwarnings('ignore:.*parse meas date.*:RuntimeWarning') @pytest.mark.filterwarnings('ignore:.*number of bytes.*:RuntimeWarning') @pytest.mark.parametrize('cur_system, version, use_info', all_test_params_epochs) # Strange, non-deterministic Pandas errors: # "ValueError: cannot expose native-only dtype 'g' in non-native # byte order '<' via buffer interface" @pytest.mark.skipif(os.getenv('AZURE_CI_WINDOWS', 'false').lower() == 'true', reason='Pandas problem on Azure CI') def test_read_epochs(cur_system, version, use_info, monkeypatch): """Test comparing reading an Epochs object and the FieldTrip version.""" pandas = _check_pandas_installed(strict=False) has_pandas = pandas is not False test_data_folder_ft = get_data_paths(cur_system) mne_epoched = get_epochs(cur_system) if use_info: info = get_raw_info(cur_system) pytestwarning = {'expected_warning': None} else: info = None pytestwarning = no_info_warning cur_fname = os.path.join(test_data_folder_ft, 'epoched_%s.mat' % (version,)) if has_pandas: if version == 'v73' and not _has_h5py(): with pytest.raises(ImportError): mne.io.read_epochs_fieldtrip(cur_fname, info) return with pytest.warns(**pytestwarning): epoched_ft = mne.io.read_epochs_fieldtrip(cur_fname, info) assert isinstance(epoched_ft.metadata, pandas.DataFrame) else: with pytest.warns(None) as warn_record: if version == 'v73' and not _has_h5py(): with pytest.raises(ImportError): mne.io.read_epochs_fieldtrip(cur_fname, info) return epoched_ft = mne.io.read_epochs_fieldtrip(cur_fname, info) assert epoched_ft.metadata is None assert_warning_in_record(pandas_not_found_warning_msg, warn_record) if pytestwarning['expected_warning'] is not None: assert_warning_in_record(pytestwarning['match'], warn_record) mne_data = mne_epoched.get_data()[:, :, :-1] ft_data = epoched_ft.get_data() check_data(mne_data, ft_data, cur_system) check_info_fields(mne_epoched, epoched_ft, use_info) # weird sfreq from mne.externals.pymatreader import read_mat def modify_mat(fname, variable_names=None, ignore_fields=None): out = read_mat(fname, variable_names, ignore_fields) if 'fsample' in out['data']: out['data']['fsample'] = np.repeat(out['data']['fsample'], 2) return out monkeypatch.setattr(mne.externals.pymatreader, 'read_mat', modify_mat) with pytest.warns(RuntimeWarning, match='multiple'): mne.io.read_epochs_fieldtrip(cur_fname, info) @testing.requires_testing_data # Reading the sample CNT data results in a RuntimeWarning because it cannot # parse the measurement date. We need to ignore that warning. 
@pytest.mark.filterwarnings('ignore:.*parse meas date.*:RuntimeWarning') @pytest.mark.filterwarnings('ignore:.*number of bytes.*:RuntimeWarning') @pytest.mark.parametrize('cur_system, version, use_info', all_test_params_raw) def test_raw(cur_system, version, use_info): """Test comparing reading a raw fiff file and the FieldTrip version.""" # Load the raw fiff file with mne test_data_folder_ft = get_data_paths(cur_system) raw_fiff_mne = get_raw_data(cur_system, drop_extra_chs=True) if use_info: info = get_raw_info(cur_system) pytestwarning = {'expected_warning': None} else: info = None pytestwarning = no_info_warning cur_fname = os.path.join(test_data_folder_ft, 'raw_%s.mat' % (version,)) if version == 'v73' and not _has_h5py(): with pytest.raises(ImportError): mne.io.read_raw_fieldtrip(cur_fname, info) return with pytest.warns(**pytestwarning): raw_fiff_ft = mne.io.read_raw_fieldtrip(cur_fname, info) if cur_system == 'BTI' and not use_info: raw_fiff_ft.drop_channels(['MzA', 'MxA', 'MyaA', 'MyA', 'MxaA', 'MzaA']) if cur_system == 'eximia' and not use_info: raw_fiff_ft.drop_channels(['TRIG2', 'TRIG1', 'GATE']) # Check that the data was loaded correctly check_data(raw_fiff_mne.get_data(), raw_fiff_ft.get_data(), cur_system) # Check info field check_info_fields(raw_fiff_mne, raw_fiff_ft, use_info) @testing.requires_testing_data def test_load_epoched_as_raw(): """Test whether exception is thrown when loading epochs as raw.""" test_data_folder_ft = get_data_paths('neuromag306') info = get_raw_info('neuromag306') cur_fname = os.path.join(test_data_folder_ft, 'epoched_v7.mat') with pytest.raises(RuntimeError): mne.io.read_raw_fieldtrip(cur_fname, info) @testing.requires_testing_data def test_invalid_trialinfocolumn(): """Test for exceptions when using wrong values for trialinfo parameter.""" test_data_folder_ft = get_data_paths('neuromag306') info = get_raw_info('neuromag306') cur_fname = os.path.join(test_data_folder_ft, 'epoched_v7.mat') with pytest.raises(ValueError): mne.io.read_epochs_fieldtrip(cur_fname, info, trialinfo_column=-1) with pytest.raises(ValueError): mne.io.read_epochs_fieldtrip(cur_fname, info, trialinfo_column=3) @testing.requires_testing_data def test_create_events(): """Test 2dim trialinfo fields.""" from mne.externals.pymatreader import read_mat test_data_folder_ft = get_data_paths('neuromag306') cur_fname = os.path.join(test_data_folder_ft, 'epoched_v7.mat') original_data = read_mat(cur_fname, ['data', ]) new_data = copy.deepcopy(original_data) new_data['trialinfo'] = np.array([[1, 2, 3, 4], [1, 2, 3, 4], [1, 2, 3, 4]]) with pytest.raises(ValueError): _create_events(new_data, -1) for cur_col in np.arange(4): evts = _create_events(new_data, cur_col) assert np.all(evts[:, 2] == cur_col + 1) with pytest.raises(ValueError): _create_events(new_data, 4) @testing.requires_testing_data @pytest.mark.parametrize('version', all_versions) @requires_h5py def test_one_channel_elec_bug(version): """Test if loading data having only one elec in the elec field works.""" fname = os.path.join(mne.datasets.testing.data_path(), 'fieldtrip', 'one_channel_elec_bug_data_%s.mat' % (version, )) with pytest.warns(**no_info_warning): mne.io.read_raw_fieldtrip(fname, info=None) @testing.requires_testing_data # Reading the sample CNT data results in a RuntimeWarning because it cannot # parse the measurement date. We need to ignore that warning. 
@pytest.mark.filterwarnings('ignore:.*parse meas date.*:RuntimeWarning') @pytest.mark.filterwarnings('ignore:.*number of bytes.*:RuntimeWarning') @pytest.mark.parametrize('version', all_versions) @pytest.mark.parametrize('type', ['averaged', 'epoched', 'raw']) @requires_h5py def test_throw_exception_on_cellarray(version, type): """Test for a meaningful exception when the data is a cell array.""" fname = os.path.join(get_data_paths('cellarray'), '%s_%s.mat' % (type, version)) info = get_raw_info('CNT') with pytest.raises(RuntimeError, match='Loading of data in cell arrays ' 'is not supported'): if type == 'averaged': mne.read_evoked_fieldtrip(fname, info) elif type == 'epoched': mne.read_epochs_fieldtrip(fname, info) elif type == 'raw': mne.io.read_raw_fieldtrip(fname, info) @testing.requires_testing_data def test_with_missing_channels(): """Test _create_info when channels are missing from info.""" cur_system = 'neuromag306' test_data_folder_ft = get_data_paths(cur_system) info = get_raw_info(cur_system) del info['chs'][1:20] info._update_redundant() with pytest.warns(RuntimeWarning): mne.io.read_raw_fieldtrip( os.path.join(test_data_folder_ft, 'raw_v7.mat'), info) mne.read_evoked_fieldtrip( os.path.join(test_data_folder_ft, 'averaged_v7.mat'), info) mne.read_epochs_fieldtrip( os.path.join(test_data_folder_ft, 'epoched_v7.mat'), info) @testing.requires_testing_data @pytest.mark.filterwarnings('ignore: Importing FieldTrip data without an info') @pytest.mark.filterwarnings('ignore: Cannot guess the correct type') def test_throw_error_on_non_uniform_time_field(): """Test if an error is thrown when time fields are not uniform.""" fname = os.path.join(mne.datasets.testing.data_path(), 'fieldtrip', 'not_uniform_time.mat') with pytest.raises(RuntimeError, match='Loading data with non-uniform ' 'times per epoch is not supported'): mne.io.read_epochs_fieldtrip(fname, info=None) @testing.requires_testing_data @pytest.mark.filterwarnings('ignore: Importing FieldTrip data without an info') def test_throw_error_when_importing_old_ft_version_data(): """Test if an error is thrown if the data was saved with an old version.""" fname = os.path.join(mne.datasets.testing.data_path(), 'fieldtrip', 'old_version.mat') with pytest.raises(RuntimeError, match='This file was created with ' 'an old version of FieldTrip. You ' 'can convert the data to the new ' 'version by loading it into ' 'FieldTrip and applying ' 'ft_selectdata with an ' 'empty cfg structure on it. ' 'Otherwise you can supply ' 'the Info field.'): mne.io.read_epochs_fieldtrip(fname, info=None)
41.40303
79
0.653956
fca632a21877871c3f6b9c1edf204158b79cdb16
3,190
py
Python
yearn/outputs/postgres/utils.py
pmdaly/yearn-exporter
d1e7697f8bf12cdb1126ea86fa350a26aea23cf8
[ "MIT" ]
1
2022-03-29T01:04:27.000Z
2022-03-29T01:04:27.000Z
yearn/outputs/postgres/utils.py
pmdaly/yearn-exporter
d1e7697f8bf12cdb1126ea86fa350a26aea23cf8
[ "MIT" ]
null
null
null
yearn/outputs/postgres/utils.py
pmdaly/yearn-exporter
d1e7697f8bf12cdb1126ea86fa350a26aea23cf8
[ "MIT" ]
1
2022-03-24T20:54:50.000Z
2022-03-24T20:54:50.000Z
import logging

from brownie import ZERO_ADDRESS, chain, convert
from pony.orm import db_session, select

from yearn.entities import Address, Token, UserTx, db
from yearn.multicall2 import fetch_multicall
from yearn.utils import contract, is_contract

logger = logging.getLogger(__name__)


@db_session
def cache_address(address: str) -> Address:
    address = convert.to_address(address)
    address_entity = Address.get(chainid=chain.id, address=address)
    if not address_entity:
        address_entity = Address(chainid=chain.id, address=address, is_contract=is_contract(address))
    return address_entity


@db_session
def cache_token(address: str) -> Token:
    address_entity = cache_address(address)
    token = Token.get(address=address_entity)
    if not token:
        token = contract(address)
        symbol, name, decimals = fetch_multicall([token,'symbol'],[token,'name'],[token,'decimals'])
        token = Token(address=address_entity, symbol=symbol, name=name, decimals=decimals)
        print(f'token {symbol} added to postgres')
    return token


@db_session
def last_recorded_block(Entity: db.Entity) -> int:
    '''
    Returns last block recorded for sql entity type `Entity`
    '''
    TreasuryTx = 1  # NOTE: Get rid of this when treasury txs are implemented
    if Entity == UserTx:
        return select(max(e.block) for e in Entity if e.vault.address.chainid == chain.id).first()
    elif Entity == TreasuryTx:
        return select(max(e.block) for e in Entity if e.token.address.chainid == chain.id).first()
    return select(max(e.block) for e in Entity if e.chainid == chain.id).first()


@db_session
def fetch_balances(vault_address: str, block=None):
    token_dbid = select(t.token_id for t in Token if t.address.chainid == chain.id and t.address.address == vault_address).first()
    if block and block > last_recorded_block(UserTx):
        # NOTE: we use `postgres.` instead of `self.` so we can make use of parallelism
        raise Exception('this block has not yet been cached into postgres')
    if block:
        balances = db.select(f"""
            a.wallet, coalesce(amount_in,0) - coalesce(amount_out,0) balance
            from (
                select "to" wallet, sum(amount) amount_in
                from user_txs where token_id = $token_dbid and block <= $block
                group by "to" ) a
            left join (
                select "from" wallet, sum(amount) amount_out
                from user_txs where token_id = $token_dbid and block <= $block
                group by "from") b on a.wallet = b.wallet
            """)
    else:
        balances = db.select(f"""
            a.wallet, coalesce(amount_in,0) - coalesce(amount_out,0) balance
            from (
                select "to" wallet, sum(amount) amount_in
                from user_txs where token_id = $token_dbid
                group by "to" ) a
            left join (
                select "from" wallet, sum(amount) amount_out
                from user_txs where token_id = $token_dbid
                group by "from") b on a.wallet = b.wallet
            """)
    return {wallet: balance for wallet,balance in balances if wallet != ZERO_ADDRESS}
43.69863
130
0.652038
58a382241c1531e4f333c21244d2b8af0ea24428
4,027
py
Python
test/test_testing.py
dimitern/cibuildwheel
630653b786790c3dbfbadb22eb2eaa9cd53d9e08
[ "BSD-2-Clause" ]
1
2019-11-26T16:42:43.000Z
2019-11-26T16:42:43.000Z
test/test_testing.py
dimitern/cibuildwheel
630653b786790c3dbfbadb22eb2eaa9cd53d9e08
[ "BSD-2-Clause" ]
null
null
null
test/test_testing.py
dimitern/cibuildwheel
630653b786790c3dbfbadb22eb2eaa9cd53d9e08
[ "BSD-2-Clause" ]
null
null
null
import os import subprocess import textwrap import pytest from . import utils from . import test_projects project_with_a_test = test_projects.new_c_project( setup_cfg_add=textwrap.dedent(r''' [options.extras_require] test = nose ''') ) project_with_a_test.files['test/spam_test.py'] = r''' import os import platform import sys import struct from unittest import TestCase import spam def path_contains(parent, child): """ returns True if `child` is inside `parent`. Works around path-comparison bugs caused by short-paths on Windows e.g. vssadm~1 instead of vssadministrator """ parent = os.path.abspath(parent) child = os.path.abspath(child) while child != os.path.dirname(child): child = os.path.dirname(child) if os.stat(parent) == os.stat(child): # parent and child refer to the same directory on the filesystem return True return False class TestSpam(TestCase): def test_system(self): self.assertEqual(0, spam.system('python -c "exit(0)"')) self.assertNotEqual(0, spam.system('python -c "exit(1)"')) def test_virtualenv(self): # sys.prefix is different from sys.base_prefix when running a virtualenv # See https://docs.python.org/3/library/venv.html, which virtualenv seems # to honor in recent releases # Python 2 doesn't have sys.base_prefix by default if not hasattr(sys, 'base_prefix') or sys.prefix == sys.base_prefix: self.fail("Not running in a virtualenv") self.assertTrue(path_contains(sys.prefix, sys.executable)) self.assertTrue(path_contains(sys.prefix, spam.__file__)) def test_uname(self): if platform.system() == "Windows": return # if we're running in 32-bit Python, check that the machine is i686. # See #336 for more info. bits = struct.calcsize("P") * 8 if bits == 32: self.assertEqual(platform.machine(), "i686") ''' def test(tmp_path): project_dir = tmp_path / 'project' project_with_a_test.generate(project_dir) # build and test the wheels actual_wheels = utils.cibuildwheel_run(project_dir, add_env={ 'CIBW_TEST_REQUIRES': 'nose', # the 'false ||' bit is to ensure this command runs in a shell on # mac/linux. 'CIBW_TEST_COMMAND': 'false || nosetests {project}/test', 'CIBW_TEST_COMMAND_WINDOWS': 'COLOR 00 || nosetests {project}/test', }) # also check that we got the right wheels expected_wheels = utils.expected_wheels('spam', '0.1.0') assert set(actual_wheels) == set(expected_wheels) def test_extras_require(tmp_path): project_dir = tmp_path / 'project' project_with_a_test.generate(project_dir) # build and test the wheels actual_wheels = utils.cibuildwheel_run(project_dir, add_env={ 'CIBW_TEST_EXTRAS': 'test', # the 'false ||' bit is to ensure this command runs in a shell on # mac/linux. 'CIBW_TEST_COMMAND': 'false || nosetests {project}/test', 'CIBW_TEST_COMMAND_WINDOWS': 'COLOR 00 || nosetests {project}/test', }) # also check that we got the right wheels expected_wheels = utils.expected_wheels('spam', '0.1.0') assert set(actual_wheels) == set(expected_wheels) def test_failing_test(tmp_path): """Ensure a failing test causes cibuildwheel to error out and exit""" project_dir = tmp_path / 'project' output_dir = tmp_path / 'output' project_with_a_test.generate(project_dir) with pytest.raises(subprocess.CalledProcessError): utils.cibuildwheel_run(project_dir, output_dir=output_dir, add_env={ 'CIBW_TEST_COMMAND': 'false', # manylinux1 has a version of bash that's been shown to have # problems with this, so let's check that. 'CIBW_MANYLINUX_I686_IMAGE': 'manylinux1', 'CIBW_MANYLINUX_X86_64_IMAGE': 'manylinux1', }) assert len(os.listdir(output_dir)) == 0
33.008197
81
0.667494
5292955ec98d8c58dc944ee0a39511738b74c512
7,243
py
Python
tests/end2end/test_user_pattern.py
zhmcclient/python-zhmcclient
7d200afb0343a02535c52dc8b6ba0d224010075c
[ "Apache-2.0" ]
30
2016-08-24T10:02:19.000Z
2021-11-25T10:44:26.000Z
tests/end2end/test_user_pattern.py
zhmcclient/python-zhmcclient
7d200afb0343a02535c52dc8b6ba0d224010075c
[ "Apache-2.0" ]
883
2016-08-23T12:32:12.000Z
2022-03-28T13:18:24.000Z
tests/end2end/test_user_pattern.py
zhmcclient/python-zhmcclient
7d200afb0343a02535c52dc8b6ba0d224010075c
[ "Apache-2.0" ]
25
2017-06-23T18:10:51.000Z
2022-03-28T02:53:29.000Z
# Copyright 2021 IBM Corp. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ End2end tests for user patterns (on CPCs in DPM mode). These tests do not change any existing user patterns, but create, modify and delete test user patterns. """ from __future__ import absolute_import, print_function import random import warnings import pytest from requests.packages import urllib3 import zhmcclient # pylint: disable=line-too-long,unused-import from zhmcclient.testutils.hmc_definition_fixtures import hmc_definition, hmc_session # noqa: F401, E501 from zhmcclient.testutils.cpc_fixtures import all_cpcs # noqa: F401, E501 # pylint: enable=line-too-long,unused-import from .utils import runtest_find_list, TEST_PREFIX, End2endTestWarning urllib3.disable_warnings() # Properties in minimalistic UserPattern objects (e.g. find_by_name()) UPATT_MINIMAL_PROPS = ['element-uri', 'name'] # Properties in UserPattern objects returned by list() without full props UPATT_LIST_PROPS = ['element-uri', 'name', 'type'] # Properties whose values can change between retrievals of UserPattern objects UPATT_VOLATILE_PROPS = [] def test_upatt_find_list(all_cpcs): # noqa: F811 # pylint: disable=redefined-outer-name """ Test list(), find(), findall(). """ if not all_cpcs: pytest.skip("No CPCs provided") for cpc in all_cpcs: session = cpc.manager.session console = cpc.manager.client.consoles.console client = console.manager.client api_version = client.query_api_version() hmc_version = api_version['hmc-version'] # pylint: disable=unnecessary-lambda hmc_version_info = list(map(lambda v: int(v), hmc_version.split('.'))) if hmc_version_info < [2, 13, 0]: pytest.skip("HMC {hv} does not yet support user patterns". format(hv=hmc_version)) # Pick a random user pattern upatt_list = console.user_patterns.list() if not upatt_list: msg_txt = "No user patterns defined on CPC {}". \ format(cpc.name) warnings.warn(msg_txt, End2endTestWarning) pytest.skip(msg_txt) upatt = random.choice(upatt_list) print("Testing on CPC {}".format(cpc.name)) runtest_find_list( session, console.user_patterns, upatt.name, 'name', 'element-uri', UPATT_VOLATILE_PROPS, UPATT_MINIMAL_PROPS, UPATT_LIST_PROPS) def test_upatt_crud(all_cpcs): # noqa: F811 # pylint: disable=redefined-outer-name """ Test create, read, update and delete a user pattern. """ if not all_cpcs: pytest.skip("No CPCs provided") for cpc in all_cpcs: print("Testing on CPC {}".format(cpc.name)) session = cpc.manager.session console = cpc.manager.client.consoles.console client = console.manager.client hd = session.hmc_definition api_version = client.query_api_version() hmc_version = api_version['hmc-version'] # pylint: disable=unnecessary-lambda hmc_version_info = list(map(lambda v: int(v), hmc_version.split('.'))) if hmc_version_info < [2, 13, 0]: pytest.skip("HMC {hv} does not yet support user patterns". 
format(hv=hmc_version)) upatt_name = TEST_PREFIX + ' test_upatt_crud upatt1' upatt_name_new = upatt_name + ' new' # Ensure a clean starting point for this test try: upatt = console.user_patterns.find(name=upatt_name) except zhmcclient.NotFound: pass else: warnings.warn( "Deleting test user pattern from previous run: '{p}' " "on CPC '{c}'". format(p=upatt_name, c=cpc.name), UserWarning) upatt.delete() # Pick a template user to be the template user for the user pattern template_users = console.users.findall(type='template') if not template_users: msg_txt = "No template users on HMC {h}".format(h=hd.hmc_host) warnings.warn(msg_txt, End2endTestWarning) pytest.skip(msg_txt) template_user = template_users[0] # Test creating the user pattern upatt_input_props = { 'name': upatt_name, 'description': 'Test user pattern for zhmcclient end2end tests', 'pattern': TEST_PREFIX + ' test_upatt_crud .+', 'type': 'regular-expression', 'retention-time': 180, 'user-template-uri': template_user.uri, # required until z13 } upatt_auto_props = {} # The code to be tested try: upatt = console.user_patterns.create(upatt_input_props) except zhmcclient.HTTPError as exc: if exc.http_status == 403 and exc.reason == 1: msg_txt = "HMC userid '{u}' is not authorized for the " \ "'Manage User Patterns' task on HMC {h}". \ format(u=hd.hmc_userid, h=hd.hmc_host) warnings.warn(msg_txt, End2endTestWarning) pytest.skip(msg_txt) else: raise for pn, exp_value in upatt_input_props.items(): assert upatt.properties[pn] == exp_value, \ "Unexpected value for property {!r}".format(pn) upatt.pull_full_properties() for pn, exp_value in upatt_input_props.items(): assert upatt.properties[pn] == exp_value, \ "Unexpected value for property {!r}".format(pn) for pn, exp_value in upatt_auto_props.items(): assert upatt.properties[pn] == exp_value, \ "Unexpected value for property {!r}".format(pn) # Test updating a property of the user pattern new_desc = "Updated user pattern description." # The code to be tested upatt.update_properties(dict(description=new_desc)) assert upatt.properties['description'] == new_desc upatt.pull_full_properties() assert upatt.properties['description'] == new_desc # Test renaming the user pattern # The code to be tested upatt.update_properties(dict(name=upatt_name_new)) assert upatt.properties['name'] == upatt_name_new upatt.pull_full_properties() assert upatt.properties['name'] == upatt_name_new with pytest.raises(zhmcclient.NotFound): console.user_patterns.find(name=upatt_name) # Test deleting the user pattern # The code to be tested upatt.delete() with pytest.raises(zhmcclient.NotFound): console.user_patterns.find(name=upatt_name_new)
36.396985
104
0.64407
58c486bcc354daacb722aba2cb241ccfc2f0f9ac
1,251
py
Python
terrascript/boundary/r.py
mjuenema/python-terrascript
6d8bb0273a14bfeb8ff8e950fe36f97f7c6e7b1d
[ "BSD-2-Clause" ]
507
2017-07-26T02:58:38.000Z
2022-01-21T12:35:13.000Z
terrascript/boundary/r.py
mjuenema/python-terrascript
6d8bb0273a14bfeb8ff8e950fe36f97f7c6e7b1d
[ "BSD-2-Clause" ]
135
2017-07-20T12:01:59.000Z
2021-10-04T22:25:40.000Z
terrascript/boundary/r.py
mjuenema/python-terrascript
6d8bb0273a14bfeb8ff8e950fe36f97f7c6e7b1d
[ "BSD-2-Clause" ]
81
2018-02-20T17:55:28.000Z
2022-01-31T07:08:40.000Z
# terrascript/boundary/r.py
# Automatically generated by tools/makecode.py ()

import warnings

warnings.warn(
    "using the 'legacy layout' is deprecated", DeprecationWarning, stacklevel=2
)

import terrascript


class boundary_account(terrascript.Resource):
    pass


class boundary_account_oidc(terrascript.Resource):
    pass


class boundary_account_password(terrascript.Resource):
    pass


class boundary_auth_method(terrascript.Resource):
    pass


class boundary_auth_method_oidc(terrascript.Resource):
    pass


class boundary_auth_method_password(terrascript.Resource):
    pass


class boundary_credential_library_vault(terrascript.Resource):
    pass


class boundary_credential_store_vault(terrascript.Resource):
    pass


class boundary_group(terrascript.Resource):
    pass


class boundary_host(terrascript.Resource):
    pass


class boundary_host_catalog(terrascript.Resource):
    pass


class boundary_host_set(terrascript.Resource):
    pass


class boundary_managed_group(terrascript.Resource):
    pass


class boundary_role(terrascript.Resource):
    pass


class boundary_scope(terrascript.Resource):
    pass


class boundary_target(terrascript.Resource):
    pass


class boundary_user(terrascript.Resource):
    pass
16.038462
79
0.785771
d40cb93550fc1859b0c95536efae827fad2471ce
29,566
py
Python
cvxpy/tests/test_expressions.py
quantopian/cvxpy
7deee4d172470aa8f629dab7fead50467afa75ff
[ "Apache-2.0" ]
5
2017-08-31T01:37:00.000Z
2022-03-24T04:23:09.000Z
cvxpy/tests/test_expressions.py
quantopian/cvxpy
7deee4d172470aa8f629dab7fead50467afa75ff
[ "Apache-2.0" ]
null
null
null
cvxpy/tests/test_expressions.py
quantopian/cvxpy
7deee4d172470aa8f629dab7fead50467afa75ff
[ "Apache-2.0" ]
6
2017-02-09T19:37:07.000Z
2021-01-07T00:17:54.000Z
""" Copyright 2017 Steven Diamond Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ from cvxpy.atoms import * from cvxpy.atoms.affine.add_expr import AddExpression from cvxpy.expressions.expression import * from cvxpy.expressions.variables import Variable, Semidef, NonNegative from cvxpy.expressions.constants import Constant from cvxpy.expressions.constants import Parameter from cvxpy import Problem, Minimize import cvxpy.utilities as u import cvxpy.interface.matrix_utilities as intf import cvxpy.settings as s from collections import deque import unittest from cvxpy.tests.base_test import BaseTest import numpy as np import warnings import sys PY35 = sys.version_info >= (3, 5) class TestExpressions(BaseTest): """ Unit tests for the expression/expression module. """ def setUp(self): self.a = Variable(name='a') self.x = Variable(2, name='x') self.y = Variable(3, name='y') self.z = Variable(2, name='z') self.A = Variable(2, 2, name='A') self.B = Variable(2, 2, name='B') self.C = Variable(3, 2, name='C') self.intf = intf.DEFAULT_INTF # Test the Variable class. def test_variable(self): x = Variable(2) y = Variable(2) assert y.name() != x.name() x = Variable(2, name='x') y = Variable() self.assertEqual(x.name(), 'x') self.assertEqual(x.size, (2, 1)) self.assertEqual(y.size, (1, 1)) self.assertEqual(x.curvature, s.AFFINE) self.assertEqual(x.canonical_form[0].size, (2, 1)) self.assertEqual(x.canonical_form[1], []) self.assertEqual(repr(self.x), "Variable(2, 1)") self.assertEqual(repr(self.A), "Variable(2, 2)") # # Scalar variable # coeff = self.a.coefficients() # self.assertEqual(coeff[self.a.id], [1]) # # Vector variable. # coeffs = x.coefficients() # self.assertItemsEqual(coeffs.keys(), [x.id]) # vec = coeffs[x.id][0] # self.assertEqual(vec.shape, (2,2)) # self.assertEqual(vec[0,0], 1) # # Matrix variable. # coeffs = self.A.coefficients() # self.assertItemsEqual(coeffs.keys(), [self.A.id]) # self.assertEqual(len(coeffs[self.A.id]), 2) # mat = coeffs[self.A.id][1] # self.assertEqual(mat.shape, (2,4)) # self.assertEqual(mat[0,2], 1) def test_assign_var_value(self): """Test assigning a value to a variable. """ # Scalar variable. a = Variable() a.value = 1 self.assertEqual(a.value, 1) with self.assertRaises(Exception) as cm: a.value = [2, 1] self.assertEqual(str(cm.exception), "Invalid dimensions (2, 1) for Variable value.") # Test assigning None. a.value = 1 a.value = None assert a.value is None # Vector variable. x = Variable(2) x.value = [2, 1] self.assertItemsAlmostEqual(x.value, [2, 1]) # Matrix variable. A = Variable(3, 2) A.value = np.ones((3, 2)) self.assertItemsAlmostEqual(A.value, np.ones((3, 2))) # Test assigning negative val to nonnegative variable. x = NonNegative() with self.assertRaises(Exception) as cm: x.value = -2 self.assertEqual(str(cm.exception), "Invalid sign for NonNegative value.") # Small negative values are rounded to 0. x.value = -1e-8 self.assertEqual(x.value, 0) # Test tranposing variables. 
def test_transpose_variable(self): var = self.a.T self.assertEqual(var.name(), "a") self.assertEqual(var.size, (1, 1)) self.a.save_value(2) self.assertEqual(var.value, 2) var = self.x.T self.assertEqual(var.name(), "x.T") self.assertEqual(var.size, (1, 2)) self.x.save_value(np.matrix([1, 2]).T) self.assertEqual(var.value[0, 0], 1) self.assertEqual(var.value[0, 1], 2) var = self.C.T self.assertEqual(var.name(), "C.T") self.assertEqual(var.size, (2, 3)) # coeffs = var.canonical_form[0].coefficients() # mat = coeffs.values()[0][0] # self.assertEqual(mat.size, (2,6)) # self.assertEqual(mat[1,3], 1) index = var[1, 0] self.assertEqual(index.name(), "C.T[1, 0]") self.assertEqual(index.size, (1, 1)) var = self.x.T.T self.assertEqual(var.name(), "x.T.T") self.assertEqual(var.size, (2, 1)) # Test the Constant class. def test_constants(self): c = Constant(2) self.assertEqual(c.name(), str(2)) c = Constant(2) self.assertEqual(c.value, 2) self.assertEqual(c.size, (1, 1)) self.assertEqual(c.curvature, s.CONSTANT) self.assertEqual(c.sign, s.POSITIVE) self.assertEqual(Constant(-2).sign, s.NEGATIVE) self.assertEqual(Constant(0).sign, s.ZERO) self.assertEqual(c.canonical_form[0].size, (1, 1)) self.assertEqual(c.canonical_form[1], []) # coeffs = c.coefficients() # self.assertEqual(coeffs.keys(), [s.CONSTANT]) # self.assertEqual(coeffs[s.CONSTANT], [2]) # Test the sign. c = Constant([[2], [2]]) self.assertEqual(c.size, (1, 2)) self.assertEqual(c.sign, s.POSITIVE) self.assertEqual((-c).sign, s.NEGATIVE) self.assertEqual((0*c).sign, s.ZERO) c = Constant([[2], [-2]]) self.assertEqual(c.sign, s.UNKNOWN) # Test sign of a complex expression. c = Constant([1, 2]) A = Constant([[1, 1], [1, 1]]) exp = c.T*A*c self.assertEqual(exp.sign, s.POSITIVE) self.assertEqual((c.T*c).sign, s.POSITIVE) exp = c.T.T self.assertEqual(exp.sign, s.POSITIVE) exp = c.T*self.A self.assertEqual(exp.sign, s.UNKNOWN) # Test repr. self.assertEqual(repr(c), "Constant(CONSTANT, POSITIVE, (2, 1))") def test_1D_array(self): """Test NumPy 1D arrays as constants. """ c = np.array([1, 2]) p = Parameter(2) p.value = [1, 1] self.assertEqual((c*p).value, 3) self.assertEqual((c*self.x).size, (1, 1)) self.x.save_value(np.array([1, 4])) self.assertEqual((c.T*self.x).value, 9) # Test the Parameter class. def test_parameters(self): p = Parameter(name='p') self.assertEqual(p.name(), "p") self.assertEqual(p.size, (1, 1)) p = Parameter(4, 3, sign="positive") with self.assertRaises(Exception) as cm: p.value = 1 self.assertEqual(str(cm.exception), "Invalid dimensions (1, 1) for Parameter value.") val = -np.ones((4, 3)) val[0, 0] = 2 p = Parameter(4, 3, sign="positive") with self.assertRaises(Exception) as cm: p.value = val self.assertEqual(str(cm.exception), "Invalid sign for Parameter value.") p = Parameter(4, 3, sign="negative") with self.assertRaises(Exception) as cm: p.value = val self.assertEqual(str(cm.exception), "Invalid sign for Parameter value.") # No error for unknown sign. p = Parameter(4, 3) p.value = val # Initialize a parameter with a value. p = Parameter(value=10) self.assertEqual(p.value, 10) # Test assigning None. p.value = 10 p.value = None assert p.value is None with self.assertRaises(Exception) as cm: p = Parameter(2, 1, sign="negative", value=[2, 1]) self.assertEqual(str(cm.exception), "Invalid sign for Parameter value.") with self.assertRaises(Exception) as cm: p = Parameter(4, 3, sign="positive", value=[1, 2]) self.assertEqual(str(cm.exception), "Invalid dimensions (2, 1) for Parameter value.") # Test repr. 
p = Parameter(4, 3, sign="negative") self.assertEqual(repr(p), 'Parameter(4, 3, sign="NEGATIVE")') # Test the AddExpresion class. def test_add_expression(self): # Vectors c = Constant([2, 2]) exp = self.x + c self.assertEqual(exp.curvature, s.AFFINE) self.assertEqual(exp.sign, s.UNKNOWN) self.assertEqual(exp.canonical_form[0].size, (2, 1)) self.assertEqual(exp.canonical_form[1], []) # self.assertEqual(exp.name(), self.x.name() + " + " + c.name()) self.assertEqual(exp.size, (2, 1)) z = Variable(2, name='z') exp = exp + z + self.x with self.assertRaises(Exception) as cm: (self.x + self.y) self.assertEqual(str(cm.exception), "Incompatible dimensions (2, 1) (3, 1)") # Matrices exp = self.A + self.B self.assertEqual(exp.curvature, s.AFFINE) self.assertEqual(exp.size, (2, 2)) with self.assertRaises(Exception) as cm: (self.A + self.C) self.assertEqual(str(cm.exception), "Incompatible dimensions (2, 2) (3, 2)") with self.assertRaises(Exception) as cm: AddExpression([self.A, self.C]) self.assertEqual(str(cm.exception), "Incompatible dimensions (2, 2) (3, 2)") # Test that sum is flattened. exp = self.x + c + self.x self.assertEqual(len(exp.args), 3) # Test repr. self.assertEqual(repr(exp), "Expression(AFFINE, UNKNOWN, (2, 1))") # Test the SubExpresion class. def test_sub_expression(self): # Vectors c = Constant([2, 2]) exp = self.x - c self.assertEqual(exp.curvature, s.AFFINE) self.assertEqual(exp.sign, s.UNKNOWN) self.assertEqual(exp.canonical_form[0].size, (2, 1)) self.assertEqual(exp.canonical_form[1], []) # self.assertEqual(exp.name(), self.x.name() + " - " + Constant([2,2]).name()) self.assertEqual(exp.size, (2, 1)) z = Variable(2, name='z') exp = exp - z - self.x with self.assertRaises(Exception) as cm: (self.x - self.y) self.assertEqual(str(cm.exception), "Incompatible dimensions (2, 1) (3, 1)") # Matrices exp = self.A - self.B self.assertEqual(exp.curvature, s.AFFINE) self.assertEqual(exp.size, (2, 2)) with self.assertRaises(Exception) as cm: (self.A - self.C) self.assertEqual(str(cm.exception), "Incompatible dimensions (2, 2) (3, 2)") # Test repr. self.assertEqual(repr(self.x - c), "Expression(AFFINE, UNKNOWN, (2, 1))") # Test the MulExpresion class. def test_mul_expression(self): # Vectors c = Constant([[2], [2]]) exp = c*self.x self.assertEqual(exp.curvature, s.AFFINE) self.assertEqual((c[0]*self.x).sign, s.UNKNOWN) self.assertEqual(exp.canonical_form[0].size, (1, 1)) self.assertEqual(exp.canonical_form[1], []) # self.assertEqual(exp.name(), c.name() + " * " + self.x.name()) self.assertEqual(exp.size, (1, 1)) with self.assertRaises(Exception) as cm: ([2, 2, 3]*self.x) self.assertEqual(str(cm.exception), "Incompatible dimensions (3, 1) (2, 1)") # Matrices with self.assertRaises(Exception) as cm: Constant([[2, 1], [2, 2]]) * self.C self.assertEqual(str(cm.exception), "Incompatible dimensions (2, 2) (3, 2)") # Affine times affine is okay with warnings.catch_warnings(): warnings.simplefilter("ignore") q = self.A * self.B self.assertTrue(q.is_quadratic()) # Nonaffine times nonconstant raises error with warnings.catch_warnings(): warnings.simplefilter("ignore") with self.assertRaises(Exception) as cm: ((self.A * self.B) * self.A) self.assertEqual(str(cm.exception), "Cannot multiply UNKNOWN and AFFINE.") # Constant expressions T = Constant([[1, 2, 3], [3, 5, 5]]) exp = (T + T) * self.B self.assertEqual(exp.curvature, s.AFFINE) self.assertEqual(exp.size, (3, 2)) # Expression that would break sign multiplication without promotion. 
c = Constant([[2], [2], [-2]]) exp = [[1], [2]] + c*self.C self.assertEqual(exp.sign, s.UNKNOWN) # Scalar constants on the right should be moved left. expr = self.C*2 self.assertEqual(expr.args[0].value, 2) # Scalar variables on the left should be moved right. expr = self.a*[2, 1] self.assertItemsAlmostEqual(expr.args[0].value, [2, 1]) def test_matmul_expression(self): """Test matmul function, corresponding to .__matmul__( operator. """ if PY35: # Vectors c = Constant([[2], [2]]) exp = c.__matmul__(self.x) self.assertEqual(exp.curvature, s.AFFINE) self.assertEqual(exp.sign, s.UNKNOWN) self.assertEqual(exp.canonical_form[0].size, (1, 1)) self.assertEqual(exp.canonical_form[1], []) # self.assertEqual(exp.name(), c.name() + " .__matmul__( " + self.x.name()) self.assertEqual(exp.size, (1, 1)) with self.assertRaises(Exception) as cm: self.x.__matmul__(2) self.assertEqual(str(cm.exception), "Scalar operands are not allowed, use '*' instead") with self.assertRaises(Exception) as cm: (self.x.__matmul__(np.array([2, 2, 3]))) self.assertEqual(str(cm.exception), "Incompatible dimensions (2, 1) (3, 1)") # Matrices with self.assertRaises(Exception) as cm: Constant([[2, 1], [2, 2]]) .__matmul__(self.C) self.assertEqual(str(cm.exception), "Incompatible dimensions (2, 2) (3, 2)") # Affine times affine is okay with warnings.catch_warnings(): warnings.simplefilter("ignore") q = self.A .__matmul__(self.B) self.assertTrue(q.is_quadratic()) # Nonaffine times nonconstant raises error with warnings.catch_warnings(): warnings.simplefilter("ignore") with self.assertRaises(Exception) as cm: (self.A.__matmul__(self.B).__matmul__(self.A)) self.assertEqual(str(cm.exception), "Cannot multiply UNKNOWN and AFFINE.") # Constant expressions T = Constant([[1, 2, 3], [3, 5, 5]]) exp = (T + T) .__matmul__(self.B) self.assertEqual(exp.curvature, s.AFFINE) self.assertEqual(exp.size, (3, 2)) # Expression that would break sign multiplication without promotion. c = Constant([[2], [2], [-2]]) exp = [[1], [2]] + c.__matmul__(self.C) self.assertEqual(exp.sign, s.UNKNOWN) else: pass # Test the DivExpresion class. def test_div_expression(self): # Vectors exp = self.x/2 self.assertEqual(exp.curvature, s.AFFINE) self.assertEqual(exp.sign, s.UNKNOWN) self.assertEqual(exp.canonical_form[0].size, (2, 1)) self.assertEqual(exp.canonical_form[1], []) # self.assertEqual(exp.name(), c.name() + " * " + self.x.name()) self.assertEqual(exp.size, (2, 1)) with self.assertRaises(Exception) as cm: (self.x/[2, 2, 3]) print(cm.exception) self.assertEqual(str(cm.exception), "Can only divide by a scalar constant.") # Constant expressions. c = Constant(2) exp = c/(3 - 5) self.assertEqual(exp.curvature, s.CONSTANT) self.assertEqual(exp.size, (1, 1)) self.assertEqual(exp.sign, s.NEGATIVE) # Parameters. p = Parameter(sign="positive") exp = 2/p p.value = 2 self.assertEqual(exp.value, 1) rho = Parameter(sign="positive") rho.value = 1 self.assertEqual(rho.sign, s.POSITIVE) self.assertEqual(Constant(2).sign, s.POSITIVE) self.assertEqual((Constant(2)/Constant(2)).sign, s.POSITIVE) self.assertEqual((Constant(2)*rho).sign, s.POSITIVE) self.assertEqual((rho/2).sign, s.POSITIVE) # Test the NegExpression class. 
def test_neg_expression(self): # Vectors exp = -self.x self.assertEqual(exp.curvature, s.AFFINE) assert exp.is_affine() self.assertEqual(exp.sign, s.UNKNOWN) assert not exp.is_positive() self.assertEqual(exp.canonical_form[0].size, (2, 1)) self.assertEqual(exp.canonical_form[1], []) # self.assertEqual(exp.name(), "-%s" % self.x.name()) self.assertEqual(exp.size, self.x.size) # Matrices exp = -self.C self.assertEqual(exp.curvature, s.AFFINE) self.assertEqual(exp.size, (3, 2)) # Test promotion of scalar constants. def test_scalar_const_promotion(self): # Vectors exp = self.x + 2 self.assertEqual(exp.curvature, s.AFFINE) assert exp.is_affine() self.assertEqual(exp.sign, s.UNKNOWN) assert not exp.is_negative() self.assertEqual(exp.canonical_form[0].size, (2, 1)) self.assertEqual(exp.canonical_form[1], []) # self.assertEqual(exp.name(), self.x.name() + " + " + Constant(2).name()) self.assertEqual(exp.size, (2, 1)) self.assertEqual((4 - self.x).size, (2, 1)) self.assertEqual((4 * self.x).size, (2, 1)) self.assertEqual((4 <= self.x).size, (2, 1)) self.assertEqual((4 == self.x).size, (2, 1)) self.assertEqual((self.x >= 4).size, (2, 1)) # Matrices exp = (self.A + 2) + 4 self.assertEqual(exp.curvature, s.AFFINE) self.assertEqual((3 * self.A).size, (2, 2)) self.assertEqual(exp.size, (2, 2)) # Test indexing expression. def test_index_expression(self): # Tuple of integers as key. exp = self.x[1, 0] # self.assertEqual(exp.name(), "x[1,0]") self.assertEqual(exp.curvature, s.AFFINE) assert exp.is_affine() self.assertEqual(exp.size, (1, 1)) # coeff = exp.canonical_form[0].coefficients()[self.x][0] # self.assertEqual(coeff[0,1], 1) self.assertEqual(exp.value, None) exp = self.x[1, 0].T # self.assertEqual(exp.name(), "x[1,0]") self.assertEqual(exp.curvature, s.AFFINE) self.assertEqual(exp.size, (1, 1)) with self.assertRaises(Exception) as cm: (self.x[2, 0]) self.assertEqual(str(cm.exception), "Index/slice out of bounds.") # Slicing exp = self.C[0:2, 1] # self.assertEqual(exp.name(), "C[0:2,1]") self.assertEqual(exp.size, (2, 1)) exp = self.C[0:, 0:2] # self.assertEqual(exp.name(), "C[0:,0:2]") self.assertEqual(exp.size, (3, 2)) exp = self.C[0::2, 0::2] # self.assertEqual(exp.name(), "C[0::2,0::2]") self.assertEqual(exp.size, (2, 1)) exp = self.C[:3, :1:2] # self.assertEqual(exp.name(), "C[0:3,0]") self.assertEqual(exp.size, (3, 1)) exp = self.C[0:, 0] # self.assertEqual(exp.name(), "C[0:,0]") self.assertEqual(exp.size, (3, 1)) c = Constant([[1, -2], [0, 4]]) exp = c[1, 1] self.assertEqual(exp.curvature, s.CONSTANT) self.assertEqual(exp.sign, s.UNKNOWN) self.assertEqual(c[0, 1].sign, s.UNKNOWN) self.assertEqual(c[1, 0].sign, s.UNKNOWN) self.assertEqual(exp.size, (1, 1)) self.assertEqual(exp.value, 4) c = Constant([[1, -2, 3], [0, 4, 5], [7, 8, 9]]) exp = c[0:3, 0:4:2] self.assertEqual(exp.curvature, s.CONSTANT) assert exp.is_constant() self.assertEqual(exp.size, (3, 2)) self.assertEqual(exp[0, 1].value, 7) # Slice of transpose exp = self.C.T[0:2, 1] self.assertEqual(exp.size, (2, 1)) # Arithmetic expression indexing exp = (self.x + self.z)[1, 0] # self.assertEqual(exp.name(), "x[1,0] + z[1,0]") self.assertEqual(exp.curvature, s.AFFINE) self.assertEqual(exp.sign, s.UNKNOWN) self.assertEqual(exp.size, (1, 1)) exp = (self.x + self.a)[1, 0] # self.assertEqual(exp.name(), "x[1,0] + a") self.assertEqual(exp.curvature, s.AFFINE) self.assertEqual(exp.size, (1, 1)) exp = (self.x - self.z)[1, 0] # self.assertEqual(exp.name(), "x[1,0] - z[1,0]") self.assertEqual(exp.curvature, s.AFFINE) self.assertEqual(exp.size, (1, 
1)) exp = (self.x - self.a)[1, 0] # self.assertEqual(exp.name(), "x[1,0] - a") self.assertEqual(exp.curvature, s.AFFINE) self.assertEqual(exp.size, (1, 1)) exp = (-self.x)[1, 0] # self.assertEqual(exp.name(), "-x[1,0]") self.assertEqual(exp.curvature, s.AFFINE) self.assertEqual(exp.size, (1, 1)) c = Constant([[1, 2], [3, 4]]) exp = (c*self.x)[1, 0] # self.assertEqual(exp.name(), "[[2], [4]] * x[0:,0]") self.assertEqual(exp.curvature, s.AFFINE) self.assertEqual(exp.size, (1, 1)) c = Constant([[1, 2], [3, 4]]) exp = (c*self.a)[1, 0] # self.assertEqual(exp.name(), "2 * a") self.assertEqual(exp.curvature, s.AFFINE) self.assertEqual(exp.size, (1, 1)) def test_neg_indices(self): """Test negative indices. """ c = Constant([[1, 2], [3, 4]]) exp = c[-1, -1] self.assertEqual(exp.value, 4) self.assertEqual(exp.size, (1, 1)) self.assertEqual(exp.curvature, s.CONSTANT) c = Constant([1, 2, 3, 4]) exp = c[1:-1] self.assertItemsAlmostEqual(exp.value, [2, 3]) self.assertEqual(exp.size, (2, 1)) self.assertEqual(exp.curvature, s.CONSTANT) c = Constant([1, 2, 3, 4]) exp = c[::-1] self.assertItemsAlmostEqual(exp.value, [4, 3, 2, 1]) self.assertEqual(exp.size, (4, 1)) self.assertEqual(exp.curvature, s.CONSTANT) x = Variable(4) Problem(Minimize(0), [x[::-1] == c]).solve() self.assertItemsAlmostEqual(x.value, [4, 3, 2, 1]) self.assertEqual(x[::-1].size, (4, 1)) x = Variable(2) self.assertEqual(x[::-1].size, (2, 1)) x = Variable(100, name="x") self.assertEqual("x[:-1, 0]", str(x[:-1])) c = Constant([[1, 2], [3, 4]]) expr = c[0, 2:0:-1] self.assertEqual(expr.size, (1, 1)) self.assertAlmostEqual(expr.value, 3) expr = c[0, 2::-1] self.assertEqual(expr.size, (1, 2)) self.assertItemsAlmostEqual(expr.value, [3, 1]) def test_logical_indices(self): """Test indexing with boolean arrays. """ A = np.array([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]]) C = Constant(A) # Boolean array. expr = C[A <= 2] self.assertEqual(expr.size, (2, 1)) self.assertEqual(expr.sign, s.POSITIVE) self.assertItemsAlmostEqual(A[A <= 2], expr.value) expr = C[A % 2 == 0] self.assertEqual(expr.size, (6, 1)) self.assertEqual(expr.sign, s.POSITIVE) self.assertItemsAlmostEqual(A[A % 2 == 0], expr.value) # Boolean array for rows, index for columns. expr = C[np.array([True, False, True]), 3] self.assertEqual(expr.size, (2, 1)) self.assertEqual(expr.sign, s.POSITIVE) self.assertItemsAlmostEqual(A[np.array([True, False, True]), 3], expr.value) # Index for row, boolean array for columns. expr = C[1, np.array([True, False, False, True])] self.assertEqual(expr.size, (2, 1)) self.assertEqual(expr.sign, s.POSITIVE) self.assertItemsAlmostEqual(A[1, np.array([True, False, False, True])], expr.value) # Boolean array for rows, slice for columns. expr = C[np.array([True, True, True]), 1:3] self.assertEqual(expr.size, (3, 2)) self.assertEqual(expr.sign, s.POSITIVE) self.assertItemsAlmostEqual(A[np.array([True, True, True]), 1:3], expr.value) # Slice for row, boolean array for columns. expr = C[1:-1, np.array([True, False, True, True])] self.assertEqual(expr.size, (1, 3)) self.assertEqual(expr.sign, s.POSITIVE) self.assertItemsAlmostEqual(A[1:-1, np.array([True, False, True, True])], expr.value) # Boolean arrays for rows and columns. # Not sure what this does. 
expr = C[np.array([True, True, True]), np.array([True, False, True, True])] self.assertEqual(expr.size, (3, 1)) self.assertEqual(expr.sign, s.POSITIVE) self.assertItemsAlmostEqual(A[np.array([True, True, True]), np.array([True, False, True, True])], expr.value) def test_selector_list_indices(self): """Test indexing with lists/ndarrays of indices. """ A = np.array([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]]) C = Constant(A) # List for rows. expr = C[[1, 2]] self.assertEqual(expr.size, (2, 4)) self.assertEqual(expr.sign, s.POSITIVE) self.assertItemsAlmostEqual(A[[1, 2]], expr.value) # List for rows, index for columns. expr = C[[0, 2], 3] self.assertEqual(expr.size, (2, 1)) self.assertEqual(expr.sign, s.POSITIVE) self.assertItemsAlmostEqual(A[[0, 2], 3], expr.value) # Index for row, list for columns. expr = C[1, [0, 2]] self.assertEqual(expr.size, (2, 1)) self.assertEqual(expr.sign, s.POSITIVE) self.assertItemsAlmostEqual(A[1, [0, 2]], expr.value) # List for rows, slice for columns. expr = C[[0, 2], 1:3] self.assertEqual(expr.size, (2, 2)) self.assertEqual(expr.sign, s.POSITIVE) self.assertItemsAlmostEqual(A[[0, 2], 1:3], expr.value) # Slice for row, list for columns. expr = C[1:-1, [0, 2]] self.assertEqual(expr.size, (1, 2)) self.assertEqual(expr.sign, s.POSITIVE) self.assertItemsAlmostEqual(A[1:-1, [0, 2]], expr.value) # Lists for rows and columns. expr = C[[0, 1], [1, 3]] self.assertEqual(expr.size, (2, 1)) self.assertEqual(expr.sign, s.POSITIVE) self.assertItemsAlmostEqual(A[[0, 1], [1, 3]], expr.value) # Ndarray for rows, list for columns. expr = C[np.array([0, 1]), [1, 3]] self.assertEqual(expr.size, (2, 1)) self.assertEqual(expr.sign, s.POSITIVE) self.assertItemsAlmostEqual(A[np.array([0, 1]), [1, 3]], expr.value) # Ndarrays for rows and columns. expr = C[np.array([0, 1]), np.array([1, 3])] self.assertEqual(expr.size, (2, 1)) self.assertEqual(expr.sign, s.POSITIVE) self.assertItemsAlmostEqual(A[np.array([0, 1]), np.array([1, 3])], expr.value) def test_powers(self): exp = self.x**2 self.assertEqual(exp.curvature, s.CONVEX) exp = self.x**0.5 self.assertEqual(exp.curvature, s.CONCAVE) exp = self.x**-1 self.assertEqual(exp.curvature, s.CONVEX) def test_sum(self): """Test built-in sum. Not good usage. """ self.a.value = 1 expr = sum(self.a) self.assertEqual(expr.value, 1) self.x.value = [1, 2] expr = sum(self.x) self.assertEqual(expr.value, 3) def test_var_copy(self): """Test the copy function for variable types. """ x = Variable(3, 4, name="x") y = x.copy() self.assertEqual(y.size, (3, 4)) self.assertEqual(y.name(), "x") x = Semidef(5, name="x") y = x.copy() self.assertEqual(y.size, (5, 5)) def test_param_copy(self): """Test the copy function for Parameters. """ x = Parameter(3, 4, name="x", sign="positive") y = x.copy() self.assertEqual(y.size, (3, 4)) self.assertEqual(y.name(), "x") self.assertEqual(y.sign, "POSITIVE") def test_constant_copy(self): """Test the copy function for Constants. """ x = Constant(2) y = x.copy() self.assertEqual(y.size, (1, 1)) self.assertEqual(y.value, 2) def test_is_pwl(self): """Test is_pwl() """ A = np.random.randn(2, 3) b = np.random.randn(2) expr = A * self.y - b self.assertEqual(expr.is_pwl(), True) expr = max_elemwise(1, 3 * self.y) self.assertEqual(expr.is_pwl(), True) expr = abs(self.y) self.assertEqual(expr.is_pwl(), True) expr = pnorm(3 * self.y, 1) self.assertEqual(expr.is_pwl(), True) expr = pnorm(3 * self.y ** 2, 1) self.assertEqual(expr.is_pwl(), False)
35.664656
93
0.565717
a97f602879339c59c87cd941ee62aafc7a6d28af
3,067
py
Python
tests/pymcell4_positive/1300_wall_hit_callback/model.py
mcellteam/mcell-tests
34d2d967b75d56edbae999bf0090641850f4f4fe
[ "MIT" ]
1
2021-08-13T20:40:54.000Z
2021-08-13T20:40:54.000Z
tests/pymcell4_positive/1300_wall_hit_callback/model.py
mcellteam/mcell_tests
34d2d967b75d56edbae999bf0090641850f4f4fe
[ "MIT" ]
null
null
null
tests/pymcell4_positive/1300_wall_hit_callback/model.py
mcellteam/mcell_tests
34d2d967b75d56edbae999bf0090641850f4f4fe
[ "MIT" ]
null
null
null
#!/usr/bin/env python3 import sys import os MCELL_PATH = os.environ.get('MCELL_PATH', '') if MCELL_PATH: sys.path.append(os.path.join(MCELL_PATH, 'lib')) else: print("Error: variable MCELL_PATH that is used to find the mcell library was not set.") sys.exit(1) import mcell as m from parameters import * if len(sys.argv) == 3 and sys.argv[1] == '-seed': # overwrite value SEED defined in module parameters SEED = int(sys.argv[2]) import subsystem import instantiation model = m.Model() # ---- configuration ---- model.config.time_step = TIME_STEP model.config.seed = SEED model.config.total_iterations = ITERATIONS model.config.partition_dimension = 10 model.config.subpartition_dimension = 2.5 # ---- default configuration overrides ---- # ---- add components ---- model.add_subsystem(subsystem.subsystem) model.add_instantiation(instantiation.instantiation) # ---- initialization and execution ---- model.initialize() if DUMP: model.dump_internal_state() if EXPORT_DATA_MODEL and model.viz_outputs: model.export_data_model() # --------------- test ------------------------ # example class passed as context to the callback class HitCount(): def __init__(self): self.count = 0 self.current_it = 0 def check_time(time, it): # cannot start before iteration start #print("---") #print(time) #print(it) #print(it * TIME_STEP) assert time >= it * TIME_STEP # we are running iterations one by one therefore # the max time is the end of this iteration assert time <= (it + 1) * TIME_STEP def check_pos(pos3d): EPS = 1e-9 # min and max coordinates from Tetrahedron_vertex_list assert pos3d[0] >= -0.01 - EPS and pos3d[0] <= 0.02 + EPS assert pos3d[1] >= -0.02 - EPS and pos3d[1] <= 0.02 + EPS assert pos3d[2] >= -0.01 - EPS and pos3d[2] <= 0.02 + EPS tetrahedron_object = model.find_geometry_object('Tetrahedron') assert tetrahedron_object def wall_hit_callback(wall_hit_info, context): #print("Wall hit callback called") #print(wall_hit_info) context.count += 1 assert wall_hit_info.geometry_object is tetrahedron_object assert wall_hit_info.wall_index < len(tetrahedron_object.wall_list) #print("-t") check_time(wall_hit_info.time, context.current_it) #print("-t-before") check_time(wall_hit_info.time_before_hit, context.current_it) assert wall_hit_info.time_before_hit <= wall_hit_info.time check_pos(wall_hit_info.pos3d) check_pos(wall_hit_info.pos3d_before_hit) vm_species = model.find_species('vm') assert vm_species assert vm_species is subsystem.vm context = HitCount() # the object and species are optional, this simple test contains single # object and species anyway model.register_mol_wall_hit_callback( wall_hit_callback, context ) for i in range(ITERATIONS + 1): context.current_it = i model.run_iterations(1) model.end_simulation() print("Total number of wall hits: " + str(context.count)) assert context.count == 36045
24.536
91
0.69775
cf94941f042e10124107902ab2e12bca9f262fa6
780
py
Python
wave/synth/wave/wave/base/domain/param.py
jedhsu/wave
a05d8f4b0a96722bdc2f5a514646c7a44681982b
[ "Apache-2.0" ]
null
null
null
wave/synth/wave/wave/base/domain/param.py
jedhsu/wave
a05d8f4b0a96722bdc2f5a514646c7a44681982b
[ "Apache-2.0" ]
null
null
null
wave/synth/wave/wave/base/domain/param.py
jedhsu/wave
a05d8f4b0a96722bdc2f5a514646c7a44681982b
[ "Apache-2.0" ]
null
null
null
from dataclasses import dataclass from typing import Generic, TypeVar from .bounds import Bounds __all__ = ["DomainParameters"] T = TypeVar("T") @dataclass class _DomainParameters(Generic[T]): type: T bounds: Bounds[T] npoints: int class _Validate_(_DomainParameters, Generic[T]): @staticmethod def validate(type_: T, bounds: Bounds[T], npoints: int): assert isinstance(type_, type) bounds.validate_bounds() assert npoints >= 0, "Number of points must be non-negative." class DomainParameters( _Validate_, _DomainParameters, Generic[T], ): def __init__( self, type: T, bounds: Bounds[T], npoints: int, ): super(DomainParameters, self).__init__(type, bounds, npoints)
20.526316
69
0.657692
21266e614097f8cd56839d6664d3f803ae792ecc
1,429
py
Python
resources/mycroft_websocket_template.py
FruityWelsh/mycroft_websocket_pylib
ed90327b469b5a742014c35b6fcd9b28eb8143de
[ "Apache-2.0" ]
1
2020-08-30T23:57:02.000Z
2020-08-30T23:57:02.000Z
resources/mycroft_websocket_template.py
FruityWelsh/mycroft_websocket_pylib
ed90327b469b5a742014c35b6fcd9b28eb8143de
[ "Apache-2.0" ]
null
null
null
resources/mycroft_websocket_template.py
FruityWelsh/mycroft_websocket_pylib
ed90327b469b5a742014c35b6fcd9b28eb8143de
[ "Apache-2.0" ]
null
null
null
#!/bin/env python

import json
from websocket import create_connection  # type: ignore
import logging


class connect():
    def __init__(self, mycroft_addr, mycroft_port, LOGLEVEL=logging.WARN):
        logging.basicConfig(level=LOGLEVEL)
        self.url = f"ws://{mycroft_addr}:{mycroft_port}/core"
        logging.debug(f"Websocket url: {self.url}")
        try:
            self.mycroft_connection = create_connection(self.url)
            logging.debug(f"Websocket connected to {self.url}")
        except Exception:
            # If create_connection fails, self.mycroft_connection was never set,
            # so there is nothing to close; just log and propagate the error.
            logging.error(f"Could not connect to {self.url}")
            raise

    def _send(self, message):
        logging.debug(f"Data being sent: {message}")
        send_status = self.mycroft_connection.send(json.dumps(message))
        logging.debug(f"Send status: {send_status}")
        return send_status

    def listen(self):
        message_received = self.mycroft_connection.recv()
        logging.debug(f"message_received: {message_received}")
        yield message_received

    def close(self):
        logging.debug("Mycroft websocket closing")
        self.mycroft_connection.close()
        logging.debug("Mycroft websocket closed")

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self.close()

    def speak(self, utterance):
        message = {'type': 'speak',
                   'data': {'utterance': utterance}}
        send_status = self._send(message)
        return send_status
32.477273
74
0.622813
6141d3fae084c8befd2abca150c9c8d8e8bb0ed7
1,423
py
Python
configs/config.py
jackchua/nga-ui
da48c6ba97fe88386970bd780bceee4221ea1e9f
[ "MIT" ]
1
2021-01-20T19:14:27.000Z
2021-01-20T19:14:27.000Z
configs/config.py
jackchua/nga-ui
da48c6ba97fe88386970bd780bceee4221ea1e9f
[ "MIT" ]
null
null
null
configs/config.py
jackchua/nga-ui
da48c6ba97fe88386970bd780bceee4221ea1e9f
[ "MIT" ]
1
2021-08-16T10:35:35.000Z
2021-08-16T10:35:35.000Z
import os import json # require that a creds.json file has been created / downloaded from a secure # key store upon deployment curr_dir = os.path.dirname(os.path.realpath(__file__)) with open(os.path.join(curr_dir, 'creds.json')) as f: creds = json.loads(f.read()) class Config(object): # test on a local sqllite database SECRET_KEY = b'\x05\xd9.(g\xe1\xbf`\xb1t\xb0n\xeb\xed\x98\xa1' SQLALCHEMY_DATABASE_URI = creds['debug']['db']['uri'] SQLALCHEMY_TRACK_MODIFICATIONS = False ADMIN = {'username': creds['debug']['admin_user']['username'], 'email': creds['debug']['admin_user']['email'], 'password': creds['debug']['admin_user']['password']} # THEME SUPPORT # if set then url_for('static', filename='', theme='') # will add the theme name to the static URL: # /static/<DEFAULT_THEME>/filename # DEFAULT_THEME = "themes/dark" DEFAULT_THEME = None class ProductionConfig(Config): DEBUG = False # use the above postgres database SQLALCHEMY_DATABASE_URI = 'postgresql+psycopg2://{}:{}@{}:{}/{}'.format( creds['production']['db']['username'], creds['production']['db']['password'], creds['production']['db']['host'], creds['production']['db']['port'], 'ef' ) class DebugConfig(Config): DEBUG = True config_dict = { 'Production': ProductionConfig, 'Debug': DebugConfig }
29.645833
76
0.635278
a4b0f10d965c42d0758f2bae9b31b81bb86c543b
3,937
py
Python
model/keypoint_detector.py
netjerikhet/ai-human-emotions
40117813f275b6830581a0e23d9efff40197b668
[ "Apache-2.0" ]
null
null
null
model/keypoint_detector.py
netjerikhet/ai-human-emotions
40117813f275b6830581a0e23d9efff40197b668
[ "Apache-2.0" ]
null
null
null
model/keypoint_detector.py
netjerikhet/ai-human-emotions
40117813f275b6830581a0e23d9efff40197b668
[ "Apache-2.0" ]
1
2022-03-16T16:30:49.000Z
2022-03-16T16:30:49.000Z
from torch import nn
import torch
import torch.nn.functional as F
# numpy and ConvexHull are required by normalize_kp below
import numpy as np
from scipy.spatial import ConvexHull

from model.util import Hourglass, make_coordinate_grid, AntiAliasInterpolation2d


class KPDetector(nn.Module):
    """
    Detecting the keypoints. Returns keypoint positions and a jacobian near each point.
    """

    def __init__(self, block_expansion, num_kp, num_channels, max_features,
                 num_blocks, temperature, estimate_jacobian=False, scale_factor=1,
                 single_jacobian_map=False, pad=0):
        super(KPDetector, self).__init__()

        self.predictor = Hourglass(block_expansion, in_features=num_channels,
                                   max_features=max_features, num_blocks=num_blocks)

        self.kp = nn.Conv2d(in_channels=self.predictor.out_filters, out_channels=num_kp,
                            kernel_size=(7, 7), padding=pad)

        if estimate_jacobian:
            self.num_jacobian_maps = 1 if single_jacobian_map else num_kp
            self.jacobian = nn.Conv2d(in_channels=self.predictor.out_filters,
                                      out_channels=4 * self.num_jacobian_maps,
                                      kernel_size=(7, 7), padding=pad)
            self.jacobian.weight.data.zero_()
            self.jacobian.bias.data.copy_(torch.tensor([1, 0, 0, 1] * self.num_jacobian_maps,
                                                       dtype=torch.float))
        else:
            self.jacobian = None

        self.temperature = temperature
        self.scale_factor = scale_factor
        if self.scale_factor != 1:
            self.down = AntiAliasInterpolation2d(num_channels, self.scale_factor)

    def gaussian2kp(self, heatmap):
        """
        Extract the mean (keypoint position) from a heatmap.
        """
        shape = heatmap.shape
        heatmap = heatmap.unsqueeze(-1)
        grid = make_coordinate_grid(shape[2:], heatmap.type()).unsqueeze_(0).unsqueeze_(0)
        value = (heatmap * grid).sum(dim=(2, 3))
        kp = {'value': value}

        return kp

    def forward(self, x):
        if self.scale_factor != 1:
            x = self.down(x)

        feature_map = self.predictor(x)
        prediction = self.kp(feature_map)

        final_shape = prediction.shape
        heatmap = prediction.view(final_shape[0], final_shape[1], -1)
        heatmap = F.softmax(heatmap / self.temperature, dim=2)
        heatmap = heatmap.view(*final_shape)

        out = self.gaussian2kp(heatmap)

        if self.jacobian is not None:
            jacobian_map = self.jacobian(feature_map)
            jacobian_map = jacobian_map.reshape(final_shape[0], self.num_jacobian_maps, 4,
                                                final_shape[2], final_shape[3])
            heatmap = heatmap.unsqueeze(2)

            jacobian = heatmap * jacobian_map
            jacobian = jacobian.view(final_shape[0], final_shape[1], 4, -1)
            jacobian = jacobian.sum(dim=-1)
            jacobian = jacobian.view(jacobian.shape[0], jacobian.shape[1], 2, 2)
            out['jacobian'] = jacobian

        return out


def normalize_kp(kp_source, kp_driving, kp_driving_initial, adapt_movement_scale=False,
                 use_relative_movement=False, use_relative_jacobian=False):
    if adapt_movement_scale:
        source_area = ConvexHull(kp_source['value'][0].data.cpu().numpy()).volume
        driving_area = ConvexHull(kp_driving_initial['value'][0].data.cpu().numpy()).volume
        adapt_movement_scale = np.sqrt(source_area) / np.sqrt(driving_area)
    else:
        adapt_movement_scale = 1

    kp_new = {k: v for k, v in kp_driving.items()}

    if use_relative_movement:
        kp_value_diff = (kp_driving['value'] - kp_driving_initial['value'])
        kp_value_diff *= adapt_movement_scale
        kp_new['value'] = kp_value_diff + kp_source['value']

        if use_relative_jacobian:
            jacobian_diff = torch.matmul(kp_driving['jacobian'],
                                         torch.inverse(kp_driving_initial['jacobian']))
            kp_new['jacobian'] = torch.matmul(jacobian_diff, kp_source['jacobian'])

    return kp_new
41.882979
113
0.641605
02d593bb96ff651235229aaef49162d5a42447dc
969
py
Python
modules/exploitation/setoolkit.py
TheRealJoeyCo/ptf
dd1985696ba8d33be46645efd9d086ac48bd42cf
[ "FTL" ]
5
2019-03-14T10:17:22.000Z
2019-10-23T14:04:12.000Z
modules/exploitation/setoolkit.py
yeyintminthuhtut/ptf
53d90661b9e1c372fb6965fb22c63033103d0c13
[ "FTL" ]
null
null
null
modules/exploitation/setoolkit.py
yeyintminthuhtut/ptf
53d90661b9e1c372fb6965fb22c63033103d0c13
[ "FTL" ]
14
2019-03-14T10:34:02.000Z
2021-10-31T17:34:13.000Z
#!/usr/bin/env python ##################################### # Installation module for SET ##################################### # AUTHOR OF MODULE NAME AUTHOR="David Kennedy (ReL1K)" # DESCRIPTION OF THE MODULE DESCRIPTION="This module will install/update the Social-Engineer Toolkit (SET)" # INSTALL TYPE GIT, SVN, FILE DOWNLOAD # OPTIONS = GIT, SVN, FILE INSTALL_TYPE="GIT" # LOCATION OF THE FILE OR GIT/SVN REPOSITORY REPOSITORY_LOCATION="https://github.com/trustedsec/social-engineer-toolkit/" # WHERE DO YOU WANT TO INSTALL IT INSTALL_LOCATION="setoolkit" # DEPENDS FOR DEBIAN INSTALLS DEBIAN="git,build-essential,python-pexpect,python-crypto,python-openssl,rdesktop,screen,apache2,python-pymssql" # DEPENDS FOR FEDORA INSTALLS FEDORA="git,make,automake,gcc,gcc-c++,kernel-devel,python-pexpect,python-crypto,pyOpenSSL" # COMMANDS TO RUN AFTER AFTER_COMMANDS="" # THIS WILL CREATE AN AUTOMATIC LAUNCHER FOR THE TOOL LAUNCHER="setoolkit,seautomate,seproxy"
29.363636
111
0.724458
f43dbd121b5d5a323a02730dc7dd3b53155a9532
5,950
py
Python
src/extract_old_site/modules/data_downloads.py
aychen99/Excavating-Occaneechi-Town
6e864ca69ff1881554eb4c88aebed236bafbeaf4
[ "MIT" ]
1
2020-10-01T01:07:11.000Z
2020-10-01T01:07:11.000Z
src/extract_old_site/modules/data_downloads.py
aychen99/Excavating-Occaneechi-Town
6e864ca69ff1881554eb4c88aebed236bafbeaf4
[ "MIT" ]
null
null
null
src/extract_old_site/modules/data_downloads.py
aychen99/Excavating-Occaneechi-Town
6e864ca69ff1881554eb4c88aebed236bafbeaf4
[ "MIT" ]
null
null
null
from . import standard_text_chapter from bs4 import BeautifulSoup from pathlib import Path import os def process_tab_html_contents( html_strings, current_tab_page_name, current_dir_path, dig_parent_dir_path, readfile, current_body_page_name ): """Turn the raw html_strings from reading a tab.html file into a dict.""" title = standard_text_chapter.extract_page_title(html_strings['contenta_html']) content = standard_text_chapter.extract_page_content( html_strings['contentb_html'], current_dir_path ) page_num_map = { "body0_1.html": "Data 1", "body0_2.html": "Data 2", "body1_1.html": "Data 3", "body2_1.html": "Data 4", "body2_2.html": "Data 5", "body2_3.html": "Data 6", "body3_1.html": "Data 7", "body3_2.html": "Data 8", "body3_3.html": "Data 9", "body3_4.html": "Data 10", "body3_5.html": "Data 11", "body3_6.html": "Data 12", "body3_7.html": "Data 13", "body3_8.html": "Data 14", "body3_9.html": "Data 15", "body3_10.html": "Data 16", } page_num = page_num_map[current_body_page_name] sidebar_info = standard_text_chapter.extract_sidebar( html_strings['sidebar_html'], current_dir_path, html_strings['body_page_name'] ) topbar_info = standard_text_chapter.extract_topbar( html_strings['topbar_html'], current_dir_path, current_tab_page_name ) processed = { "page": { "parentModuleShortTitle": topbar_info['currentModule']['moduleShortName'], "pageNum": page_num, "pageTitle": title, "content": content, }, "module": { "path": topbar_info['currentModule']['path'], "shortTitle": topbar_info['currentModule']['moduleShortName'], "fullTitle": sidebar_info['currentModuleFullName'], "author": sidebar_info['moduleAuthor'], "sections": sidebar_info['sections'] }, "additionalSectionInfo": { "currentSection": sidebar_info['currentSection'], "pageNum": page_num } } return processed def extract_full_module(module_file_names, current_dir_path, dig_parent_dir_path, readfile): """Extract content from one module in a chapter and store in a dict.""" extracted = { "module": {}, "pages": {} } full_current_dir_path = dig_parent_dir_path / ("." + current_dir_path) processed_pages = [] tab_html_str = readfile(module_file_names[0], full_current_dir_path) associated_body_page_names = [] for filename in (Path(dig_parent_dir_path) / ('.' 
+ current_dir_path)).iterdir(): if filename.name.replace('body', '')[0] == module_file_names[0].replace('tab', '')[0]: associated_body_page_names.append(filename) for filename in associated_body_page_names: body_html_contents = standard_text_chapter.get_body_page_html_contents( readfile(filename.name, filename.parent), current_dir_path, dig_parent_dir_path, readfile, has_page_num=False ) extracted_contents = standard_text_chapter.get_tab_page_html_contents( tab_html_str, current_dir_path, dig_parent_dir_path, readfile, has_page_num=False ) extracted_contents['sidebar_html'] = body_html_contents['sidebar_html'] extracted_contents['contenta_html'] = body_html_contents['reporta_html'] extracted_contents['contentb_html'] = body_html_contents['reportb_html'] extracted_contents['body_page_name'] = filename.name processed_page = process_tab_html_contents(extracted_contents, module_file_names[0], current_dir_path, dig_parent_dir_path, readfile, filename.name) processed_pages.append(processed_page) if not standard_text_chapter.validate_tab_html_extraction_results(processed_pages): return "Failed: inconsistency in pages within module " + module_file_names[0] sectionsToPageNums = {} for processed_page in processed_pages: sectionInfo = processed_page['additionalSectionInfo'] pageNumDictKey = (sectionInfo['currentSection']['path'] + '-' + sectionInfo['currentSection']['name']) if pageNumDictKey in sectionsToPageNums: return "Failed: Two sections with the same path + name" sectionsToPageNums[pageNumDictKey] = sectionInfo['pageNum'] extracted['module'] = processed_pages[0]['module'] for processed_page in processed_pages: pageNum = processed_page['page'].pop('pageNum', None) extracted['pages'][pageNum] = processed_page['page'] for section in extracted['module']['sections']: section['pageNum'] = sectionsToPageNums[section['path'] + '-' + section['name']] if len(section['subsections']) > 0: for subsection in section['subsections']: subsection['pageNum'] = sectionsToPageNums[subsection['path'] + '-' + subsection['name']] return extracted def extract_data_downloads(dig_parent_dir, readfile): started_dir_path_obj = Path(dig_parent_dir) / "./dig/html/data" tab_filenames = [] for filepath in started_dir_path_obj.iterdir(): if "tab" in filepath.name and "tabs" not in filepath.name: tab_filenames.append(filepath.name) return standard_text_chapter.extract_full_chapter( tab_filenames, "/dig/html/data", Path(dig_parent_dir), readfile, extract_full_module=extract_full_module )
41.901408
116
0.624538
ed36d1dc9bf59ebac75a0bc575979252ce4c6144
390
py
Python
Day 10/Solution2.py
joll05/AdventOfCode2020
d2d93395cec72095d749a07743b70e7fd6f41297
[ "MIT" ]
null
null
null
Day 10/Solution2.py
joll05/AdventOfCode2020
d2d93395cec72095d749a07743b70e7fd6f41297
[ "MIT" ]
null
null
null
Day 10/Solution2.py
joll05/AdventOfCode2020
d2d93395cec72095d749a07743b70e7fd6f41297
[ "MIT" ]
null
null
null
f=open("input.txt") Input=sorted(map(int, f.read().split("\n"))) Input = [0] + Input f.close() print(Input) combinationCounts = [1] + [0] * (len(Input) - 1) for i, value in enumerate(Input): for j, adapterValue in enumerate(Input[i+1:], start=i+1): if(adapterValue > value + 3): break combinationCounts[j] += combinationCounts[i] print(combinationCounts)
24.375
61
0.628205
fffe104fa44f45d19c2f7c7c9b27d397beb7cbe4
6,666
py
Python
swav/vissl/extra_scripts/convert_caffe2_to_torchvision_resnet.py
lhoestq/DeDLOC
36f5a6d043c3d727f9d098a35fba94aa351a5cd4
[ "Apache-2.0" ]
null
null
null
swav/vissl/extra_scripts/convert_caffe2_to_torchvision_resnet.py
lhoestq/DeDLOC
36f5a6d043c3d727f9d098a35fba94aa351a5cd4
[ "Apache-2.0" ]
null
null
null
swav/vissl/extra_scripts/convert_caffe2_to_torchvision_resnet.py
lhoestq/DeDLOC
36f5a6d043c3d727f9d098a35fba94aa351a5cd4
[ "Apache-2.0" ]
null
null
null
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved """ Convert the ResNet-50 models from ICCV'19 paper https://arxiv.org/abs/1905.01235 to PyTorch TorchVision compatible models. We can directly use these models for benchmarking. Code credits: https://github.com/facebookresearch/fair_self_supervision_benchmark/blob/master/extra_scripts/pickle_caffe2_to_pytorch.py # NOQA """ import argparse import logging import pickle import re import sys from collections import OrderedDict import torch from fvcore.common.file_io import PathManager # create the logger FORMAT = "[%(levelname)s: %(filename)s: %(lineno)4d]: %(message)s" logging.basicConfig(level=logging.INFO, format=FORMAT, stream=sys.stdout) logger = logging.getLogger(__name__) _C2_STAGE_NAMES = {"R-50": ["1.2", "2.3", "3.5", "4.2"]} def remove_jigsaw_names(data): output_blobs, count = {}, 0 logger.info("Correcting jigsaw model...") remove_suffixes = ["s1", "s2", "s3", "s4", "s5", "s6", "s7", "s8"] for item in sorted(data.keys()): if "s0" in item: out_name = re.sub("_s[0-9]_", "_", item) logger.info("input_name: {} out_name: {}".format(item, out_name)) output_blobs[out_name] = data[item] elif any(x in item for x in remove_suffixes): count += 1 logger.info("Ignoring: {}".format(item)) else: logger.info("adding: {}".format(item)) output_blobs[item] = data[item] logger.info("Original #blobs: {}".format(len(data.keys()))) logger.info("Output #blobs: {}".format(len(output_blobs.keys()))) logger.info("Removed #blobs: {}".format(count)) return output_blobs def _rename_basic_resnet_weights(layer_keys): layer_keys = [k.replace("_", ".") for k in layer_keys] layer_keys = [k.replace(".w", ".weight") for k in layer_keys] layer_keys = [k.replace(".bn", "_bn") for k in layer_keys] layer_keys = [k.replace(".b", ".bias") for k in layer_keys] layer_keys = [k.replace("_bn.s", "_bn.scale") for k in layer_keys] layer_keys = [k.replace(".biasranch", ".branch") for k in layer_keys] layer_keys = [k.replace("res.conv1_", "conv1_") for k in layer_keys] # Affine-Channel -> BatchNorm enaming layer_keys = [k.replace("_bn.scale", "_bn.weight") for k in layer_keys] layer_keys = [k.replace("_bn.rm", "_bn.running_mean") for k in layer_keys] layer_keys = [k.replace("_bn.riv", "_bn.running_var") for k in layer_keys] # Make torchvision-compatible layer_keys = [k.replace("conv1_bn.", "bn1.") for k in layer_keys] layer_keys = [k.replace("res2.", "layer1.") for k in layer_keys] layer_keys = [k.replace("res3.", "layer2.") for k in layer_keys] layer_keys = [k.replace("res4.", "layer3.") for k in layer_keys] layer_keys = [k.replace("res5.", "layer4.") for k in layer_keys] layer_keys = [k.replace(".branch2a.", ".conv1.") for k in layer_keys] layer_keys = [k.replace(".branch2a_bn.", ".bn1.") for k in layer_keys] layer_keys = [k.replace(".branch2b.", ".conv2.") for k in layer_keys] layer_keys = [k.replace(".branch2b_bn.", ".bn2.") for k in layer_keys] layer_keys = [k.replace(".branch2c.", ".conv3.") for k in layer_keys] layer_keys = [k.replace(".branch2c_bn.", ".bn3.") for k in layer_keys] layer_keys = [k.replace(".branch1.", ".downsample.0.") for k in layer_keys] layer_keys = [k.replace(".branch1_bn.", ".downsample.1.") for k in layer_keys] return layer_keys def _rename_weights_for_resnet(weights, stage_names): original_keys = sorted(weights.keys()) layer_keys = sorted(weights.keys()) # for X-101, rename output to fc1000 to avoid conflicts afterwards layer_keys = [k if k != "pred_b" else "fc1000_b" for k in layer_keys] layer_keys = [k if k != "pred_w" else 
"fc1000_w" for k in layer_keys] # performs basic renaming: _ -> . , etc layer_keys = _rename_basic_resnet_weights(layer_keys) key_map = {k: v for k, v in zip(original_keys, layer_keys)} logger.info("Remapping C2 weights") max_c2_key_size = max(len(k) for k in original_keys if "_momentum" not in k) new_weights = OrderedDict() for k in original_keys: v = weights[k] if "_momentum" in k: continue if "pred" in k: continue if k == "lr": continue if k == "model_iter": continue w = torch.from_numpy(v) logger.info( "C2 name: {: <{}} mapped name: {}".format(k, max_c2_key_size, key_map[k]) ) new_weights[key_map[k]] = w logger.info("Number of blobs: {}".format(len(new_weights))) return new_weights def _load_c2_pickled_weights(file_path): with PathManager.open(file_path, "rb") as f: data = pickle.load(f, encoding="latin1") if "blobs" in data: weights = data["blobs"] else: weights = data return weights def convert_bgr2rgb(state_dict): w = state_dict["conv1_w"] # (64, 3, 7, 7) assert w.shape == (64, 3, 7, 7), "Error in converting bgr2rgb" w = w[:, ::-1, :, :] state_dict["conv1_w"] = w.copy() logger.info("BGR ===> RGB for conv1_w.") return state_dict def main(): parser = argparse.ArgumentParser(description="Convert C2 model to Pytorch") parser.add_argument( "--c2_model", type=str, default=None, help="Path to c2 RN-50 model" ) parser.add_argument( "--output_model", type=str, default=None, help="Path to save torch RN-50 model" ) parser.add_argument( "--bgr2rgb", dest="bgr2rgb", default=False, help="Revert bgr (openCV) order to rgb (PIL) order", ) parser.add_argument( "--jigsaw", type=bool, default=False, help="Whether jigsaw model or not" ) parser.add_argument("--arch", type=str, default="R-50", help="R-50") args = parser.parse_args() stages = _C2_STAGE_NAMES[args.arch] # load the caffe2 model weights state_dict = _load_c2_pickled_weights(args.c2_model) # for the pretext model from jigsaw, special processing if args.jigsaw: state_dict = remove_jigsaw_names(state_dict) # depending on the image reading library, we convert the weights to be # compatible order. The default order of caffe2 weights is BGR (openCV). if args.bgr2rgb: state_dict = convert_bgr2rgb(state_dict) state_dict = _rename_weights_for_resnet(state_dict, stages) logger.info("Saving converted weights to: {}".format(args.output_model)) torch.save(state_dict, args.output_model) logger.info("Done!!") if __name__ == "__main__": main()
36.228261
128
0.648215
184f6ce7473af946aae8267acae8069384374145
2,199
py
Python
opendatatools/common/date_util.py
solider245/OpenData
031aa29b7b6b26a903f378e3da10520fd3a1b7ab
[ "Apache-2.0" ]
1,179
2018-05-28T07:14:41.000Z
2022-03-27T16:03:51.000Z
opendatatools/common/date_util.py
taoyeah/OpenData
031aa29b7b6b26a903f378e3da10520fd3a1b7ab
[ "Apache-2.0" ]
42
2018-07-05T02:44:56.000Z
2022-03-29T12:12:30.000Z
opendatatools/common/date_util.py
taoyeah/OpenData
031aa29b7b6b26a903f378e3da10520fd3a1b7ab
[ "Apache-2.0" ]
297
2018-05-28T07:39:38.000Z
2022-03-28T02:35:59.000Z
#-*- coding:utf-8 -*-

import datetime
import calendar

lastday_map = {}

def get_current_day(format = "%Y-%m-%d"):
    curr_date = datetime.datetime.now()
    return datetime.datetime.strftime(curr_date, format)

def date_convert(date, format, target_format):
    return datetime.datetime.strftime(datetime.datetime.strptime(date, format), target_format)

def get_month_firstday_and_lastday(year=None, month=None):
    """
    :param year: year, defaults to the current year; int or str accepted
    :param month: month, defaults to the current month; int or str accepted
    :return: firstDay: first day of the month, as datetime.date
             lastDay: last day of the month, as datetime.date
    """
    if year:
        year = int(year)
    else:
        year = datetime.date.today().year

    if month:
        month = int(month)
    else:
        month = datetime.date.today().month

    # Get the weekday of the month's first day and the total number of days in the month
    firstDayWeekDay, monthRange = calendar.monthrange(year, month)

    # Get the first day of the month
    firstDay = datetime.date(year=year, month=month, day=1)
    lastDay = datetime.date(year=year, month=month, day=monthRange)

    return firstDay, lastDay

def get_month_lastday(datestr, format = "%Y-%m-%d"):
    if datestr in lastday_map:
        return lastday_map[datestr]

    date = datetime.datetime.strptime(datestr, format)
    year = date.year
    month = date.month
    firstDay, lastDay = get_month_firstday_and_lastday(year, month)
    result = datetime.datetime.strftime(lastDay, format)
    lastday_map[datestr] = result
    return result

def get_target_date2(date, span, format="%Y-%m-%d"):
    curr_date = datetime.datetime.strptime(date, format)
    span_days = datetime.timedelta(days=span)
    target = curr_date + span_days
    return datetime.datetime.strftime(target, format)

def get_target_date(span, format="%Y-%m-%d"):
    today = datetime.date.today()
    span_days = datetime.timedelta(days=span)
    target = today + span_days
    return datetime.datetime.strftime(target, format)

def split_date(datestr, format = "%Y-%m-%d"):
    date = datetime.datetime.strptime(datestr, format)
    return date.year, date.month, date.day

if __name__ == '__main__':
    print(get_target_date(0))
    print(get_target_date(10))
    print(get_target_date(-1))
29.32
94
0.686676
e517ca3f62f459291db019e296a796186eaf7830
882
py
Python
weatbag/tiles/n1w2.py
takluyver/weatbag
0b231aada9b01491d44ad487a4aacdaca71cc633
[ "MIT" ]
6
2015-07-18T08:25:41.000Z
2019-10-29T21:27:13.000Z
weatbag/tiles/n1w2.py
takluyver/weatbag
0b231aada9b01491d44ad487a4aacdaca71cc633
[ "MIT" ]
3
2015-02-21T01:03:41.000Z
2019-10-30T13:38:51.000Z
weatbag/tiles/n1w2.py
takluyver/weatbag
0b231aada9b01491d44ad487a4aacdaca71cc633
[ "MIT" ]
8
2015-02-20T19:19:20.000Z
2019-10-29T21:27:25.000Z
class Tile:
    map_word = "Island beach"

    def describe(self):
        print("The beach is really quiet and you can hear and see the seagulls "
              "on the sea.\n")

    def action(self, player, do):
        print("What is this gibberish?")

    def leave(self, player, direction):
        if direction == "e":
            print("You can't go back by swimming, that part is full of "
                  "electric eels.\n")
            return False

        elif direction == "s":
            print("I'm afraid I can't let you go there, Dave.")
            return False

        elif direction == "w":
            print("The sandy beach is transforming into big rocks, but it looks "
                  "like you are able to climb them.\n"
                  "Just be careful because they are a bit slippery.")
            return True

        else:
            return True
33.923077
80
0.539683
08684b014918b11df3a5020e00477e2372ab08a1
1,506
py
Python
prepare.py
gtadam/prepare-kubernetes-deployment-action
171c77e921dd3fd6750aa0956552c288a29711c8
[ "MIT" ]
null
null
null
prepare.py
gtadam/prepare-kubernetes-deployment-action
171c77e921dd3fd6750aa0956552c288a29711c8
[ "MIT" ]
null
null
null
prepare.py
gtadam/prepare-kubernetes-deployment-action
171c77e921dd3fd6750aa0956552c288a29711c8
[ "MIT" ]
null
null
null
import argparse import yaml import json parser = argparse.ArgumentParser( description="Preparation of values file to deploy a kubernetes service." ) parser.add_argument( "--service-name", dest="service_name", help="The name of the service to deploy" ) parser.add_argument( "--service-replica-count", type=int, dest="replica_count", help="The number of replicas of the service to deploy" ) parser.add_argument( "--image-name", dest="image_name", help="The name of the image to deploy" ) parser.add_argument( "--image-namespace", dest="image_namespace", help="The namespace of the image to deploy" ) parser.add_argument( "--version", dest="version", help='The version of the application to deploy' ) parser.add_argument( "--service-envvars", dest="service_envvars", help='The environment variables that are deployed with the service' ) args = parser.parse_args() values = {"service": {"name": args.service_name, "replicaCount": args.replica_count, "image": {"name": args.image_name, "namespace": args.image_namespace}, "env": json.loads(args.service_envvars)}} with open("/service/values.yaml", "w") as output_file: yaml.dump(values, output_file) with open("/service/Chart.yaml", "r+") as chart_file: yaml_file = yaml.full_load(chart_file) yaml_file["appVersion"] = args.version yaml_file["version"] = args.version chart_file.seek(0) chart_file.truncate() yaml.dump(yaml_file, chart_file)
25.1
197
0.698539
88e82b4b91f54045c0a921fb0947ceca1916cc88
6,396
py
Python
flsim/servers/tests/test_aggregator.py
JohnlNguyen/FLSim
a5ed7c0b84499cd9dbc5fe95f8bcb4ba8ab5a5cb
[ "BSD-3-Clause" ]
79
2021-12-09T18:05:09.000Z
2022-03-23T20:43:46.000Z
flsim/servers/tests/test_aggregator.py
JohnlNguyen/FLSim
a5ed7c0b84499cd9dbc5fe95f8bcb4ba8ab5a5cb
[ "BSD-3-Clause" ]
11
2021-12-30T17:54:04.000Z
2022-03-23T17:23:00.000Z
flsim/servers/tests/test_aggregator.py
JohnlNguyen/FLSim
a5ed7c0b84499cd9dbc5fe95f8bcb4ba8ab5a5cb
[ "BSD-3-Clause" ]
9
2021-12-09T19:55:22.000Z
2022-03-15T00:02:08.000Z
#!/usr/bin/env python3 # Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # # This source code is licensed under the BSD-style license found in the # LICENSE file in the root directory of this source tree. from tempfile import mkstemp import pytest import torch.distributed as dist import torch.multiprocessing as mp from flsim.common.pytest_helper import ( assertEqual, assertNotEqual, assertAlmostEqual, assertEmpty, ) from flsim.servers.aggregator import Aggregator, AggregationType from flsim.tests.utils import ( create_model_with_value, model_parameters_equal_to_value, ) from flsim.utils.distributed.fl_distributed import FLDistributedUtils, OperationType def init_process( rank, world_size, aggregator, models, file_loc, pipe, distributed_op, ): FLDistributedUtils.dist_init( rank=rank, world_size=world_size, init_method=f"file://{file_loc}", use_cuda=False, ) aggregator.zero_weights() for i, m in enumerate(models): if i % world_size == rank: weight = i + 1 aggregator.apply_weight_to_update(delta=m, weight=weight) aggregator.add_update(m, weight=weight) module = aggregator.aggregate(distributed_op) sums, weights = 0.0, 0.0 all_sum = [(p.sum(), p.numel()) for p in module.parameters()] for s, w in all_sum: sums += float(s) weights += float(w) pipe.send(sums / weights) dist.destroy_process_group() def run_multiprocess_aggregation_test( aggregator, num_processes=1, num_models=4, distributed_op=OperationType.SUM_AND_BROADCAST, ): _, tmpfile = mkstemp(dir="/tmp") pipe_out, pipe_in = mp.Pipe(False) models = [create_model_with_value(1.0) for i in range(num_models)] processes = [] results = [] FLDistributedUtils.WORLD_SIZE = num_processes for pid in range(num_processes): p = mp.Process( target=init_process, args=( pid, num_processes, aggregator, models, tmpfile, pipe_in, distributed_op, ), ) p.start() processes.append(p) results.append(pipe_out) for p in processes: p.join() res = [r.recv() for r in results] return res AGGREGATION_TYPES = [ AggregationType.AVERAGE, AggregationType.WEIGHTED_AVERAGE, AggregationType.SUM, AggregationType.WEIGHTED_SUM, ] class TestAggregator: def test_zero_weights(self) -> None: model = create_model_with_value(0) ag = Aggregator(module=model, aggregation_type=AggregationType.AVERAGE) weight = 1.0 steps = 5 for _ in range(steps): delta = create_model_with_value(1.0) ag.apply_weight_to_update(delta=delta, weight=weight) ag.add_update(delta=delta, weight=weight) assertEqual(ag.sum_weights.item(), weight * steps) ag.zero_weights() assertEqual(ag.sum_weights.item(), 0) @pytest.mark.parametrize( "agg_type,num_process,num_models,expected_value", [ (AggregationType.AVERAGE, 4, 10, 1.0), (AggregationType.WEIGHTED_AVERAGE, 4, 10, 1.0), (AggregationType.WEIGHTED_SUM, 4, 10, 55.0), (AggregationType.SUM, 4, 10, 10.0), ], ) def test_multiprocess_aggregation( self, agg_type, num_process, num_models, expected_value ): model = create_model_with_value(0) ag = Aggregator(module=model, aggregation_type=agg_type) results = run_multiprocess_aggregation_test( ag, num_processes=num_process, num_models=num_models ) for result in results: assertAlmostEqual(result, expected_value, places=5) @pytest.mark.parametrize( "agg_type,expected_value", [ (AggregationType.AVERAGE, 1.0), (AggregationType.WEIGHTED_AVERAGE, 1.0), (AggregationType.WEIGHTED_SUM, 55.0), (AggregationType.SUM, 10.0), ], ) def test_aggregate(self, agg_type, expected_value): model = create_model_with_value(0) ag = Aggregator(module=model, aggregation_type=agg_type) ag.zero_weights() for i in range(10): delta = 
create_model_with_value(1.0) weight = i + 1 ag.apply_weight_to_update(delta=delta, weight=weight) ag.add_update(delta=delta, weight=weight) model = ag.aggregate() error_msg = model_parameters_equal_to_value(model, expected_value) assertEmpty(error_msg, msg=error_msg) @pytest.mark.parametrize( "agg_type,expected_value", [ (AggregationType.AVERAGE, 10.0), (AggregationType.WEIGHTED_AVERAGE, 55.0), (AggregationType.WEIGHTED_SUM, 55.0), (AggregationType.SUM, 10.0), ], ) def test_add_update(self, agg_type, expected_value): model = create_model_with_value(0) ag = Aggregator(module=model, aggregation_type=agg_type) ag.zero_weights() for i in range(10): delta = create_model_with_value(1.0) weight = i + 1 ag.apply_weight_to_update(delta=delta, weight=weight) ag.add_update(delta=delta, weight=weight) assertEqual(ag.sum_weights.item(), expected_value) @pytest.mark.parametrize( "agg_type,dist_op", [ (AggregationType.AVERAGE, OperationType.SUM), (AggregationType.WEIGHTED_AVERAGE, OperationType.SUM), (AggregationType.WEIGHTED_SUM, OperationType.SUM), (AggregationType.SUM, OperationType.SUM), ], ) def test_distributed_op_aggregation(self, agg_type, dist_op): """ Test aggregation with only SUM and no BROADCAST then each worker should have different parameters. """ model = create_model_with_value(0) ag = Aggregator(module=model, aggregation_type=agg_type) results = run_multiprocess_aggregation_test( ag, num_processes=4, num_models=10, distributed_op=dist_op, ) for r, v in zip(results, results[1:]): assertNotEqual(r, v)
30.457143
84
0.628831
1a940ce412411cb8216a95f805badec01ed25398
2,525
py
Python
python/attacks/selective_universal.py
rwightman/pytorch-nips2017-adversarial
1727494ea3bfcbc3b4754b35096e816e1269ff38
[ "Apache-2.0" ]
17
2018-02-05T15:09:01.000Z
2022-03-15T06:27:07.000Z
python/attacks/selective_universal.py
rwightman/pytorch-nips2017-adversarial
1727494ea3bfcbc3b4754b35096e816e1269ff38
[ "Apache-2.0" ]
1
2019-03-03T05:30:38.000Z
2019-03-08T04:44:39.000Z
python/attacks/selective_universal.py
rwightman/pytorch-nips2017-adversarial
1727494ea3bfcbc3b4754b35096e816e1269ff38
[ "Apache-2.0" ]
2
2019-07-26T07:17:09.000Z
2019-10-16T03:44:02.000Z
import os import numpy as np from scipy.misc import imsave import torch import torch.autograd as autograd import torch.utils.data as data import torchvision.transforms as transforms from processing import Mirror class SelectiveUniversal(object): def __init__(self, target_ensemble, w_matrix_files, max_epsilon=16, try_mirrors=False): super(SelectiveUniversal, self).__init__() self.target_ensemble = target_ensemble self.w_matrix_files = w_matrix_files self.max_epsilon = max_epsilon self.nllloss = torch.nn.NLLLoss().cuda() self.w_matrices = [torch.tanh(torch.FloatTensor((np.load(f))).cuda()) for f in self.w_matrix_files] if try_mirrors: self.mirrors = [lambda x: x, Mirror()] self.is_mirror = [False, True] else: self.mirrors = [lambda x: x] self.is_mirror = [False] def __call__(self, input, target, batch_idx, deadline_time): eps = self.max_epsilon / 255.0 input = input.cuda() input_var = autograd.Variable(input, volatile=False, requires_grad=False) log_probs_var = self.target_ensemble(input_var) log_probs = log_probs_var.data.cpu().numpy() pred_class = np.argsort(log_probs, axis=1)[:, -1] pred_class_var = autograd.Variable(torch.LongTensor(pred_class)).cuda() best_loss = 9999.0 best_perturbed = None best_is_fooled = False for w_id, w_matrix in enumerate(self.w_matrices): w_matrix_var = autograd.Variable(w_matrix, requires_grad=False) for func, is_mirrored in zip(self.mirrors, self.is_mirror): perturbed = input_var + func(eps * w_matrix_var) clamped = torch.clamp(perturbed, 0.0, 1.0) log_probs_perturbed_var = self.target_ensemble(clamped) loss = -self.nllloss(log_probs_perturbed_var, target=pred_class_var).data.cpu().numpy() if loss < best_loss: best_loss = loss best_perturbed = clamped.data.cpu().numpy() log_probs = log_probs_perturbed_var.data.cpu().numpy() top_class = np.argsort(log_probs, axis=1)[:, -1] if top_class != pred_class: best_is_fooled = True else: best_is_fooled = False return best_perturbed, None, best_is_fooled
37.132353
107
0.610297
b4f22c9519bfa112160224a733f6c5905b7afd5b
555
py
Python
reservoirpy/wellproductivitypy/pi/__init__.py
scuervo91/reservoirpy
a4db620baf3ff66a85c7f61b1919713a8642e6fc
[ "MIT" ]
16
2020-05-07T01:57:04.000Z
2021-11-27T12:45:59.000Z
reservoirpy/wellproductivitypy/pi/__init__.py
scuervo91/reservoirpy
a4db620baf3ff66a85c7f61b1919713a8642e6fc
[ "MIT" ]
null
null
null
reservoirpy/wellproductivitypy/pi/__init__.py
scuervo91/reservoirpy
a4db620baf3ff66a85c7f61b1919713a8642e6fc
[ "MIT" ]
5
2020-05-12T07:28:24.000Z
2021-12-10T21:24:59.000Z
from .inflow import OilInflow, oil_inflow_curve, oil_j, gas_inflow_curve, gas_j, GasInflow from .outflow import (gas_pressure_profile, gas_outflow_curve, gas_pressure_profile_correlation, potential_energy_change,kinetic_energy_change,frictional_pressure_drop, one_phase_pressure_profile,flow_regime_plot,hb_correlation,two_phase_pressure_profile, two_phase_outflow_curve, gray_correlation, two_phase_upward_pressure,gas_upward_pressure) from .als import Als from .jet_pump import JetPump, nozzle_flow, minimum_suction_area from .esp import Esp
69.375
96
0.87027
56d6e6bf4e849c83038cd523bf4f7ddc922b8d8a
13,675
py
Python
shaptools/shapcli.py
Simranpal/shaptools
1951d234b51114379befab11c99c92dfa179a83f
[ "Apache-2.0" ]
10
2019-03-21T09:27:19.000Z
2022-03-30T13:46:30.000Z
shaptools/shapcli.py
Simranpal/shaptools
1951d234b51114379befab11c99c92dfa179a83f
[ "Apache-2.0" ]
22
2019-02-22T14:50:25.000Z
2022-02-02T16:39:32.000Z
shaptools/shapcli.py
Simranpal/shaptools
1951d234b51114379befab11c99c92dfa179a83f
[ "Apache-2.0" ]
10
2019-03-12T15:55:07.000Z
2021-06-23T11:45:23.000Z
""" Code to expose some useful methods using the command line :author: xarbulu :organization: SUSE LLC :contact: [email protected] :since: 2019-07-11 """ import logging import argparse import json from shaptools import hana PROG = 'shapcli' LOGGING_FORMAT = '%(message)s' class DecodedFormatter(logging.Formatter): """ Custom formatter to remove the b'' from the logged text """ def format(self, record): message = super(DecodedFormatter, self).format(record) if message.startswith('b\''): message = message.split('\'')[1] return message class ConfigData(object): """ Class to store the required configuration data """ def __init__(self, data_dict, logger): try: self.sid = data_dict['sid'] self.instance = data_dict['instance'] self.password = data_dict['password'] self.remote = data_dict.get('remote', None) except KeyError as err: logger.error(err) logger.error('Configuration file must have the sid, instance and password entries') raise def setup_logger(level): """ Setup logging """ logger = logging.getLogger() handler = logging.StreamHandler() formatter = DecodedFormatter(LOGGING_FORMAT) handler.setFormatter(formatter) logger.addHandler(handler) logger.setLevel(level=level) return logger def parse_arguments(): """ Parse command line arguments """ parser = argparse.ArgumentParser(PROG) parser.add_argument( '-v', '--verbosity', help='Python logging level. Options: DEBUG, INFO, WARN, ERROR (INFO by default)') parser.add_argument( '-r', '--remote', help='Run the command in other machine using ssh') parser.add_argument( '-c', '--config', help='JSON configuration file with SAP HANA instance data (sid, instance and password)') parser.add_argument( '-s', '--sid', help='SAP HANA sid') parser.add_argument( '-i', '--instance', help='SAP HANA instance') parser.add_argument( '-p', '--password', help='SAP HANA password') subcommands = parser.add_subparsers( title='subcommands', description='valid subcommands', help='additional help') hana_subparser = subcommands.add_parser( 'hana', help='Commands to interact with SAP HANA databse') sr_subparser = subcommands.add_parser( 'sr', help='Commands to interact with SAP HANA system replication') parse_hana_arguments(hana_subparser) parse_sr_arguments(sr_subparser) args = parser.parse_args() return parser, args def parse_hana_arguments(hana_subparser): """ Parse hana subcommand arguements """ subcommands = hana_subparser.add_subparsers( title='hana', dest='hana', help='Commands to interact with SAP HANA databse') subcommands.add_parser( 'is_running', help='Check if SAP HANA database is running') subcommands.add_parser( 'version', help='Show SAP HANA database version') subcommands.add_parser( 'start', help='Start SAP HANA database') subcommands.add_parser( 'stop', help='Stop SAP HANA database') subcommands.add_parser( 'info', help='Show SAP HANA database information') subcommands.add_parser( 'kill', help='Kill all SAP HANA database processes') subcommands.add_parser( 'overview', help='Show SAP HANA database overview') subcommands.add_parser( 'landscape', help='Show SAP HANA database landscape') subcommands.add_parser( 'uninstall', help='Uninstall SAP HANA database instance') dummy = subcommands.add_parser( 'dummy', help='Get data from DUMMY table') dummy.add_argument( '--key_name', help='Keystore to connect to sap hana db '\ '(if this value is set user, password and database are omitted') dummy.add_argument( '--user_name', help='User to connect to sap hana db') dummy.add_argument( '--user_password', help='Password to connect to sap hana db') dummy.add_argument( 
'--database', help='Database name to connect') hdbsql = subcommands.add_parser( 'hdbsql', help='Run a sql command with hdbsql') hdbsql.add_argument( '--key_name', help='Keystore to connect to sap hana db '\ '(if this value is set user, password and database are omitted') hdbsql.add_argument( '--user_name', help='User to connect to sap hana db') hdbsql.add_argument( '--user_password', help='Password to connect to sap hana db') hdbsql.add_argument( '--database', help='Database name to connect') hdbsql.add_argument( '--query', help='Query to execute') user_key = subcommands.add_parser( 'user', help='Create a new user key') user_key.add_argument( '--key_name', help='Key name', required=True) user_key.add_argument( '--environment', help='Database location (host:port)', required=True) user_key.add_argument( '--user_name', help='User to connect to sap hana db', required=True) user_key.add_argument( '--user_password', help='Password to connect to sap hana db', required=True) user_key.add_argument( '--database', help='Database name to connect', required=True) backup = subcommands.add_parser( 'backup', help='Create node backup') backup.add_argument( '--name', help='Backup file name', required=True) backup.add_argument( '--database', help='Database name to connect', required=True) backup.add_argument( '--key_name', help='Key name') backup.add_argument( '--user_name', help='User to connect to sap hana db') backup.add_argument( '--user_password', help='Password to connect to sap hana db') def parse_sr_arguments(sr_subparser): """ Parse hana sr subcommand arguements """ subcommands = sr_subparser.add_subparsers( title='sr', dest='sr', help='Commands to interact with SAP HANA system replication') state = subcommands.add_parser( 'state', help='Show SAP HANA system replication state') state.add_argument('--sapcontrol', help='Run with sapcontrol', action='store_true') status = subcommands.add_parser( 'status', help='Show SAP HANAsystem replication status') status.add_argument('--sapcontrol', help='Run with sapcontrol', action='store_true') subcommands.add_parser( 'disable', help='Disable SAP HANA system replication (to be executed in Primary node)') cleanup = subcommands.add_parser( 'cleanup', help='Cleanup SAP HANA system replication') cleanup.add_argument('--force', help='Force the cleanup', action='store_true') subcommands.add_parser( 'takeover', help='Perform a takeover operation (to be executed in Secondary node)') enable = subcommands.add_parser( 'enable', help='Enable SAP HANA system replication primary site') enable.add_argument('--name', help='Primary site name', required=True) register = subcommands.add_parser( 'register', help='Register SAP HANA system replication secondary site') register.add_argument('--name', help='Secondary site name', required=True) register.add_argument('--remote_host', help='Primary site hostname', required=True) register.add_argument( '--remote_instance', help='Primary site SAP HANA instance number', required=True) register.add_argument( '--replication_mode', help='System replication replication mode', default='sync') register.add_argument( '--operation_mode', help='System replication operation mode', default='logreplay') unregister = subcommands.add_parser( 'unregister', help='Unegister SAP HANA system replication secondary site') unregister.add_argument('--name', help='Primary site name', required=True) copy_ssfs = subcommands.add_parser( 'copy_ssfs', help='Copy current node ssfs files to other host') copy_ssfs.add_argument('--remote_host', help='Other host name', 
required=True) copy_ssfs.add_argument( '--remote_password', help='Other host SAP HANA instance password (sid and instance must match '\ 'with the current host)', required=True) # pylint:disable=W0212 def uninstall(hana_instance, logger): """ Uninstall SAP HANA database instance """ logger.info( 'This command will uninstall SAP HANA instance '\ 'with sid %s and instance number %s (y/n): ', hana_instance.sid, hana_instance.inst) response = input() if response == 'y': user = hana.HanaInstance.sidadm_user(sid=hana_instance.sid) hana_instance.uninstall(user, hana_instance._password) else: logger.info('Command execution canceled') def run_hdbsql(hana_instance, hana_args, cmd): """ Run hdbsql command """ hdbsql_cmd = hana_instance._hdbsql_connect( key_name=hana_args.key_name, user_name=hana_args.user_name, user_password=hana_args.user_password) cmd = '{hdbsql_cmd} {database}\\"{cmd}\\"'.format( hdbsql_cmd=hdbsql_cmd, database='-d {} '.format(hana_args.database) if hana_args.database else '', cmd=cmd) hana_instance._run_hana_command(cmd) def run_hana_subcommands(hana_instance, hana_args, logger): """ Run hana subcommands """ str_args = hana_args.hana if str_args == 'is_running': result = hana_instance.is_running() logger.info('SAP HANA database running state: %s', result) elif str_args == 'version': hana_instance.get_version() elif str_args == 'start': hana_instance.start() elif str_args == 'stop': hana_instance.stop() elif str_args == 'info': hana_instance._run_hana_command('HDB info') elif str_args == 'kill': hana_instance._run_hana_command('HDB kill-9') elif str_args == 'overview': hana_instance._run_hana_command('HDBSettings.sh systemOverview.py') elif str_args == 'landscape': hana_instance._run_hana_command('HDBSettings.sh landscapeHostConfiguration.py') elif str_args == 'uninstall': uninstall(hana_instance, logger) elif str_args == 'dummy': run_hdbsql(hana_instance, hana_args, 'SELECT * FROM DUMMY') elif str_args == 'hdbsql': run_hdbsql(hana_instance, hana_args, hana_args.query) elif str_args == 'user': hana_instance.create_user_key( hana_args.key_name, hana_args.environment, hana_args.user_name, hana_args.user_password, hana_args.database) elif str_args == 'backup': hana_instance.create_backup( hana_args.database, hana_args.name, hana_args.key_name, hana_args.user_name, hana_args.user_password) def run_sr_subcommands(hana_instance, sr_args, logger): """ Run hana subcommands """ str_args = sr_args.sr if str_args == 'state': # hana_instance.get_sr_state() cmd = 'hdbnsutil -sr_state{}'.format(' --sapcontrol=1' if sr_args.sapcontrol else '') hana_instance._run_hana_command(cmd) elif str_args == 'status': # hana_instance.get_sr_status() cmd = 'HDBSettings.sh systemReplicationStatus.py{}'.format( ' --sapcontrol=1' if sr_args.sapcontrol else '') hana_instance._run_hana_command(cmd, exception=False) elif str_args == 'disable': hana_instance.sr_disable_primary() elif str_args == 'cleanup': hana_instance.sr_cleanup(sr_args.force) elif str_args == 'takeover': hana_instance._run_hana_command('hdbnsutil -sr_takeover') elif str_args == 'enable': hana_instance.sr_enable_primary(sr_args.name) elif str_args == 'register': hana_instance.sr_register_secondary( sr_args.name, sr_args.remote_host, sr_args.remote_instance, sr_args.replication_mode, sr_args.operation_mode) elif str_args == 'unregister': hana_instance.sr_unregister_secondary(sr_args.name) elif str_args == 'copy_ssfs': hana_instance.copy_ssfs_files(sr_args.remote_host, sr_args.remote_password) def load_config_file(config_file, logger): """ Load 
configuration file data """ with open(config_file, 'r') as f_ptr: json_data = json.load(f_ptr) return json_data # pylint:disable=W0212 def run(): """ Main execution """ parser, args = parse_arguments() logger = setup_logger(args.verbosity or logging.DEBUG) # If -c or --config flag is received data is loaded from the configuration file if args.config: data = load_config_file(args.config, logger) config_data = ConfigData(data, logger) elif args.sid and args.instance and args.password: config_data = ConfigData(vars(args), logger) else: logger.info( 'Configuration file or sid, instance and passwords parameters must be provided\n') parser.print_help() exit(1) if args.remote: config_data.remote = args.remote try: hana_instance = hana.HanaInstance( config_data.sid, config_data.instance, config_data.password, remote_host=config_data.remote) if vars(args).get('hana'): run_hana_subcommands(hana_instance, args, logger) elif vars(args).get('sr'): run_sr_subcommands(hana_instance, args, logger) else: parser.print_help() except Exception as err: logger.error(err) exit(1) if __name__ == "__main__": # pragma: no cover run()
36.760753
96
0.665448
ca8199d0086f2ce0e285fa8990b905b20d5dcb3c
5,487
py
Python
oauthlib/openid/connect/core/endpoints/pre_configured.py
mgorny/oauthlib
b71636e85f845b79b5a56de6480fcba9d1415720
[ "BSD-3-Clause" ]
null
null
null
oauthlib/openid/connect/core/endpoints/pre_configured.py
mgorny/oauthlib
b71636e85f845b79b5a56de6480fcba9d1415720
[ "BSD-3-Clause" ]
null
null
null
oauthlib/openid/connect/core/endpoints/pre_configured.py
mgorny/oauthlib
b71636e85f845b79b5a56de6480fcba9d1415720
[ "BSD-3-Clause" ]
null
null
null
# -*- coding: utf-8 -*- """ oauthlib.openid.connect.core.endpoints.pre_configured ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ This module is an implementation of various endpoints needed for providing OpenID Connect servers. """ from oauthlib.oauth2.rfc6749.endpoints import ( AuthorizationEndpoint, IntrospectEndpoint, ResourceEndpoint, RevocationEndpoint, TokenEndpoint ) from oauthlib.oauth2.rfc6749.grant_types import ( AuthorizationCodeGrant as OAuth2AuthorizationCodeGrant, ImplicitGrant as OAuth2ImplicitGrant, ClientCredentialsGrant, RefreshTokenGrant, ResourceOwnerPasswordCredentialsGrant ) from oauthlib.oauth2.rfc6749.tokens import BearerToken from ..grant_types import ( AuthorizationCodeGrant, ImplicitGrant, HybridGrant, ) from ..grant_types.dispatchers import ( AuthorizationCodeGrantDispatcher, ImplicitTokenGrantDispatcher, AuthorizationTokenGrantDispatcher ) from ..tokens import JWTToken from .userinfo import UserInfoEndpoint class Server(AuthorizationEndpoint, IntrospectEndpoint, TokenEndpoint, ResourceEndpoint, RevocationEndpoint, UserInfoEndpoint): """An all-in-one endpoint featuring all four major grant types.""" def __init__(self, request_validator, token_expires_in=None, token_generator=None, refresh_token_generator=None, *args, **kwargs): """Construct a new all-grants-in-one server. :param request_validator: An implementation of oauthlib.oauth2.RequestValidator. :param token_expires_in: An int or a function to generate a token expiration offset (in seconds) given a oauthlib.common.Request object. :param token_generator: A function to generate a token from a request. :param refresh_token_generator: A function to generate a token from a request for the refresh token. :param kwargs: Extra parameters to pass to authorization-, token-, resource-, and revocation-endpoint constructors. 
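        A minimal usage sketch (``MyRequestValidator`` stands in for a
        hypothetical subclass of ``oauthlib.oauth2.RequestValidator``
        implementing the required callbacks)::

            server = Server(MyRequestValidator(), token_expires_in=3600)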
""" self.auth_grant = OAuth2AuthorizationCodeGrant(request_validator) self.implicit_grant = OAuth2ImplicitGrant(request_validator) self.password_grant = ResourceOwnerPasswordCredentialsGrant( request_validator) self.credentials_grant = ClientCredentialsGrant(request_validator) self.refresh_grant = RefreshTokenGrant(request_validator) self.openid_connect_auth = AuthorizationCodeGrant(request_validator) self.openid_connect_implicit = ImplicitGrant(request_validator) self.openid_connect_hybrid = HybridGrant(request_validator) self.bearer = BearerToken(request_validator, token_generator, token_expires_in, refresh_token_generator) self.jwt = JWTToken(request_validator, token_generator, token_expires_in, refresh_token_generator) self.auth_grant_choice = AuthorizationCodeGrantDispatcher(default_grant=self.auth_grant, oidc_grant=self.openid_connect_auth) self.implicit_grant_choice = ImplicitTokenGrantDispatcher(default_grant=self.implicit_grant, oidc_grant=self.openid_connect_implicit) # See http://openid.net/specs/oauth-v2-multiple-response-types-1_0.html#Combinations for valid combinations # internally our AuthorizationEndpoint will ensure they can appear in any order for any valid combination AuthorizationEndpoint.__init__(self, default_response_type='code', response_types={ 'code': self.auth_grant_choice, 'token': self.implicit_grant_choice, 'id_token': self.openid_connect_implicit, 'id_token token': self.openid_connect_implicit, 'code token': self.openid_connect_hybrid, 'code id_token': self.openid_connect_hybrid, 'code id_token token': self.openid_connect_hybrid, 'none': self.auth_grant }, default_token_type=self.bearer) self.token_grant_choice = AuthorizationTokenGrantDispatcher(request_validator, default_grant=self.auth_grant, oidc_grant=self.openid_connect_auth) TokenEndpoint.__init__(self, default_grant_type='authorization_code', grant_types={ 'authorization_code': self.token_grant_choice, 'password': self.password_grant, 'client_credentials': self.credentials_grant, 'refresh_token': self.refresh_grant, }, default_token_type=self.bearer) ResourceEndpoint.__init__(self, default_token='Bearer', token_types={'Bearer': self.bearer, 'JWT': self.jwt}) RevocationEndpoint.__init__(self, request_validator) IntrospectEndpoint.__init__(self, request_validator) UserInfoEndpoint.__init__(self, request_validator)
50.805556
154
0.63696
88e45ca30ef86616bf1596a563d7dd615cdb9d24
918
py
Python
backend/app/literature/routers/resource_descriptor_router.py
alliance-genome/agr_literature_service
2278316422d5c3ab65e21bb97d91e861e48853c5
[ "MIT" ]
null
null
null
backend/app/literature/routers/resource_descriptor_router.py
alliance-genome/agr_literature_service
2278316422d5c3ab65e21bb97d91e861e48853c5
[ "MIT" ]
39
2021-10-18T17:02:49.000Z
2022-03-28T20:56:24.000Z
backend/app/literature/routers/resource_descriptor_router.py
alliance-genome/agr_literature_service
2278316422d5c3ab65e21bb97d91e861e48853c5
[ "MIT" ]
1
2021-10-21T00:11:18.000Z
2021-10-21T00:11:18.000Z
from sqlalchemy.orm import Session

from fastapi import APIRouter
from fastapi import Depends
from fastapi import status
from fastapi import Security

from fastapi_okta import OktaUser

from literature import database
from literature.user import set_global_user_id
from literature.crud import resource_descriptor_crud
from literature.routers.authentication import auth


router = APIRouter(
    prefix="/resource_descriptor",
    tags=['Resource Descriptor']
)

get_db = database.get_db
db_session: Session = Depends(get_db)
db_user = Security(auth.get_user)


@router.get('/', status_code=200)
def show(db: Session = db_session):
    return resource_descriptor_crud.show(db)


@router.put('/', status_code=status.HTTP_202_ACCEPTED)
def update(user: OktaUser = db_user,
           db: Session = db_session):
    set_global_user_id(db, user.id)
    return resource_descriptor_crud.update(db)
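A router like this is only useful once it is mounted on an application. The sketch below is hypothetical (it assumes the `literature` package is importable) and shows the standard FastAPI wiring.

# Hypothetical wiring example, not part of the original file.
from fastapi import FastAPI

from literature.routers import resource_descriptor_router

app = FastAPI()
# Mount the /resource_descriptor endpoints defined above.
app.include_router(resource_descriptor_router.router)

# Then serve with, for example:  uvicorn main:app --reload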
22.390244
52
0.761438
6f8a6c51e0d29192cead5166f87d67b9d0fe1042
1,779
py
Python
facebook_business/adobjects/adcreativetextdata.py
MyrikLD/facebook-python-business-sdk
a53c8ba0e8f7d0b41b385c60089f6ba00fa5c814
[ "CNRI-Python" ]
576
2018-05-01T19:09:32.000Z
2022-03-31T11:45:11.000Z
facebook_business/adobjects/adcreativetextdata.py
MyrikLD/facebook-python-business-sdk
a53c8ba0e8f7d0b41b385c60089f6ba00fa5c814
[ "CNRI-Python" ]
217
2018-05-03T07:31:59.000Z
2022-03-29T14:19:52.000Z
facebook_business/adobjects/adcreativetextdata.py
MyrikLD/facebook-python-business-sdk
a53c8ba0e8f7d0b41b385c60089f6ba00fa5c814
[ "CNRI-Python" ]
323
2018-05-01T20:32:26.000Z
2022-03-29T07:05:12.000Z
# Copyright 2014 Facebook, Inc. # You are hereby granted a non-exclusive, worldwide, royalty-free license to # use, copy, modify, and distribute this software in source code or binary # form for use in connection with the web services and APIs provided by # Facebook. # As with any software that integrates with the Facebook platform, your use # of this software is subject to the Facebook Developer Principles and # Policies [http://developers.facebook.com/policy/]. This copyright notice # shall be included in all copies or substantial portions of the software. # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL # THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER # DEALINGS IN THE SOFTWARE. from facebook_business.adobjects.abstractobject import AbstractObject """ This class is auto-generated. For any issues or feature requests related to this class, please let us know on github and we'll fix in our codegen framework. We'll not be able to accept pull request for this class. """ class AdCreativeTextData( AbstractObject, ): def __init__(self, api=None): super(AdCreativeTextData, self).__init__() self._isAdCreativeTextData = True self._api = api class Field(AbstractObject.Field): message = 'message' _field_types = { 'message': 'string', } @classmethod def _get_field_enum_info(cls): field_enum_info = {} return field_enum_info
34.211538
79
0.7448
62c4d5e6f1bf88ff98a5b0f49ca9138fbb551d3d
843
py
Python
setup.py
jbirky/vplanet_inference
35d69a84aa6d9011d5ff5b26da9d977d69b510bf
[ "MIT" ]
null
null
null
setup.py
jbirky/vplanet_inference
35d69a84aa6d9011d5ff5b26da9d977d69b510bf
[ "MIT" ]
null
null
null
setup.py
jbirky/vplanet_inference
35d69a84aa6d9011d5ff5b26da9d977d69b510bf
[ "MIT" ]
null
null
null
from setuptools import setup

setup(name="vplanet_inference",
      version="0.0.1",
      description="Python tools for statistical inference with VPLanet",
      author="Jessica Birky",
      author_email="[email protected]",
      license = "MIT",
      url="https://github.com/jbirky/vplanet_inference",
      packages=["vplanet_inference"],
      install_requires = ["numpy",
                          "matplotlib >= 2.0.0",
                          "scipy",
                          "george",
                          "emcee >= 3.0",
                          "dynesty",
                          "corner",
                          "sklearn",
                          "pybind11",
                          "pytest",
                          "h5py",
                          "tqdm",
                          "vplanet >= 2.0.6"]
      )
35.125
72
0.399763
1e27d8297d34beaf228f689d12f435090da0eef9
155
py
Python
setup.py
Subaru-PFS/ics_hxActor
3fae5388e535635c52e168b8a7aef0bd70b60012
[ "MIT" ]
null
null
null
setup.py
Subaru-PFS/ics_hxActor
3fae5388e535635c52e168b8a7aef0bd70b60012
[ "MIT" ]
null
null
null
setup.py
Subaru-PFS/ics_hxActor
3fae5388e535635c52e168b8a7aef0bd70b60012
[ "MIT" ]
null
null
null
import distutils
from distutils.core import setup, Extension
import sdss3tools
import os

sdss3tools.setup(
    description = "Toy SDSS-3 actor.",
)
14.090909
43
0.741935
c3b3c989daa45930d37cbc2617139303bc45d6ba
1,418
py
Python
tests/test_level3/test_not.py
cclauss/soupsieve
1468596369d97c9afd74784933852ddbf04ee2b0
[ "MIT" ]
null
null
null
tests/test_level3/test_not.py
cclauss/soupsieve
1468596369d97c9afd74784933852ddbf04ee2b0
[ "MIT" ]
null
null
null
tests/test_level3/test_not.py
cclauss/soupsieve
1468596369d97c9afd74784933852ddbf04ee2b0
[ "MIT" ]
null
null
null
"""Test not selectors.""" from .. import util from bs4 import BeautifulSoup as BS class TestNot(util.TestCase): """Test not selectors.""" MARKUP = """ <div> <p id="0">Some text <span id="1"> in a paragraph</span>.</p> <a id="2" href="http://google.com">Link</a> <span id="3">Direct child</span> <pre id="pre"> <span id="4">Child 1</span> <span id="5">Child 2</span> <span id="6">Child 3</span> </pre> </div> """ def test_not(self): """Test not.""" self.assert_selector( self.MARKUP, 'div :not([id="1"])', ["0", "2", "3", "4", "5", "6", "pre"], flags=util.HTML ) def test_not_and_type(self): """Test not with a tag.""" self.assert_selector( self.MARKUP, 'span:not([id="1"])', ["3", "4", "5", "6"], flags=util.HTML ) def test_not_case(self): """Test not token case insensitivity.""" self.assert_selector( self.MARKUP, 'div :NOT([id="1"])', ["0", "2", "3", "4", "5", "6", "pre"], flags=util.HTML ) def test_none_inputs(self): """Test weird inputs.""" soup = BS('<span foo="something">text</span>', 'html.parser') soup.span['foo'] = None self.assertEqual(len(soup.select('span:not([foo])')), 0)
24.448276
69
0.476728
4152b63b7f76ac0438df46a8ef3eb677648f3220
1,243
py
Python
python_modules/libraries/dagster-postgres/dagster_postgres/schedule_storage/alembic/versions/c63a27054f08_add_snapshots_to_run_storage.py
JPeer264/dagster-fork
32cc87a36134be7c442fa85d6867eb1d3301aea0
[ "Apache-2.0" ]
3
2020-09-09T04:10:23.000Z
2021-11-08T02:10:42.000Z
python_modules/libraries/dagster-postgres/dagster_postgres/schedule_storage/alembic/versions/c63a27054f08_add_snapshots_to_run_storage.py
JPeer264/dagster-fork
32cc87a36134be7c442fa85d6867eb1d3301aea0
[ "Apache-2.0" ]
2
2021-05-11T13:36:27.000Z
2021-09-03T01:53:11.000Z
python_modules/libraries/dagster-postgres/dagster_postgres/schedule_storage/alembic/versions/c63a27054f08_add_snapshots_to_run_storage.py
JPeer264/dagster-fork
32cc87a36134be7c442fa85d6867eb1d3301aea0
[ "Apache-2.0" ]
null
null
null
"""add snapshots to run storage Revision ID: c63a27054f08 Revises: 1ebdd7a9686f Create Date: 2020-04-09 05:57:20.639458 """ import sqlalchemy as sa from alembic import op from dagster.core.storage.migration.utils import has_column, has_table # alembic magic breaks pylint # pylint: disable=no-member # revision identifiers, used by Alembic. revision = 'c63a27054f08' down_revision = '1ebdd7a9686f' branch_labels = None depends_on = None def upgrade(): if not has_table('snapshots'): op.create_table( 'snapshots', sa.Column('id', sa.Integer, primary_key=True, autoincrement=True, nullable=False), sa.Column('snapshot_id', sa.String(255), unique=True, nullable=False), sa.Column('snapshot_body', sa.LargeBinary, nullable=False), sa.Column('snapshot_type', sa.String(63), nullable=False), ) if not has_column('runs', 'snapshot_id'): op.add_column( 'runs', sa.Column('snapshot_id', sa.String(255), sa.ForeignKey('snapshots.snapshot_id')), ) def downgrade(): if has_column('runs', 'snapshot_id'): op.drop_column('runs', 'snapshot_id') if has_table('snapshots'): op.drop_table('snapshots')
27.021739
94
0.66613
97a3fe90927f9306ceba21f7aa005a77f2e990d2
760
py
Python
trigger1D.py
NCBI-Hackathons/Cells2Image
96fa7e4b3df63b6fecc415412693040bb59ba8d1
[ "MIT" ]
null
null
null
trigger1D.py
NCBI-Hackathons/Cells2Image
96fa7e4b3df63b6fecc415412693040bb59ba8d1
[ "MIT" ]
1
2018-03-21T15:16:40.000Z
2018-03-21T17:59:01.000Z
trigger1D.py
NCBI-Hackathons/Cells2Image
96fa7e4b3df63b6fecc415412693040bb59ba8d1
[ "MIT" ]
3
2018-03-19T16:22:42.000Z
2018-03-20T16:45:27.000Z
from scipy.stats import zscore import numpy as np def gradient_trigger(input_series, threshold=-2.5): # function takes a 1D input series and outputs the gradients, z scores of the gradients # and the frame when the z score threshold is first crossed gradients = np.gradient(input_series) # set gradient of first data point to zero gradients[0] = 0. # set first 2 z_scores to 0 z_scores = [0., 0.] for i in range(2, len(gradients)): z_scores.append(zscore(gradients[:i])[-1]) z_scores = np.array(z_scores) if threshold >= 0: trigger_index = np.where(z_scores >= threshold)[0][0] else: trigger_index = np.where(z_scores <= threshold)[0][0] return gradients, z_scores, trigger_index
28.148148
91
0.675
1f9bcccb6f467a865e224db1defcd314f7a9515b
1,406
py
Python
test/test_shopping_cart.py
jsoles7/shopping-cart
d68251a5fa8122e281c4d2240c9d91316c8acc48
[ "MIT" ]
1
2020-02-16T22:31:20.000Z
2020-02-16T22:31:20.000Z
test/test_shopping_cart.py
jsoles7/shopping-cart
d68251a5fa8122e281c4d2240c9d91316c8acc48
[ "MIT" ]
null
null
null
test/test_shopping_cart.py
jsoles7/shopping-cart
d68251a5fa8122e281c4d2240c9d91316c8acc48
[ "MIT" ]
null
null
null
# shopping-cart/test/my_test.py

import pytest

from shopping_cart import to_usd, find_product

def test_to_usd():
    #Should apply correct formatting
    assert to_usd(7.25) == "$7.25"

    #Should display two decimal places
    assert to_usd(8.5) == "$8.50"

    #Should round to two places
    assert to_usd(3.444444) == "$3.44"

    #Should display comma separators
    assert to_usd(1234567890.5555555) == "$1,234,567,890.56"

def test_find_product():
    #create a quick list to test
    product_list = [
        {"id":5, "name": "Green Chile Anytime Sauce", "department": "pantry", "aisle": "marinades meat preparation", "price": 7.99},
        {"id":6, "name": "Dry Nose Oil", "department": "personal care", "aisle": "cold flu allergy", "price": 21.99},
        {"id":7, "name": "Pure Coconut Water With Orange", "department": "beverages", "aisle": "juice nectars", "price": 3.50},
        {"id":8, "name": "Cut Russet Potatoes Steam N' Mash", "department": "frozen", "aisle": "frozen produce", "price": 4.25}
    ]

    # if there is a match, it should find and return a product
    product = find_product("7", product_list)
    assert product["name"] == "Pure Coconut Water With Orange"

    #the code below is taken from Prof. Rossetti's test examples
    # if there is no match, it should raise an IndexError
    with pytest.raises(IndexError):
        find_product("100", product_list)
37
132
0.650071
a2630ec7f28b03fec914207bc887458ff34058e9
1,504
py
Python
src/Workstation/Important/Path Planning and Motion/astarsearch.py
khansaadbinhasan/Low-Cost-Autonomous-Vehicle-for-Inventory-Movement-in-Warehouses
3c24390a49a76f893675e606ca24fccdbcce43e2
[ "MIT" ]
null
null
null
src/Workstation/Important/Path Planning and Motion/astarsearch.py
khansaadbinhasan/Low-Cost-Autonomous-Vehicle-for-Inventory-Movement-in-Warehouses
3c24390a49a76f893675e606ca24fccdbcce43e2
[ "MIT" ]
null
null
null
src/Workstation/Important/Path Planning and Motion/astarsearch.py
khansaadbinhasan/Low-Cost-Autonomous-Vehicle-for-Inventory-Movement-in-Warehouses
3c24390a49a76f893675e606ca24fccdbcce43e2
[ "MIT" ]
1
2021-06-17T16:28:48.000Z
2021-06-17T16:28:48.000Z
def astar(m,startp,endp,x,y):
    w,h = x,y
    sx,sy = startp #Start Point
    ex,ey = endp #End Point

    #[parent node, x, y, g, f]
    node = [None,sx,sy,0,abs(ex-sx)+abs(ey-sy)]
    closeList = [node]
    createdList = {}
    createdList[sy*w+sx] = node
    k=0
    while(closeList):
        node = closeList.pop(0)
        x = node[1]
        y = node[2]
        l = node[3]+1
        k+=1
        #find neighbours
        if k!=0:
            neighbours = ((x,y+1),(x,y-1),(x+1,y),(x-1,y))
        else:
            neighbours = ((x+1,y),(x-1,y),(x,y+1),(x,y-1))
        for nx,ny in neighbours:
            if nx==ex and ny==ey:
                path = [(ex,ey)]
                while node:
                    path.append((node[1],node[2]))
                    node = node[0]
                return list(reversed(path))
            if 0<=nx<w and 0<=ny<h and m[ny][nx]==0:
                if ny*w+nx not in createdList:
                    nn = (node,nx,ny,l,l+abs(nx-ex)+abs(ny-ey))
                    createdList[ny*w+nx] = nn
                    #adding to closelist ,using binary heap
                    nni = len(closeList)
                    closeList.append(nn)
                    while nni:
                        i = (nni-1)//2
                        if closeList[i][4]>nn[4]:
                            closeList[i],closeList[nni] = nn,closeList[i]
                            nni = i
                        else:
                            break
    return []
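A small worked example makes the calling convention clearer; the grid below is invented for illustration. `m` is indexed as `m[y][x]` with 0 marking a free cell, and the final two arguments are the grid width and height.

# Illustrative 5x4 grid: 0 = free, 1 = blocked.
# astar as defined above (e.g. `from astarsearch import astar`).
m = [
    [0, 0, 0, 0, 0],
    [0, 1, 1, 1, 0],
    [0, 0, 0, 1, 0],
    [1, 1, 0, 0, 0],
]

width, height = 5, 4
path = astar(m, (0, 0), (4, 3), width, height)
print(path)   # a list of (x, y) pairs from the start cell to the goal cell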
34.181818
73
0.400266
d4ca1209faa9939d9288d21b3d75c21952108e26
2,143
py
Python
serial_device.py
s-fifteen-instruments/Digital_Pattern_Generator_DPG1
e8113154ba150a5d19e26171c9204cf22d55760d
[ "MIT" ]
null
null
null
serial_device.py
s-fifteen-instruments/Digital_Pattern_Generator_DPG1
e8113154ba150a5d19e26171c9204cf22d55760d
[ "MIT" ]
null
null
null
serial_device.py
s-fifteen-instruments/Digital_Pattern_Generator_DPG1
e8113154ba150a5d19e26171c9204cf22d55760d
[ "MIT" ]
null
null
null
from __future__ import print_function  # Needed for compatibility with Py2

"""
Author: Alessandro Cere
Modified by: Chin Chean Lim, 03/06/2019
Created: 2017.10.16

Description:
General serial device
"""

import serial
import time
from serial import SerialException


class SerialDevice(serial.Serial):
    """
    The usb device is seen as an object through this class,
    inherited from the generic serial one.
    """

    def __init__(self, device_path=None, timeout=0.2):
        """
        Initializes the USB device.
        It requires the full path to the serial device as an argument.
        """
        try:
            serial.Serial.__init__(self, device_path, timeout=timeout)
            self.timeout = timeout
            self.baudrate = 115200
            self.stopbits = serial.STOPBITS_ONE
            self.bytesize = serial.EIGHTBITS
            self.parity = serial.PARITY_NONE
            self._reset_buffers()
        except SerialException:
            print('Connection failed')

    def _closeport(self):
        self.close()

    def _reset_buffers(self):
        self.reset_input_buffer()
        self.reset_output_buffer()

    def _getresponse(self, cmd):
        self._reset_buffers()
        self.write((cmd + '\n').encode())
        return self.readlines()

    # This function is for the timestamp TDC device.
    def _getresponseTime(self, cmd, t_sleep):
        # This function bypasses the termination character (since there is none in timestamp mode)
        # and streams data from the device for the integration time.
        self._reset_buffers()
        self.write((cmd + '\n').encode())
        memory = b''
        time0 = time.time()
        while (time.time() - time0 < t_sleep):
            # Stream data for the duration of the integration time plus some delay set in usbcount_class.
            Buffer_length = self.in_waiting
            memory = memory + self.read(Buffer_length)
        Rlength = len(memory)
        print(str(Rlength) + " Bytes Recorded")
        return memory

    def help(self):
        """
        Prints device help to the screen
        """
        ([print(x.decode().strip()) for x in self._getresponse('help')])
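Typical use of this wrapper looks like the sketch below; the device path is hypothetical and the command string is only an example of a line-terminated query, not something this particular firmware is known to support.

# Hypothetical usage sketch (requires an actual device on the given port).
# SerialDevice as defined above.
dev = SerialDevice('/dev/ttyUSB0')      # opens the port, configured as 115200 8N1
dev.help()                              # dump the firmware help text to the screen
reply = dev._getresponse('*IDN?')       # send one command and read the reply lines
print(reply)
dev._closeport()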
31.057971
149
0.633224
f24ec90c076769a271375a22ab3cf535308f7374
505
py
Python
2017_to_2018.py
ProtD/iwik
cf173c393542988d5b84e12615c53c9853d57358
[ "MIT" ]
null
null
null
2017_to_2018.py
ProtD/iwik
cf173c393542988d5b84e12615c53c9853d57358
[ "MIT" ]
null
null
null
2017_to_2018.py
ProtD/iwik
cf173c393542988d5b84e12615c53c9853d57358
[ "MIT" ]
null
null
null
#!/usr/bin/env python3
import sys

input = open(sys.argv[1], 'r')
start_city = input.readline().rstrip()
cities = set()
for i, l in enumerate(input):
    [f, t, d, p] = l.rstrip().split(' ')
    cities.add(f)
input.close()

print("{} {}".format(len(cities), start_city))
for i, city in enumerate(cities):
    print(i)
    print(city)

input = open(sys.argv[1], 'r')
input.readline()
for l in input:
    [f, t, d, p] = l.rstrip().split(' ')
    print("{} {} {} {}".format(f, t, int(d)+1, p))
input.close()
21.956522
50
0.584158
0c46809a38d2bce4ecd3dc3e4364c6f147e36dc6
2,326
py
Python
Python/Creating_Charts_with_Broken_Axes/broken_axes.py
PeriscopeData/analytics-toolbox
83effdee380c33e5eecea29528acf5375fd496fb
[ "MIT" ]
2
2019-09-27T22:19:09.000Z
2019-12-02T23:12:18.000Z
Python/Creating_Charts_with_Broken_Axes/broken_axes.py
PeriscopeData/analytics-toolbox
83effdee380c33e5eecea29528acf5375fd496fb
[ "MIT" ]
1
2019-10-03T17:46:23.000Z
2019-10-03T17:46:23.000Z
Python/Creating_Charts_with_Broken_Axes/broken_axes.py
PeriscopeData/analytics-toolbox
83effdee380c33e5eecea29528acf5375fd496fb
[ "MIT" ]
2
2021-07-17T18:23:50.000Z
2022-03-03T04:53:03.000Z
# Code help from https://stackoverflow.com/questions/32185411/break-in-x-axis-of-matplotlib?utm_medium=organic&utm_source=google_rich_qa&utm_campaign=google_rich_qa import pandas as pd import matplotlib.pylab as plt import numpy as np z=np.array(df) x= z[:,0] y = z[:,1] # 1. create two subplots f,(ax,ax2) = plt.subplots(1,2,sharey=True, facecolor='w') # 2. plot the same data on both axes ax.bar(x, y) ax2.bar(x, y) # 3. limit each x axis to the chosen range a=0 b=3 c=5.5 d=12.5 ax.set_xlim(a,b) ax2.set_xlim(c,d) # ax.set_xlim(0,3) # ax2.set_xlim(5.5,12.5) # 4. hide the spines between ax and ax2 ax.spines['right'].set_visible(False) ax2.spines['left'].set_visible(False) ax.yaxis.tick_left() ax.tick_params(labelright='off') ax2.yaxis.tick_right() # 5. This looks pretty good, and was fairly painless, but you can get that # cut-out diagonal lines look with just a bit more work. The important # thing to know here is that in axes coordinates, which are always # between 0-1, spine endpoints are at these locations (0,0), (0,1), # (1,0), and (1,1). Thus, we just need to put the diagonals in the # appropriate corners of each of our axes, and so long as we use the # right transform and disable clipping. d = .015 # how big to make the diagonal lines in axes coordinates # arguments to pass plot, just so we don't keep repeating them kwargs = dict(transform=ax.transAxes, color='k', clip_on=False) ax.plot((1-d,1+d), (-d,+d), **kwargs) ax.plot((1-d,1+d),(1-d,1+d), **kwargs) kwargs.update(transform=ax2.transAxes) # switch to the bottom axes ax2.plot((-d,+d), (1-d,1+d), **kwargs) ax2.plot((-d,+d), (-d,+d), **kwargs) # What's cool about this is that now if we vary the distance between # ax and ax2 via f.subplots_adjust(hspace=...) or plt.subplot_tool(), # the diagonal lines will move accordingly, and stay right at the tips # of the spines they are 'breaking' # 6. Make some labels. rects = ax.patches labels = ["%d" % i for i in y] for i, rect, label in zip(x,rects, labels): height = rect.get_height() print(i) if i < b: ax.text(rect.get_x() + rect.get_width() / 2, height + 5, label, ha='center', va='bottom') elif i > c: ax2.text(rect.get_x() + rect.get_width() / 2, height + 5, label, ha='center', va='bottom') plt.show() periscope.output(plt)
33.228571
164
0.687016
9a39930de7c8fb76bf05db8bc2ba6a2895ae7466
2,184
py
Python
examples/com.netease.cloudmusic-pytest/test_discover_music.py
100440175/facebook-wda
d335b5506c210e15a220d6734510f9c40d11261d
[ "MIT" ]
null
null
null
examples/com.netease.cloudmusic-pytest/test_discover_music.py
100440175/facebook-wda
d335b5506c210e15a220d6734510f9c40d11261d
[ "MIT" ]
null
null
null
examples/com.netease.cloudmusic-pytest/test_discover_music.py
100440175/facebook-wda
d335b5506c210e15a220d6734510f9c40d11261d
[ "MIT" ]
null
null
null
# coding: utf-8
#
# NetEase Cloud Music test example
#

import os
import time

import pytest
import wda

bundle_id = 'com.netease.cloudmusic'

c = wda.Client()
s = None
USERNAME = os.getenv('USERNAME')
PASSWORD = os.getenv('PASSWORD')


def account_logout(s):
    s(nameMatches=u'帐[ ]*号', type='Button').tap()  # \s is not supported, which is weird
    s(name=u'退出登录').scroll().tap()
    s.alert.click(u'确定')


def account_netease_login(s):
    """ Log in with the NetEase mail account """
    if s(name=u'发现音乐', type='Button').wait(3, raise_error=False):
        # Already logged in
        return
    s(name=u'网易邮箱').tap()
    s(type='TextField').set_text(USERNAME+'\n')
    s(type='SecureTextField').set_text(PASSWORD+'\n')
    s(name=u'开启云音乐').click_exists(timeout=3.0)
    assert s(name=u'发现音乐', type='Button').wait(5.0)


def alert_callback(session):
    btns = set([u'不再提醒', 'OK', u'知道了', 'Allow']).intersection(session.alert.buttons())
    if len(btns) == 0:
        raise RuntimeError("Alert can not be handled, buttons: " + ', '.join(session.alert.buttons()))
    session.alert.click(list(btns)[0])


def create_session():
    s = c.session(bundle_id)
    s.set_alert_callback(alert_callback)
    return s


def setup_function():
    global s
    s = create_session()
    account_netease_login(s)


def teardown_function():
    s.close()
    # s = create_session()
    # account_logout(s)
    # s.close()


def test_discover_music():
    """ Test playback in Discover Music -> Personal FM """
    s(name=u'发现音乐', type='Button').tap()
    time.sleep(.5)
    assert s(name=u'听歌识曲', visible=True).wait()
    s(name=u'私人FM').tap()
    assert s(name=u'不再播放').exists
    assert s(name=u'添加到我喜欢的音乐').exists
    assert s(name=u'00:00', className='StaticText').exists
    s(nameMatches=u'(暂停|播放)').tap()
    assert s(name=u'00:00', className='StaticText').wait_gone(10.0)
    s(name=u'跑步FM').tap()
    s(name=u'知道了').click_exists(2.0)


def test_my_music():
    """ Test My Music -> Local Music """
    s(name=u'我的音乐', type='Button').tap()
    assert s(name=u'最近播放').wait(2.0)
    s(name=u'本地音乐').tap()
    assert s(name=u'管理').wait()
    s(name=u'播放全部').tap()


if __name__ == '__main__':
    setup_function()
    test_discover_music()
    test_my_music()
    teardown_function()
23.483871
99
0.626374
a3c73a455c57f1799c2358a095293d6394032729
4,228
py
Python
Non-React Stuff/alexa/lambda/skill_env/ask_sdk_model/interfaces/amazonpay/model/request/price.py
ReciPull/reciprogram
b8c7e4610f95c5beafad3c9880fc5beceec523e7
[ "MIT" ]
1
2019-09-16T19:13:13.000Z
2019-09-16T19:13:13.000Z
Non-React Stuff/alexa/lambda/skill_env/ask_sdk_model/interfaces/amazonpay/model/request/price.py
ReciPull/reciprogram
b8c7e4610f95c5beafad3c9880fc5beceec523e7
[ "MIT" ]
5
2021-03-09T03:30:14.000Z
2022-02-26T10:42:17.000Z
alexa/reciPullLambda/ask_sdk_model/interfaces/amazonpay/model/request/price.py
ReciPull/recipull.github.io
e6b800af02658bb7948297c4ddc1b7af6d978839
[ "MIT" ]
null
null
null
# coding: utf-8 # # Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file # except in compliance with the License. A copy of the License is located at # # http://aws.amazon.com/apache2.0/ # # or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for # the specific language governing permissions and limitations under the License. # import pprint import re # noqa: F401 import six import typing from enum import Enum from ask_sdk_model.interfaces.amazonpay.model.request.base_amazon_pay_entity import BaseAmazonPayEntity if typing.TYPE_CHECKING: from typing import Dict, List, Optional from datetime import datetime class Price(BaseAmazonPayEntity): """ This request object specifies amount and currency authorized/captured. :param amount: Amount authorized/captured. :type amount: (optional) str :param currency_code: Currency code for the amount. :type currency_code: (optional) str :param version: Version of the Amazon Pay Entity. Can be 2 or greater. :type version: (optional) str """ deserialized_types = { 'amount': 'str', 'currency_code': 'str', 'object_type': 'str', 'version': 'str' } # type: Dict attribute_map = { 'amount': 'amount', 'currency_code': 'currencyCode', 'object_type': '@type', 'version': '@version' } # type: Dict def __init__(self, amount=None, currency_code=None, version=None): # type: (Optional[str], Optional[str], Optional[str]) -> None """This request object specifies amount and currency authorized/captured. :param amount: Amount authorized/captured. :type amount: (optional) str :param currency_code: Currency code for the amount. :type currency_code: (optional) str :param version: Version of the Amazon Pay Entity. Can be 2 or greater. :type version: (optional) str """ self.__discriminator_value = "Price" # type: str self.object_type = self.__discriminator_value super(Price, self).__init__(object_type=self.__discriminator_value, version=version) self.amount = amount self.currency_code = currency_code def to_dict(self): # type: () -> Dict[str, object] """Returns the model properties as a dict""" result = {} # type: Dict for attr, _ in six.iteritems(self.deserialized_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x.value if isinstance(x, Enum) else x, value )) elif isinstance(value, Enum): result[attr] = value.value elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else (item[0], item[1].value) if isinstance(item[1], Enum) else item, value.items() )) else: result[attr] = value return result def to_str(self): # type: () -> str """Returns the string representation of the model""" return pprint.pformat(self.to_dict()) def __repr__(self): # type: () -> str """For `print` and `pprint`""" return self.to_str() def __eq__(self, other): # type: (object) -> bool """Returns true if both objects are equal""" if not isinstance(other, Price): return False return self.__dict__ == other.__dict__ def __ne__(self, other): # type: (object) -> bool """Returns true if both objects are not equal""" return not self == other
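The serialisation helpers defined in this model are easiest to see with a tiny example; the values below are invented, and the import path simply mirrors the module's own location in the SDK.

# Illustrative only: building a Price entity and serialising it.
from ask_sdk_model.interfaces.amazonpay.model.request.price import Price

price = Price(amount='9.99', currency_code='USD', version='2')
# to_dict() walks deserialized_types, so the result contains roughly:
# {'amount': '9.99', 'currency_code': 'USD', 'object_type': 'Price', 'version': '2'}
print(price.to_dict())
print(price.to_str())   # pretty-printed form of the same mapping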
33.555556
103
0.604541
7ff98c79eb934327b1dc58cadce664e252ae999f
327
py
Python
libraries/urx_python/urx_scripts/urx_modified/urx/__init__.py
giacomotomasi/tennisball_demo
f71cd552e64fe21533abe47b986db6999947c3a9
[ "Apache-2.0" ]
1
2021-08-04T16:18:22.000Z
2021-08-04T16:18:22.000Z
urx/__init__.py
EmilRyberg/P5BinPicking
4a4c302b8aa0ea8e3d361ef62ef3742d8b716352
[ "MIT" ]
null
null
null
urx/__init__.py
EmilRyberg/P5BinPicking
4a4c302b8aa0ea8e3d361ef62ef3742d8b716352
[ "MIT" ]
1
2021-08-03T03:41:41.000Z
2021-08-03T03:41:41.000Z
""" Python library to control an UR robot through its TCP/IP interface """ from urx.urrobot import RobotException, URRobot # noqa __version__ = "0.11.0" try: from urx.robot import Robot except ImportError as ex: print("Exception while importing math3d base robot, disabling use of matrices", ex) Robot = URRobot
25.153846
87
0.737003
7962bc11f0e4d3c6881cbad5af5c8b3e3c2a80cc
344
py
Python
chainercv/functions/__init__.py
beam2d/chainercv
55d34c07cbbd03642b71d375db579433859bd00e
[ "MIT" ]
1,600
2017-06-01T15:37:52.000Z
2022-03-09T08:39:09.000Z
chainercv/functions/__init__.py
beam2d/chainercv
55d34c07cbbd03642b71d375db579433859bd00e
[ "MIT" ]
547
2017-06-01T06:43:16.000Z
2021-05-28T17:14:05.000Z
chainercv/functions/__init__.py
beam2d/chainercv
55d34c07cbbd03642b71d375db579433859bd00e
[ "MIT" ]
376
2017-06-02T01:29:10.000Z
2022-03-13T11:19:59.000Z
from chainercv.functions.ps_roi_average_align_2d import ps_roi_average_align_2d  # NOQA
from chainercv.functions.ps_roi_average_pooling_2d import ps_roi_average_pooling_2d  # NOQA
from chainercv.functions.ps_roi_max_align_2d import ps_roi_max_align_2d  # NOQA
from chainercv.functions.ps_roi_max_pooling_2d import ps_roi_max_pooling_2d  # NOQA
68.8
91
0.883721
cfadd1cb503534912f2d4665bfbaf39c1f89a061
916
py
Python
mdde/core/mdde/registry/container/registry_response_helper.py
akharitonov/mdde
b0443f3c9c3ca948e9dda213572926087c214d8d
[ "MIT" ]
1
2021-05-17T11:17:51.000Z
2021-05-17T11:17:51.000Z
mdde/core/mdde/registry/container/registry_response_helper.py
akharitonov/mdde
b0443f3c9c3ca948e9dda213572926087c214d8d
[ "MIT" ]
4
2020-05-30T12:23:04.000Z
2021-12-25T12:59:14.000Z
mdde/core/mdde/registry/container/registry_response_helper.py
akharitonov/mdde
b0443f3c9c3ca948e9dda213572926087c214d8d
[ "MIT" ]
null
null
null
# from mdde.registry.exceptions import RegistryResponseError
from mdde.registry.container import RegistryResponse


class RegistryResponseHelper:
    """
    Functions commonly used when working with the objects returned from the registry.
    """

    @staticmethod
    def raise_on_error(response: RegistryResponse):
        """
        Raise an exception if the registry response returned an error.
        :param response: RegistryResponse.
        """
        try:
            if response.failed:
                # RegistryResponseError
                raise ValueError(response.error if response.error is not None
                                 else 'Registry returned undefined error')
        except AttributeError as aex:
            raise TypeError('Expected response type is an instance of RegistryResponse class, '
                            'or an object containing the properties "failed" and "error"') from aex
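Because the helper only relies on the `failed` and `error` attributes, it can be demonstrated with a stand-in object; the `FakeResponse` tuple below is purely illustrative.

# Illustrative sketch using a stand-in response object.
# RegistryResponseHelper as defined above.
from collections import namedtuple

FakeResponse = namedtuple('FakeResponse', ['failed', 'error'])

# A successful response passes through silently.
RegistryResponseHelper.raise_on_error(FakeResponse(failed=False, error=None))

# A failed response raises ValueError carrying the registry's error message.
try:
    RegistryResponseHelper.raise_on_error(FakeResponse(failed=True, error='boom'))
except ValueError as e:
    print(e)   # -> boom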
39.826087
97
0.66048
067fa74d288ee0e102c14354a5b9282e8dd579b0
525
py
Python
pagetools/menus/migrations/0004_auto_20160911_1305.py
theithec/pagetools
f5fba7213864555275bddcc1882122f3be843f19
[ "MIT" ]
null
null
null
pagetools/menus/migrations/0004_auto_20160911_1305.py
theithec/pagetools
f5fba7213864555275bddcc1882122f3be843f19
[ "MIT" ]
14
2020-02-23T12:15:49.000Z
2022-02-13T14:07:27.000Z
pagetools/menus/migrations/0004_auto_20160911_1305.py
theithec/django-pagetools
8fb73285acee8c8162329fc2895a3816fcb40165
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*-
# Generated by Django 1.9.9 on 2016-09-11 13:05
from __future__ import unicode_literals

from django.db import migrations


class Migration(migrations.Migration):

    dependencies = [
        ("menus", "0003_auto_20160908_1553"),
    ]

    operations = [
        migrations.AlterModelOptions(
            name="autopopulated",
            options={
                "verbose_name": "Autopopulated Entry",
                "verbose_name_plural": "Autopoulated Entries",
            },
        ),
    ]
22.826087
62
0.590476
c147f271a6c5a39144d6ccc9855a2753f684f981
8,425
py
Python
app.py
AmruthaR8/Damage-car-Detection
98cc842d21eced2d6d3353bb72c1c9bf49fb22a1
[ "MIT" ]
null
null
null
app.py
AmruthaR8/Damage-car-Detection
98cc842d21eced2d6d3353bb72c1c9bf49fb22a1
[ "MIT" ]
null
null
null
app.py
AmruthaR8/Damage-car-Detection
98cc842d21eced2d6d3353bb72c1c9bf49fb22a1
[ "MIT" ]
null
null
null
import os import sys import uvicorn as uvicorn from flask import Flask, render_template, request from object_detector_detection_api_lite import * import flask from werkzeug.utils import secure_filename import tensorflow as tf from PIL import Image import PIL import numpy as np app = Flask(__name__) app.config['UPLOAD_FOLDER'] = 'tmp' host = "0.0.0.0" @app.route('/') def welcome(): return render_template("login.html") @app.route('/index') def welcome1(): return render_template("index.html") @app.route('/uploads') def uploads(): page = request.args bot = dict(page)["variable"] print(bot) return render_template("upload.html",bot="/"+bot) @app.route('/CarDamage', methods=['GET', 'POST']) def upload_file2(): file = [] file = flask.request.files.getlist("file[]") print("****:",file) f = len(file) output = {} if f > 1: for i in file: #i is file storage filename = secure_filename(i.filename) # save file filepath = "out"+"/"+filename i.save(filepath) me=main(filename) filename = secure_filename(i.filename) me1=completenpartial(i) output[filename] = "<b>"+"The car is "+" "+"<font color=Crimson>"+me1+"</font>"+" "+" and the damages of the car are"+" "+"<font color=Crimson>"+str(me).replace("[","").replace("]","").replace("'","")+"</font>"+"</b>" columns = [{"field": "Image FileName", "title": "Image FileName","sortable": True},{"field": "Damage Recognition","title": "Car Damage Parts","sortable": True}] res = [] for key,val in output.items(): b = {columns[0]["field"]:key,columns[1]["field"]:val} res.append(b) return render_template("table1.html", data=res, columns=columns, title='Damage Prediction Results') else: file = flask.request.files["file"] # model = load_model(os.getcwd()+'/model/my_model2.h5') filename = secure_filename(file.filename) # save file filepath = "out"+"/"+filename file.save(filepath) me=main(filename) me1=completenpartial(file) filename = secure_filename(file.filename) output[filename] = "<b>"+"The car is "+" "+"<font color=Crimson>"+me1+"</font>"+" "+" and the damages of the car are"+" "+"<font color=Crimson>"+str(me).replace("[","").replace("]","").replace("'","")+"</font>"+"</b>" return "The car is"+" "+me1+" "+" and the damages of the car are"+" "+str(me).replace("[","").replace("]","").replace("'","") def completenpartial(file): interpreter = tf.lite.Interpreter(model_path="models/fullnpartial_size_224.tflite") interpreter.allocate_tensors() x,y=0,0 img = Image.open(file) img = img.resize((224,224), PIL.Image.ANTIALIAS) # Normalize to [0, 1] data = np.asarray( img, dtype="int32" ) / 255.0 # Inference on input data normalized to [0, 1] inputImg = np.expand_dims(data,0).astype(np.float32) input_details = interpreter.get_input_details() interpreter.set_tensor(input_details[0]['index'], inputImg) interpreter.invoke() output_details = interpreter.get_output_details() output_data = interpreter.get_tensor(output_details[0]['index']) if(format(np.argmax(output_data))=="1"): me1="Partially Captured" else: me1="Completely Captured" return me1 @app.route('/frontnback', methods=['GET', 'POST']) def upload_file3(): interpreter = tf.lite.Interpreter(model_path="models/frontnback_size_299.tflite") interpreter.allocate_tensors() x,y=0,0 file = [] file = flask.request.files.getlist("file[]") f = len(file) # print("/////////////////",f) output = {} if f > 1: for i in file: img = Image.open(i) img = img.resize((299,299), PIL.Image.ANTIALIAS) # Normalize to [0, 1] data = np.asarray( img, dtype="int32" ) / 255.0 # Inference on input data normalized to [0, 1] inputImg = 
np.expand_dims(data,0).astype(np.float32) input_details = interpreter.get_input_details() interpreter.set_tensor(input_details[0]['index'], inputImg) interpreter.invoke() output_details = interpreter.get_output_details() output_data = interpreter.get_tensor(output_details[0]['index']) if(format(np.argmax(output_data))=="1"): me="Back Side" else: me="Front Side" filename = secure_filename(i.filename) s= filename.rsplit("_", 1)[1] #filename = os.path.join(app.config['UPLOAD_FOLDER'],filename) output[s] = "<b>"+"The car location is facing"+" "+"<font color=Crimson>"+me+"</font>"+"</b>" columns = [{"field": "Image FileName", "title": "Image FileName","sortable": True},{"field": "Front or Back Recognition","title": "Front or Back Recognition","sortable": True}] res = [] for key,val in output.items(): b = {columns[0]["field"]:key,columns[1]["field"]:val} print(b) res.append(b) print(res) return render_template("table1.html", data=res, columns=columns, title='Front and Back Detection') else: file = request.files['file'] print("//////////////////////",file) # model = load_model('E:/BMW_IMAGE_ANALYSIS/model/newmodelcarsvsbmw.h5') # save the model to disk output = {} img = Image.open(file) img = img.resize((299,299), PIL.Image.ANTIALIAS) # Normalize to [0, 1] data = np.asarray( img, dtype="int32" ) / 255.0 # Inference on input data normalized to [0, 1] inputImg = np.expand_dims(data,0).astype(np.float32) input_details = interpreter.get_input_details() interpreter.set_tensor(input_details[0]['index'], inputImg) interpreter.invoke() output_details = interpreter.get_output_details() output_data = interpreter.get_tensor(output_details[0]['index']) if(format(np.argmax(output_data))=="1"): me="Back Side" else: me="Front Side" filename = secure_filename(file.filename) output[filename] = "The car location is facing"+" "+me return("The car location is facing"+" "+me) @app.route('/leftnright', methods=['GET', 'POST']) def upload_file(): interpreter = tf.lite.Interpreter(model_path="models/leftnright_size_224.tflite") interpreter.allocate_tensors() x,y=0,0 file = [] file = flask.request.files.getlist("file[]") f = len(file) output = {} if f > 1: for i in file: img = Image.open(i) img = img.resize((224,224), PIL.Image.ANTIALIAS) # Normalize to [0, 1] data = np.asarray( img, dtype="int32" ) / 255.0 # Inference on input data normalized to [0, 1] inputImg = np.expand_dims(data,0).astype(np.float32) input_details = interpreter.get_input_details() interpreter.set_tensor(input_details[0]['index'], inputImg) interpreter.invoke() output_details = interpreter.get_output_details() output_data = interpreter.get_tensor(output_details[0]['index']) if(format(np.argmax(output_data))=="1"): me="Right Side" else: me="Left Side" filename = secure_filename(i.filename) s= filename.rsplit("_", 1)[1] #filename = os.path.join(app.config['UPLOAD_FOLDER'],filename) output[s] = "<b>"+"The car location is facing"+" "+"<font color=Crimson>"+me+"</font>"+"</b>" columns = [{"field": "Image FileName", "title": "Image FileName","sortable": True},{"field": "Left or Right Recognition","title": "Left or Right Recognition","sortable": True}] res = [] for key,val in output.items(): b = {columns[0]["field"]:key,columns[1]["field"]:val} res.append(b) return render_template("table1.html", data=res, columns=columns, title='Left and Right Detection') else: file = request.files['file'] # model = load_model('E:/BMW_IMAGE_ANALYSIS/model/newmodelcarsvsbmw.h5') # save the model to disk output = {} img = Image.open(file) img = img.resize((224,224), 
PIL.Image.ANTIALIAS) # Normalize to [0, 1] data = np.asarray( img, dtype="int32" ) / 255.0 # Inference on input data normalized to [0, 1] inputImg = np.expand_dims(data,0).astype(np.float32) input_details = interpreter.get_input_details() interpreter.set_tensor(input_details[0]['index'], inputImg) interpreter.invoke() output_details = interpreter.get_output_details() output_data = interpreter.get_tensor(output_details[0]['index']) if(format(np.argmax(output_data))=="1"): me="Right Side" else: me="Left Side" filename = secure_filename(file.filename) output[filename] = "The car location is facing"+" "+me return(output[filename]) # # if __name__ == '__main__': # app.run(host=host, port=port) if __name__ == "__main__": if "serve" in sys.argv: port = int(os.environ.get("PORT", 8008)) uvicorn.run(app, host = "0.0.0.0", port = port)
31.792453
220
0.667537
9d46f6e30ec83e60ebe6ab53b37612d24386f7a9
2,547
py
Python
test/grab_charset.py
gonchik/grab
d007afb7aeab63036d494f3b2704be96ea570810
[ "MIT" ]
null
null
null
test/grab_charset.py
gonchik/grab
d007afb7aeab63036d494f3b2704be96ea570810
[ "MIT" ]
null
null
null
test/grab_charset.py
gonchik/grab
d007afb7aeab63036d494f3b2704be96ea570810
[ "MIT" ]
null
null
null
# coding: utf-8 """ This test fails in py3.3 environment because `grab.response.body` contains <str>, but it should contains <bytes> """ import six from test.util import build_grab from test.util import BaseGrabTestCase class GrabCharsetDetectionTestCase(BaseGrabTestCase): def setUp(self): self.server.reset() def test_document_charset_option(self): g = build_grab() self.server.response['get.data'] = b'foo' g.go(self.server.get_url()) self.assertEqual(b'foo', g.response.body) g = build_grab() self.server.response['get.data'] = u'фуу'.encode('utf-8') g.go(self.server.get_url()) self.assertEqual(u'фуу'.encode('utf-8'), g.response.body) print(g.response.head) self.assertEqual(g.response.charset, 'utf-8') g = build_grab(document_charset='cp1251') self.server.response['get.data'] = u'фуу'.encode('cp1251') g.go(self.server.get_url()) self.assertEqual(u'фуу'.encode('cp1251'), g.response.body) self.assertEqual(g.response.charset, 'cp1251') def test_document_charset_lowercase(self): self.server.response['charset'] = 'UTF-8' g = build_grab() g.go(self.server.get_url()) self.assertEquals('utf-8', g.doc.charset) def test_dash_issue(self): HTML = '<strong>&#151;</strong>' self.server.response['get.data'] = HTML g = build_grab() g.go(self.server.get_url()) # By default &#[128-160]; are fixed self.assertFalse(g.xpath_one('//strong/text()') == six.unichr(151)) self.assertTrue(g.xpath_one('//strong/text()') == six.unichr(8212)) # disable fix-behaviour g.setup(fix_special_entities=False) g.go(self.server.get_url()) # By default &#[128-160]; are fixed self.assertTrue(g.xpath_one('//strong/text()') == six.unichr(151)) self.assertFalse(g.xpath_one('//strong/text()') == six.unichr(8212)) # Explicitly use unicode_body func g = build_grab() g.go(self.server.get_url()) print(':::', g.response.unicode_body()) self.assertTrue('&#8212;' in g.response.unicode_body()) def test_invalid_charset(self): HTML = '''<head><meta http-equiv="Content-Type" content="text/html; charset=windows-874">' </head><body>test</body>''' self.server.response['get.data'] = HTML g = build_grab() g.go(self.server.get_url()) #print(g.doc.charset)
33.96
76
0.610915
c7b0fdfa5f68259385511d321c140b692755db3d
6,305
py
Python
main.py
heng2j/RL-Trade
5e488945d21dafc2d20dfc47d99e2662129c637b
[ "MIT" ]
null
null
null
main.py
heng2j/RL-Trade
5e488945d21dafc2d20dfc47d99e2662129c637b
[ "MIT" ]
null
null
null
main.py
heng2j/RL-Trade
5e488945d21dafc2d20dfc47d99e2662129c637b
[ "MIT" ]
null
null
null
# Reference Code # https://gist.github.com/arsalanaf/d10e0c9e2422dba94c91e478831acb12 # https://github.com/Stable-Baselines-Team/stable-baselines-tf2 # https://github.com/notadamking/Stock-Trading-Visualization import gym import matplotlib matplotlib.use('TkAgg') import matplotlib.pyplot as plt import IPython.display as Display import PIL.Image as Image # from stable_baselines.common.policies import MlpPolicy # Using from stable_baselines.common.vec_env import DummyVecEnv # from stable_baselines import PPO from env.StockTradingEnv import StockTradingEnv import pandas as pd import numpy as np from tensorflow.keras.models import Sequential, load_model from tensorflow.keras.layers import Dense, Dropout, Activation, LSTM from tensorflow.keras.optimizers import RMSprop, Adam from collections import deque class DQN: def __init__(self, env, inputshape=(5,42)): self.env = env self.memory = deque(maxlen=20000) self.gamma = 0.85 self.epsilon = 1.0 self.epsilon_min = 0.01 self.epsilon_decay = 0.995 self.learning_rate = 0.005 self.tau = .125 self.inputshape = inputshape self.model = self.create_model() self.target_model = self.create_model() def create_model(self): model = Sequential() # state_shape = list(self.env.observation_space.shape.items())[0][1] #Reshaping for LSTM #state_shape=np.array(state_shape) #state_shape= np.reshape(state_shape, (30,4,1)) ''' model.add(Dense(24, input_dim=state_shape[1], activation="relu")) model.add(Dense(48, activation="relu")) model.add(Dense(24, activation="relu")) model.add(Dense(self.env.action_space.n)) model.compile(loss="mean_squared_error", optimizer=Adam(lr=self.learning_rate)) ''' model.add(LSTM(64, input_shape=self.inputshape, #return_sequences=True, stateful=False )) model.add(Dropout(0.5)) #model.add(LSTM(64, #input_shape=(1,4), #return_sequences=False, # stateful=False # )) model.add(Dropout(0.5)) # print("self.env.action_space: ", self.env.action_space) # print(self.env.action_space.shape[0]) model.add(Dense(self.env.action_space.shape[0], kernel_initializer='lecun_uniform')) model.add(Activation('linear')) #linear output so we can have range of real-valued outputs rms = RMSprop() adam = Adam() model.compile(loss='mse', optimizer=adam) return model def act(self, state): self.epsilon *= self.epsilon_decay self.epsilon = max(self.epsilon_min, self.epsilon) if np.random.random() < self.epsilon: # print("Sampled action space") return self.env.action_space.sample() else: result = np.argmax(self.model.predict(state)[0]) print("self.model.predict(state): ", self.model.predict(state)) if result == 0: return [0, 0] elif result == 1: return [1, 0] else: return result # return np.argmax(self.model.predict(state)[0]) def target_train(self): weights = self.model.get_weights() target_weights = self.target_model.get_weights() for i in range(len(target_weights)): target_weights[i] = weights[i] * self.tau + target_weights[i] * (1 - self.tau) self.target_model.set_weights(target_weights) def save_model(self, fn): self.model.save(fn) def show_rendered_image(self, rgb_array): """ Convert numpy array to RGB image using PILLOW and show it inline using IPykernel. """ Display.display(Image.fromarray(rgb_array)) def render_all_modes(self, env): """ Retrieve and show environment renderings for all supported modes. 
""" for mode in self.env.metadata['render.modes']: print('[{}] mode:'.format(mode)) self.show_rendered_image(self.env.render(mode)) # TODO # Confirm how to train the agent # Modiify traiing dataset # Set up test set # - Modify training # Add more osticles df = pd.read_csv('./data/MSFT.csv') df = df.sort_values('Date') replay_size = 10 trials = 5 trial_len = 100 Domain_Randomization_Interval = None filename = 'base_line_render.txt' # filename = 'UDR_render.txt' # The algorithms require a vectorized environment to run env = DummyVecEnv([lambda: StockTradingEnv(df, render_mode='file', filename=filename, replay_size=replay_size, Domain_Randomization_Interval=Domain_Randomization_Interval) ]) obs = env.reset() print("obs_shape: ", obs.shape) gamma = 0.9 epsilon = .95 # updateTargetNetwork = 1000 dqn_agent = DQN(env=env, inputshape=obs.shape[1:]) steps = [] for trial in range(trials): cur_state = obs = env.reset() for step in range(trial_len): action = dqn_agent.act(cur_state) print("Outter action: ", action) print(type(action)) # TODO - Not sure why will return scalar 0 or 1 if action is 0: action = [0, 0] print("0 action: ", action) elif action is 1: action = [1, 0] print("1 action: ", action) new_state, reward, done, info = env.step([action]) reward = reward*10 if not done else -10 # TODO - Need to adjust this for better training / Maybe using other algorithm may help env.render(title="MSFT") # new_state =list(new_state.items())[0][1] # new_state= np.reshape(new_state, (30,4,1)) dqn_agent.target_train() # iterates target model cur_state = new_state if done: break print("Completed trial #{} ".format(trial)) # dqn_agent.render_all_modes(env) model_code = 'baseline_{0}_iterations_{1}_steps_each'.format(trials,trial_len) # model_code = 'UDR_{0}_iterations_{1}_steps_each'.format(trials,Domain_Randomization_Interval) dqn_agent.save_model("model_{}.model".format(model_code))
28.400901
174
0.628707
63c2e1a43e9e8d4c3a628dd98a48f2de0ddb52a5
1,372
py
Python
lib/galaxy/util/monitors.py
mmiladi/galaxy
7857b152cd10d9490ac2433ff2905ca1a47ee32c
[ "CC-BY-3.0" ]
null
null
null
lib/galaxy/util/monitors.py
mmiladi/galaxy
7857b152cd10d9490ac2433ff2905ca1a47ee32c
[ "CC-BY-3.0" ]
null
null
null
lib/galaxy/util/monitors.py
mmiladi/galaxy
7857b152cd10d9490ac2433ff2905ca1a47ee32c
[ "CC-BY-3.0" ]
null
null
null
from __future__ import absolute_import

import logging
import threading

from .sleeper import Sleeper

log = logging.getLogger(__name__)

DEFAULT_MONITOR_THREAD_JOIN_TIMEOUT = 5


class Monitors:

    def _init_monitor_thread(self, name, target_name=None, target=None, start=False, config=None):
        self.monitor_join_sleep = getattr(config, "monitor_thread_join_timeout", DEFAULT_MONITOR_THREAD_JOIN_TIMEOUT)
        self.monitor_join = self.monitor_join_sleep > 0
        self.monitor_sleeper = Sleeper()
        self.monitor_running = True
        if target is not None:
            assert target_name is None
            monitor_func = target
        else:
            target_name = target_name or "monitor"
            monitor_func = getattr(self, target_name)
        self.sleeper = Sleeper()
        self.monitor_thread = threading.Thread(name=name, target=monitor_func)
        self.monitor_thread.setDaemon(True)
        if start:
            self.monitor_thread.start()

    def stop_monitoring(self):
        self.monitor_running = False

    def _monitor_sleep(self, sleep_amount):
        self.sleeper.sleep(sleep_amount)

    def shutdown_monitor(self):
        self.stop_monitoring()
        self.sleeper.wake()
        if self.monitor_join:
            log.debug("Joining monitor thread")
            self.monitor_thread.join(self.monitor_join_sleep)
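A minimal consumer of this mixin might look like the sketch below; the class name and sleep interval are invented for illustration, and the import path is assumed from the file's location in the Galaxy tree.

# Hypothetical sketch of a class using the Monitors mixin
# (assuming `from galaxy.util.monitors import Monitors` works in your environment).
import time


class JobWatcher(Monitors):

    def __init__(self):
        # Spawns a daemon thread that runs self.monitor().
        self._init_monitor_thread("job-watcher", start=True)

    def monitor(self):
        while self.monitor_running:
            # ... poll for work here ...
            self._monitor_sleep(1)   # interruptible sleep via the Sleeper


watcher = JobWatcher()
time.sleep(3)
watcher.shutdown_monitor()   # stops the loop, wakes the sleeper, joins the thread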
30.488889
117
0.687318
00f55e52352d63179dca10207ae28e7bae0b9463
313
py
Python
src/movement/hayHay.py
Quanta-Robotics/Robot-Blueberry
7b7e77e09ac5e9ec5afd947e0db1ecc8773e56da
[ "MIT" ]
25
2021-06-08T07:09:30.000Z
2021-12-30T06:28:35.000Z
src/movement/hayHay.py
ICT-CoU/Robot-Blueberry
d19fd1be037df9d67de64df57a87006d74cd6c43
[ "MIT" ]
2
2021-05-23T12:54:51.000Z
2021-06-07T17:47:56.000Z
src/movement/hayHay.py
ICT-CoU/Robot-Blueberry
d19fd1be037df9d67de64df57a87006d74cd6c43
[ "MIT" ]
14
2021-06-08T13:02:28.000Z
2021-12-30T20:07:18.000Z
from expression import *

say("hay hay ")
takePosition()
changeDegree([6,10,4,2,5,9,3,1],[80,10,0,100,70,180,180,90],0.05,0)
changeDegree([8,7],[140,60])
time.sleep(1)
changeDegree([8,7],[0,180])
changeDegree([4,2,6,10,3,1,5,9],[180,130,Initial[6],Initial[10],0,50,Initial[5],Initial[9]],0.05,0)
takePosition()
24.076923
99
0.670927
514bfb714cea3509a1068f1d71e20f60d2ff8149
93,023
py
Python
code/tmp_rtrip/email/_header_value_parser.py
emilyemorehouse/ast-and-me
3f58117512e125e1ecbe3c72f2f0d26adb80b7b3
[ "MIT" ]
24
2018-01-23T05:28:40.000Z
2021-04-13T20:52:59.000Z
code/tmp_rtrip/email/_header_value_parser.py
emilyemorehouse/ast-and-me
3f58117512e125e1ecbe3c72f2f0d26adb80b7b3
[ "MIT" ]
17
2017-12-21T18:32:31.000Z
2018-12-18T17:09:50.000Z
code/tmp_rtrip/email/_header_value_parser.py
emilyemorehouse/ast-and-me
3f58117512e125e1ecbe3c72f2f0d26adb80b7b3
[ "MIT" ]
null
null
null
"""Header value parser implementing various email-related RFC parsing rules. The parsing methods defined in this module implement various email related parsing rules. Principal among them is RFC 5322, which is the followon to RFC 2822 and primarily a clarification of the former. It also implements RFC 2047 encoded word decoding. RFC 5322 goes to considerable trouble to maintain backward compatibility with RFC 822 in the parse phase, while cleaning up the structure on the generation phase. This parser supports correct RFC 5322 generation by tagging white space as folding white space only when folding is allowed in the non-obsolete rule sets. Actually, the parser is even more generous when accepting input than RFC 5322 mandates, following the spirit of Postel's Law, which RFC 5322 encourages. Where possible deviations from the standard are annotated on the 'defects' attribute of tokens that deviate. The general structure of the parser follows RFC 5322, and uses its terminology where there is a direct correspondence. Where the implementation requires a somewhat different structure than that used by the formal grammar, new terms that mimic the closest existing terms are used. Thus, it really helps to have a copy of RFC 5322 handy when studying this code. Input to the parser is a string that has already been unfolded according to RFC 5322 rules. According to the RFC this unfolding is the very first step, and this parser leaves the unfolding step to a higher level message parser, which will have already detected the line breaks that need unfolding while determining the beginning and end of each header. The output of the parser is a TokenList object, which is a list subclass. A TokenList is a recursive data structure. The terminal nodes of the structure are Terminal objects, which are subclasses of str. These do not correspond directly to terminal objects in the formal grammar, but are instead more practical higher level combinations of true terminals. All TokenList and Terminal objects have a 'value' attribute, which produces the semantically meaningful value of that part of the parse subtree. The value of all whitespace tokens (no matter how many sub-tokens they may contain) is a single space, as per the RFC rules. This includes 'CFWS', which is herein included in the general class of whitespace tokens. There is one exception to the rule that whitespace tokens are collapsed into single spaces in values: in the value of a 'bare-quoted-string' (a quoted-string with no leading or trailing whitespace), any whitespace that appeared between the quotation marks is preserved in the returned value. Note that in all Terminal strings quoted pairs are turned into their unquoted values. All TokenList and Terminal objects also have a string value, which attempts to be a "canonical" representation of the RFC-compliant form of the substring that produced the parsed subtree, including minimal use of quoted pair quoting. Whitespace runs are not collapsed. Comment tokens also have a 'content' attribute providing the string found between the parens (including any nested comments) with whitespace preserved. All TokenList and Terminal objects have a 'defects' attribute which is a possibly empty list all of the defects found while creating the token. Defects may appear on any token in the tree, and a composite list of all defects in the subtree is available through the 'all_defects' attribute of any node. (For Terminal notes x.defects == x.all_defects.) 
Each object in a parse tree is called a 'token', and each has a 'token_type' attribute that gives the name from the RFC 5322 grammar that it represents. Not all RFC 5322 nodes are produced, and there is one non-RFC 5322 node that may be produced: 'ptext'. A 'ptext' is a string of printable ascii characters. It is returned in place of lists of (ctext/quoted-pair) and (qtext/quoted-pair). XXX: provide complete list of token types. """ import re import urllib from string import hexdigits from collections import OrderedDict from operator import itemgetter from email import _encoded_words as _ew from email import errors from email import utils WSP = set(' \t') CFWS_LEADER = WSP | set('(') SPECIALS = set('()<>@,:;.\\"[]') ATOM_ENDS = SPECIALS | WSP DOT_ATOM_ENDS = ATOM_ENDS - set('.') PHRASE_ENDS = SPECIALS - set('."(') TSPECIALS = (SPECIALS | set('/?=')) - set('.') TOKEN_ENDS = TSPECIALS | WSP ASPECIALS = TSPECIALS | set("*'%") ATTRIBUTE_ENDS = ASPECIALS | WSP EXTENDED_ATTRIBUTE_ENDS = ATTRIBUTE_ENDS - set('%') def quote_string(value): return '"' + str(value).replace('\\', '\\\\').replace('"', '\\"') + '"' class _Folded: def __init__(self, maxlen, policy): self.maxlen = maxlen self.policy = policy self.lastlen = 0 self.stickyspace = None self.firstline = True self.done = [] self.current = [] def newline(self): self.done.extend(self.current) self.done.append(self.policy.linesep) self.current.clear() self.lastlen = 0 def finalize(self): if self.current: self.newline() def __str__(self): return ''.join(self.done) def append(self, stoken): self.current.append(stoken) def append_if_fits(self, token, stoken=None): if stoken is None: stoken = str(token) l = len(stoken) if self.stickyspace is not None: stickyspace_len = len(self.stickyspace) if self.lastlen + stickyspace_len + l <= self.maxlen: self.current.append(self.stickyspace) self.lastlen += stickyspace_len self.current.append(stoken) self.lastlen += l self.stickyspace = None self.firstline = False return True if token.has_fws: ws = token.pop_leading_fws() if ws is not None: self.stickyspace += str(ws) stickyspace_len += len(ws) token._fold(self) return True if stickyspace_len and l + 1 <= self.maxlen: margin = self.maxlen - l if 0 < margin < stickyspace_len: trim = stickyspace_len - margin self.current.append(self.stickyspace[:trim]) self.stickyspace = self.stickyspace[trim:] stickyspace_len = trim self.newline() self.current.append(self.stickyspace) self.current.append(stoken) self.lastlen = l + stickyspace_len self.stickyspace = None self.firstline = False return True if not self.firstline: self.newline() self.current.append(self.stickyspace) self.current.append(stoken) self.stickyspace = None self.firstline = False return True if self.lastlen + l <= self.maxlen: self.current.append(stoken) self.lastlen += l return True if l < self.maxlen: self.newline() self.current.append(stoken) self.lastlen = l return True return False class TokenList(list): token_type = None def __init__(self, *args, **kw): super().__init__(*args, **kw) self.defects = [] def __str__(self): return ''.join(str(x) for x in self) def __repr__(self): return '{}({})'.format(self.__class__.__name__, super().__repr__()) @property def value(self): return ''.join(x.value for x in self if x.value) @property def all_defects(self): return sum((x.all_defects for x in self), self.defects) @property def parts(self): klass = self.__class__ this = [] for token in self: if token.startswith_fws(): if this: yield this[0] if len(this) == 1 else klass(this) this.clear() end_ws = 
token.pop_trailing_ws() this.append(token) if end_ws: yield klass(this) this = [end_ws] if this: yield this[0] if len(this) == 1 else klass(this) def startswith_fws(self): return self[0].startswith_fws() def pop_leading_fws(self): if self[0].token_type == 'fws': return self.pop(0) return self[0].pop_leading_fws() def pop_trailing_ws(self): if self[-1].token_type == 'cfws': return self.pop(-1) return self[-1].pop_trailing_ws() @property def has_fws(self): for part in self: if part.has_fws: return True return False def has_leading_comment(self): return self[0].has_leading_comment() @property def comments(self): comments = [] for token in self: comments.extend(token.comments) return comments def fold(self, *, policy): maxlen = policy.max_line_length or float('+inf') folded = _Folded(maxlen, policy) self._fold(folded) folded.finalize() return str(folded) def as_encoded_word(self, charset): res = [] ws = self.pop_leading_fws() if ws: res.append(ws) trailer = self.pop(-1) if self[-1].token_type == 'fws' else '' res.append(_ew.encode(str(self), charset)) res.append(trailer) return ''.join(res) def cte_encode(self, charset, policy): res = [] for part in self: res.append(part.cte_encode(charset, policy)) return ''.join(res) def _fold(self, folded): encoding = 'utf-8' if folded.policy.utf8 else 'ascii' for part in self.parts: tstr = str(part) tlen = len(tstr) try: str(part).encode(encoding) except UnicodeEncodeError: if any(isinstance(x, errors.UndecodableBytesDefect) for x in part.all_defects): charset = 'unknown-8bit' else: charset = 'utf-8' tstr = part.cte_encode(charset, folded.policy) tlen = len(tstr) if folded.append_if_fits(part, tstr): continue ws = part.pop_leading_fws() if ws is not None: folded.stickyspace = str(part.pop(0)) if folded.append_if_fits(part): continue if part.has_fws: part._fold(folded) continue folded.append(tstr) folded.newline() def pprint(self, indent=''): print('\n'.join(self._pp(indent=''))) def ppstr(self, indent=''): return '\n'.join(self._pp(indent='')) def _pp(self, indent=''): yield '{}{}/{}('.format(indent, self.__class__.__name__, self. token_type) for token in self: if not hasattr(token, '_pp'): yield indent + ' !! 
invalid element in token list: {!r}'.format( token) else: yield from token._pp(indent + ' ') if self.defects: extra = ' Defects: {}'.format(self.defects) else: extra = '' yield '{}){}'.format(indent, extra) class WhiteSpaceTokenList(TokenList): @property def value(self): return ' ' @property def comments(self): return [x.content for x in self if x.token_type == 'comment'] class UnstructuredTokenList(TokenList): token_type = 'unstructured' def _fold(self, folded): last_ew = None encoding = 'utf-8' if folded.policy.utf8 else 'ascii' for part in self.parts: tstr = str(part) is_ew = False try: str(part).encode(encoding) except UnicodeEncodeError: if any(isinstance(x, errors.UndecodableBytesDefect) for x in part.all_defects): charset = 'unknown-8bit' else: charset = 'utf-8' if last_ew is not None: chunk = get_unstructured(''.join(folded.current[last_ew :] + [tstr])).as_encoded_word(charset) oldlastlen = sum(len(x) for x in folded.current[:last_ew]) schunk = str(chunk) lchunk = len(schunk) if oldlastlen + lchunk <= folded.maxlen: del folded.current[last_ew:] folded.append(schunk) folded.lastlen = oldlastlen + lchunk continue tstr = part.as_encoded_word(charset) is_ew = True if folded.append_if_fits(part, tstr): if is_ew: last_ew = len(folded.current) - 1 continue if is_ew or last_ew: part._fold_as_ew(folded) continue ws = part.pop_leading_fws() if ws is not None: folded.stickyspace = str(ws) if folded.append_if_fits(part): continue if part.has_fws: part._fold(folded) continue folded.append(tstr) folded.newline() last_ew = None def cte_encode(self, charset, policy): res = [] last_ew = None for part in self: spart = str(part) try: spart.encode('us-ascii') res.append(spart) except UnicodeEncodeError: if last_ew is None: res.append(part.cte_encode(charset, policy)) last_ew = len(res) else: tl = get_unstructured(''.join(res[last_ew:] + [spart])) res.append(tl.as_encoded_word(charset)) return ''.join(res) class Phrase(TokenList): token_type = 'phrase' def _fold(self, folded): last_ew = None encoding = 'utf-8' if folded.policy.utf8 else 'ascii' for part in self.parts: tstr = str(part) tlen = len(tstr) has_ew = False try: str(part).encode(encoding) except UnicodeEncodeError: if any(isinstance(x, errors.UndecodableBytesDefect) for x in part.all_defects): charset = 'unknown-8bit' else: charset = 'utf-8' if last_ew is not None and not part.has_leading_comment(): if part[-1].token_type == 'cfws' and part.comments: remainder = part.pop(-1) else: remainder = '' for i, token in enumerate(part): if token.token_type == 'bare-quoted-string': part[i] = UnstructuredTokenList(token[:]) chunk = get_unstructured(''.join(folded.current[last_ew :] + [tstr])).as_encoded_word(charset) schunk = str(chunk) lchunk = len(schunk) if last_ew + lchunk <= folded.maxlen: del folded.current[last_ew:] folded.append(schunk) folded.lastlen = sum(len(x) for x in folded.current) continue tstr = part.as_encoded_word(charset) tlen = len(tstr) has_ew = True if folded.append_if_fits(part, tstr): if has_ew and not part.comments: last_ew = len(folded.current) - 1 elif part.comments or part.token_type == 'quoted-string': last_ew = None continue part._fold(folded) def cte_encode(self, charset, policy): res = [] last_ew = None is_ew = False for part in self: spart = str(part) try: spart.encode('us-ascii') res.append(spart) except UnicodeEncodeError: is_ew = True if last_ew is None: if not part.comments: last_ew = len(res) res.append(part.cte_encode(charset, policy)) elif not part.has_leading_comment(): if part[-1].token_type == 'cfws' and 
part.comments: remainder = part.pop(-1) else: remainder = '' for i, token in enumerate(part): if token.token_type == 'bare-quoted-string': part[i] = UnstructuredTokenList(token[:]) tl = get_unstructured(''.join(res[last_ew:] + [spart])) res[last_ew:] = [tl.as_encoded_word(charset)] if (part.comments or not is_ew and part.token_type == 'quoted-string'): last_ew = None return ''.join(res) class Word(TokenList): token_type = 'word' class CFWSList(WhiteSpaceTokenList): token_type = 'cfws' def has_leading_comment(self): return bool(self.comments) class Atom(TokenList): token_type = 'atom' class Token(TokenList): token_type = 'token' class EncodedWord(TokenList): token_type = 'encoded-word' cte = None charset = None lang = None @property def encoded(self): if self.cte is not None: return self.cte _ew.encode(str(self), self.charset) class QuotedString(TokenList): token_type = 'quoted-string' @property def content(self): for x in self: if x.token_type == 'bare-quoted-string': return x.value @property def quoted_value(self): res = [] for x in self: if x.token_type == 'bare-quoted-string': res.append(str(x)) else: res.append(x.value) return ''.join(res) @property def stripped_value(self): for token in self: if token.token_type == 'bare-quoted-string': return token.value class BareQuotedString(QuotedString): token_type = 'bare-quoted-string' def __str__(self): return quote_string(''.join(str(x) for x in self)) @property def value(self): return ''.join(str(x) for x in self) class Comment(WhiteSpaceTokenList): token_type = 'comment' def __str__(self): return ''.join(sum([['('], [self.quote(x) for x in self], [')']], [])) def quote(self, value): if value.token_type == 'comment': return str(value) return str(value).replace('\\', '\\\\').replace('(', '\\(').replace(')' , '\\)') @property def content(self): return ''.join(str(x) for x in self) @property def comments(self): return [self.content] class AddressList(TokenList): token_type = 'address-list' @property def addresses(self): return [x for x in self if x.token_type == 'address'] @property def mailboxes(self): return sum((x.mailboxes for x in self if x.token_type == 'address'), [] ) @property def all_mailboxes(self): return sum((x.all_mailboxes for x in self if x.token_type == 'address'), []) class Address(TokenList): token_type = 'address' @property def display_name(self): if self[0].token_type == 'group': return self[0].display_name @property def mailboxes(self): if self[0].token_type == 'mailbox': return [self[0]] elif self[0].token_type == 'invalid-mailbox': return [] return self[0].mailboxes @property def all_mailboxes(self): if self[0].token_type == 'mailbox': return [self[0]] elif self[0].token_type == 'invalid-mailbox': return [self[0]] return self[0].all_mailboxes class MailboxList(TokenList): token_type = 'mailbox-list' @property def mailboxes(self): return [x for x in self if x.token_type == 'mailbox'] @property def all_mailboxes(self): return [x for x in self if x.token_type in ('mailbox', 'invalid-mailbox')] class GroupList(TokenList): token_type = 'group-list' @property def mailboxes(self): if not self or self[0].token_type != 'mailbox-list': return [] return self[0].mailboxes @property def all_mailboxes(self): if not self or self[0].token_type != 'mailbox-list': return [] return self[0].all_mailboxes class Group(TokenList): token_type = 'group' @property def mailboxes(self): if self[2].token_type != 'group-list': return [] return self[2].mailboxes @property def all_mailboxes(self): if self[2].token_type != 'group-list': return [] return 
self[2].all_mailboxes @property def display_name(self): return self[0].display_name class NameAddr(TokenList): token_type = 'name-addr' @property def display_name(self): if len(self) == 1: return None return self[0].display_name @property def local_part(self): return self[-1].local_part @property def domain(self): return self[-1].domain @property def route(self): return self[-1].route @property def addr_spec(self): return self[-1].addr_spec class AngleAddr(TokenList): token_type = 'angle-addr' @property def local_part(self): for x in self: if x.token_type == 'addr-spec': return x.local_part @property def domain(self): for x in self: if x.token_type == 'addr-spec': return x.domain @property def route(self): for x in self: if x.token_type == 'obs-route': return x.domains @property def addr_spec(self): for x in self: if x.token_type == 'addr-spec': return x.addr_spec else: return '<>' class ObsRoute(TokenList): token_type = 'obs-route' @property def domains(self): return [x.domain for x in self if x.token_type == 'domain'] class Mailbox(TokenList): token_type = 'mailbox' @property def display_name(self): if self[0].token_type == 'name-addr': return self[0].display_name @property def local_part(self): return self[0].local_part @property def domain(self): return self[0].domain @property def route(self): if self[0].token_type == 'name-addr': return self[0].route @property def addr_spec(self): return self[0].addr_spec class InvalidMailbox(TokenList): token_type = 'invalid-mailbox' @property def display_name(self): return None local_part = domain = route = addr_spec = display_name class Domain(TokenList): token_type = 'domain' @property def domain(self): return ''.join(super().value.split()) class DotAtom(TokenList): token_type = 'dot-atom' class DotAtomText(TokenList): token_type = 'dot-atom-text' class AddrSpec(TokenList): token_type = 'addr-spec' @property def local_part(self): return self[0].local_part @property def domain(self): if len(self) < 3: return None return self[-1].domain @property def value(self): if len(self) < 3: return self[0].value return self[0].value.rstrip() + self[1].value + self[2].value.lstrip() @property def addr_spec(self): nameset = set(self.local_part) if len(nameset) > len(nameset - DOT_ATOM_ENDS): lp = quote_string(self.local_part) else: lp = self.local_part if self.domain is not None: return lp + '@' + self.domain return lp class ObsLocalPart(TokenList): token_type = 'obs-local-part' class DisplayName(Phrase): token_type = 'display-name' @property def display_name(self): res = TokenList(self) if res[0].token_type == 'cfws': res.pop(0) elif res[0][0].token_type == 'cfws': res[0] = TokenList(res[0][1:]) if res[-1].token_type == 'cfws': res.pop() elif res[-1][-1].token_type == 'cfws': res[-1] = TokenList(res[-1][:-1]) return res.value @property def value(self): quote = False if self.defects: quote = True else: for x in self: if x.token_type == 'quoted-string': quote = True if quote: pre = post = '' if self[0].token_type == 'cfws' or self[0][0].token_type == 'cfws': pre = ' ' if self[-1].token_type == 'cfws' or self[-1][-1 ].token_type == 'cfws': post = ' ' return pre + quote_string(self.display_name) + post else: return super().value class LocalPart(TokenList): token_type = 'local-part' @property def value(self): if self[0].token_type == 'quoted-string': return self[0].quoted_value else: return self[0].value @property def local_part(self): res = [DOT] last = DOT last_is_tl = False for tok in (self[0] + [DOT]): if tok.token_type == 'cfws': continue if last_is_tl and 
tok.token_type == 'dot' and last[-1 ].token_type == 'cfws': res[-1] = TokenList(last[:-1]) is_tl = isinstance(tok, TokenList) if is_tl and last.token_type == 'dot' and tok[0 ].token_type == 'cfws': res.append(TokenList(tok[1:])) else: res.append(tok) last = res[-1] last_is_tl = is_tl res = TokenList(res[1:-1]) return res.value class DomainLiteral(TokenList): token_type = 'domain-literal' @property def domain(self): return ''.join(super().value.split()) @property def ip(self): for x in self: if x.token_type == 'ptext': return x.value class MIMEVersion(TokenList): token_type = 'mime-version' major = None minor = None class Parameter(TokenList): token_type = 'parameter' sectioned = False extended = False charset = 'us-ascii' @property def section_number(self): return self[1].number if self.sectioned else 0 @property def param_value(self): for token in self: if token.token_type == 'value': return token.stripped_value if token.token_type == 'quoted-string': for token in token: if token.token_type == 'bare-quoted-string': for token in token: if token.token_type == 'value': return token.stripped_value return '' class InvalidParameter(Parameter): token_type = 'invalid-parameter' class Attribute(TokenList): token_type = 'attribute' @property def stripped_value(self): for token in self: if token.token_type.endswith('attrtext'): return token.value class Section(TokenList): token_type = 'section' number = None class Value(TokenList): token_type = 'value' @property def stripped_value(self): token = self[0] if token.token_type == 'cfws': token = self[1] if token.token_type.endswith(('quoted-string', 'attribute', 'extended-attribute')): return token.stripped_value return self.value class MimeParameters(TokenList): token_type = 'mime-parameters' @property def params(self): params = OrderedDict() for token in self: if not token.token_type.endswith('parameter'): continue if token[0].token_type != 'attribute': continue name = token[0].value.strip() if name not in params: params[name] = [] params[name].append((token.section_number, token)) for name, parts in params.items(): parts = sorted(parts, key=itemgetter(0)) first_param = parts[0][1] charset = first_param.charset if not first_param.extended and len(parts) > 1: if parts[1][0] == 0: parts[1][1].defects.append(errors.InvalidHeaderDefect( 'duplicate parameter name; duplicate(s) ignored')) parts = parts[:1] value_parts = [] i = 0 for section_number, param in parts: if section_number != i: if not param.extended: param.defects.append(errors.InvalidHeaderDefect( 'duplicate parameter name; duplicate ignored')) continue else: param.defects.append(errors.InvalidHeaderDefect( 'inconsistent RFC2231 parameter numbering')) i += 1 value = param.param_value if param.extended: try: value = urllib.parse.unquote_to_bytes(value) except UnicodeEncodeError: value = urllib.parse.unquote(value, encoding='latin-1') else: try: value = value.decode(charset, 'surrogateescape') except LookupError: value = value.decode('us-ascii', 'surrogateescape') if utils._has_surrogates(value): param.defects.append(errors. 
UndecodableBytesDefect()) value_parts.append(value) value = ''.join(value_parts) yield name, value def __str__(self): params = [] for name, value in self.params: if value: params.append('{}={}'.format(name, quote_string(value))) else: params.append(name) params = '; '.join(params) return ' ' + params if params else '' class ParameterizedHeaderValue(TokenList): @property def params(self): for token in reversed(self): if token.token_type == 'mime-parameters': return token.params return {} @property def parts(self): if self and self[-1].token_type == 'mime-parameters': return TokenList(self[:-1] + self[-1]) return TokenList(self).parts class ContentType(ParameterizedHeaderValue): token_type = 'content-type' maintype = 'text' subtype = 'plain' class ContentDisposition(ParameterizedHeaderValue): token_type = 'content-disposition' content_disposition = None class ContentTransferEncoding(TokenList): token_type = 'content-transfer-encoding' cte = '7bit' class HeaderLabel(TokenList): token_type = 'header-label' class Header(TokenList): token_type = 'header' def _fold(self, folded): folded.append(str(self.pop(0))) folded.lastlen = len(folded.current[0]) folded.stickyspace = str(self.pop(0)) if self[0 ].token_type == 'cfws' else '' rest = self.pop(0) if self: raise ValueError('Malformed Header token list') rest._fold(folded) class Terminal(str): def __new__(cls, value, token_type): self = super().__new__(cls, value) self.token_type = token_type self.defects = [] return self def __repr__(self): return '{}({})'.format(self.__class__.__name__, super().__repr__()) @property def all_defects(self): return list(self.defects) def _pp(self, indent=''): return ['{}{}/{}({}){}'.format(indent, self.__class__.__name__, self.token_type, super().__repr__(), '' if not self.defects else ' {}'.format(self.defects))] def cte_encode(self, charset, policy): value = str(self) try: value.encode('us-ascii') return value except UnicodeEncodeError: return _ew.encode(value, charset) def pop_trailing_ws(self): return None def pop_leading_fws(self): return None @property def comments(self): return [] def has_leading_comment(self): return False def __getnewargs__(self): return str(self), self.token_type class WhiteSpaceTerminal(Terminal): @property def value(self): return ' ' def startswith_fws(self): return True has_fws = True class ValueTerminal(Terminal): @property def value(self): return self def startswith_fws(self): return False has_fws = False def as_encoded_word(self, charset): return _ew.encode(str(self), charset) class EWWhiteSpaceTerminal(WhiteSpaceTerminal): @property def value(self): return '' @property def encoded(self): return self[:] def __str__(self): return '' has_fws = True DOT = ValueTerminal('.', 'dot') ListSeparator = ValueTerminal(',', 'list-separator') RouteComponentMarker = ValueTerminal('@', 'route-component-marker') _wsp_splitter = re.compile('([{}]+)'.format(''.join(WSP))).split _non_atom_end_matcher = re.compile('[^{}]+'.format(''.join(ATOM_ENDS). replace('\\', '\\\\').replace(']', '\\]'))).match _non_printable_finder = re.compile('[\\x00-\\x20\\x7F]').findall _non_token_end_matcher = re.compile('[^{}]+'.format(''.join(TOKEN_ENDS). 
replace('\\', '\\\\').replace(']', '\\]'))).match _non_attribute_end_matcher = re.compile('[^{}]+'.format(''.join( ATTRIBUTE_ENDS).replace('\\', '\\\\').replace(']', '\\]'))).match _non_extended_attribute_end_matcher = re.compile('[^{}]+'.format(''.join( EXTENDED_ATTRIBUTE_ENDS).replace('\\', '\\\\').replace(']', '\\]'))).match def _validate_xtext(xtext): """If input token contains ASCII non-printables, register a defect.""" non_printables = _non_printable_finder(xtext) if non_printables: xtext.defects.append(errors.NonPrintableDefect(non_printables)) if utils._has_surrogates(xtext): xtext.defects.append(errors.UndecodableBytesDefect( 'Non-ASCII characters found in header token')) def _get_ptext_to_endchars(value, endchars): """Scan printables/quoted-pairs until endchars and return unquoted ptext. This function turns a run of qcontent, ccontent-without-comments, or dtext-with-quoted-printables into a single string by unquoting any quoted printables. It returns the string, the remaining value, and a flag that is True iff there were any quoted printables decoded. """ fragment, *remainder = _wsp_splitter(value, 1) vchars = [] escape = False had_qp = False for pos in range(len(fragment)): if fragment[pos] == '\\': if escape: escape = False had_qp = True else: escape = True continue if escape: escape = False elif fragment[pos] in endchars: break vchars.append(fragment[pos]) else: pos = pos + 1 return ''.join(vchars), ''.join([fragment[pos:]] + remainder), had_qp def get_fws(value): """FWS = 1*WSP This isn't the RFC definition. We're using fws to represent tokens where folding can be done, but when we are parsing the *un*folding has already been done so we don't need to watch out for CRLF. """ newvalue = value.lstrip() fws = WhiteSpaceTerminal(value[:len(value) - len(newvalue)], 'fws') return fws, newvalue def get_encoded_word(value): """ encoded-word = "=?" charset "?" encoding "?" encoded-text "?=" """ ew = EncodedWord() if not value.startswith('=?'): raise errors.HeaderParseError('expected encoded word but found {}'. format(value)) tok, *remainder = value[2:].split('?=', 1) if tok == value[2:]: raise errors.HeaderParseError('expected encoded word but found {}'. format(value)) remstr = ''.join(remainder) if len(remstr) > 1 and remstr[0] in hexdigits and remstr[1] in hexdigits: rest, *remainder = remstr.split('?=', 1) tok = tok + '?=' + rest if len(tok.split()) > 1: ew.defects.append(errors.InvalidHeaderDefect( 'whitespace inside encoded word')) ew.cte = value value = ''.join(remainder) try: text, charset, lang, defects = _ew.decode('=?' + tok + '?=') except ValueError: raise errors.HeaderParseError("encoded word format invalid: '{}'". format(ew.cte)) ew.charset = charset ew.lang = lang ew.defects.extend(defects) while text: if text[0] in WSP: token, text = get_fws(text) ew.append(token) continue chars, *remainder = _wsp_splitter(text, 1) vtext = ValueTerminal(chars, 'vtext') _validate_xtext(vtext) ew.append(vtext) text = ''.join(remainder) return ew, value def get_unstructured(value): """unstructured = (*([FWS] vchar) *WSP) / obs-unstruct obs-unstruct = *((*LF *CR *(obs-utext) *LF *CR)) / FWS) obs-utext = %d0 / obs-NO-WS-CTL / LF / CR obs-NO-WS-CTL is control characters except WSP/CR/LF. So, basically, we have printable runs, plus control characters or nulls in the obsolete syntax, separated by whitespace. 
Since RFC 2047 uses the obsolete syntax in its specification, but requires whitespace on either side of the encoded words, I can see no reason to need to separate the non-printable-non-whitespace from the printable runs if they occur, so we parse this into xtext tokens separated by WSP tokens. Because an 'unstructured' value must by definition constitute the entire value, this 'get' routine does not return a remaining value, only the parsed TokenList. """ unstructured = UnstructuredTokenList() while value: if value[0] in WSP: token, value = get_fws(value) unstructured.append(token) continue if value.startswith('=?'): try: token, value = get_encoded_word(value) except errors.HeaderParseError: pass else: have_ws = True if len(unstructured) > 0: if unstructured[-1].token_type != 'fws': unstructured.defects.append(errors. InvalidHeaderDefect( 'missing whitespace before encoded word')) have_ws = False if have_ws and len(unstructured) > 1: if unstructured[-2].token_type == 'encoded-word': unstructured[-1] = EWWhiteSpaceTerminal(unstructured [-1], 'fws') unstructured.append(token) continue tok, *remainder = _wsp_splitter(value, 1) vtext = ValueTerminal(tok, 'vtext') _validate_xtext(vtext) unstructured.append(vtext) value = ''.join(remainder) return unstructured def get_qp_ctext(value): """ctext = <printable ascii except \\ ( )> This is not the RFC ctext, since we are handling nested comments in comment and unquoting quoted-pairs here. We allow anything except the '()' characters, but if we find any ASCII other than the RFC defined printable ASCII, a NonPrintableDefect is added to the token's defects list. Since quoted pairs are converted to their unquoted values, what is returned is a 'ptext' token. In this case it is a WhiteSpaceTerminal, so it's value is ' '. """ ptext, value, _ = _get_ptext_to_endchars(value, '()') ptext = WhiteSpaceTerminal(ptext, 'ptext') _validate_xtext(ptext) return ptext, value def get_qcontent(value): """qcontent = qtext / quoted-pair We allow anything except the DQUOTE character, but if we find any ASCII other than the RFC defined printable ASCII, a NonPrintableDefect is added to the token's defects list. Any quoted pairs are converted to their unquoted values, so what is returned is a 'ptext' token. In this case it is a ValueTerminal. """ ptext, value, _ = _get_ptext_to_endchars(value, '"') ptext = ValueTerminal(ptext, 'ptext') _validate_xtext(ptext) return ptext, value def get_atext(value): """atext = <matches _atext_matcher> We allow any non-ATOM_ENDS in atext, but add an InvalidATextDefect to the token's defects list if we find non-atext characters. """ m = _non_atom_end_matcher(value) if not m: raise errors.HeaderParseError("expected atext but found '{}'". format(value)) atext = m.group() value = value[len(atext):] atext = ValueTerminal(atext, 'atext') _validate_xtext(atext) return atext, value def get_bare_quoted_string(value): """bare-quoted-string = DQUOTE *([FWS] qcontent) [FWS] DQUOTE A quoted-string without the leading or trailing white space. Its value is the text between the quote marks, with whitespace preserved and quoted pairs decoded. """ if value[0] != '"': raise errors.HeaderParseError('expected \'"\' but found \'{}\''. format(value)) bare_quoted_string = BareQuotedString() value = value[1:] while value and value[0] != '"': if value[0] in WSP: token, value = get_fws(value) elif value[:2] == '=?': try: token, value = get_encoded_word(value) bare_quoted_string.defects.append(errors. 
InvalidHeaderDefect('encoded word inside quoted string')) except errors.HeaderParseError: token, value = get_qcontent(value) else: token, value = get_qcontent(value) bare_quoted_string.append(token) if not value: bare_quoted_string.defects.append(errors.InvalidHeaderDefect( 'end of header inside quoted string')) return bare_quoted_string, value return bare_quoted_string, value[1:] def get_comment(value): """comment = "(" *([FWS] ccontent) [FWS] ")" ccontent = ctext / quoted-pair / comment We handle nested comments here, and quoted-pair in our qp-ctext routine. """ if value and value[0] != '(': raise errors.HeaderParseError("expected '(' but found '{}'".format( value)) comment = Comment() value = value[1:] while value and value[0] != ')': if value[0] in WSP: token, value = get_fws(value) elif value[0] == '(': token, value = get_comment(value) else: token, value = get_qp_ctext(value) comment.append(token) if not value: comment.defects.append(errors.InvalidHeaderDefect( 'end of header inside comment')) return comment, value return comment, value[1:] def get_cfws(value): """CFWS = (1*([FWS] comment) [FWS]) / FWS """ cfws = CFWSList() while value and value[0] in CFWS_LEADER: if value[0] in WSP: token, value = get_fws(value) else: token, value = get_comment(value) cfws.append(token) return cfws, value def get_quoted_string(value): """quoted-string = [CFWS] <bare-quoted-string> [CFWS] 'bare-quoted-string' is an intermediate class defined by this parser and not by the RFC grammar. It is the quoted string without any attached CFWS. """ quoted_string = QuotedString() if value and value[0] in CFWS_LEADER: token, value = get_cfws(value) quoted_string.append(token) token, value = get_bare_quoted_string(value) quoted_string.append(token) if value and value[0] in CFWS_LEADER: token, value = get_cfws(value) quoted_string.append(token) return quoted_string, value def get_atom(value): """atom = [CFWS] 1*atext [CFWS] An atom could be an rfc2047 encoded word. """ atom = Atom() if value and value[0] in CFWS_LEADER: token, value = get_cfws(value) atom.append(token) if value and value[0] in ATOM_ENDS: raise errors.HeaderParseError("expected atom but found '{}'".format (value)) if value.startswith('=?'): try: token, value = get_encoded_word(value) except errors.HeaderParseError: token, value = get_atext(value) else: token, value = get_atext(value) atom.append(token) if value and value[0] in CFWS_LEADER: token, value = get_cfws(value) atom.append(token) return atom, value def get_dot_atom_text(value): """ dot-text = 1*atext *("." 1*atext) """ dot_atom_text = DotAtomText() if not value or value[0] in ATOM_ENDS: raise errors.HeaderParseError( "expected atom at a start of dot-atom-text but found '{}'". format(value)) while value and value[0] not in ATOM_ENDS: token, value = get_atext(value) dot_atom_text.append(token) if value and value[0] == '.': dot_atom_text.append(DOT) value = value[1:] if dot_atom_text[-1] is DOT: raise errors.HeaderParseError( "expected atom at end of dot-atom-text but found '{}'".format( '.' + value)) return dot_atom_text, value def get_dot_atom(value): """ dot-atom = [CFWS] dot-atom-text [CFWS] Any place we can have a dot atom, we could instead have an rfc2047 encoded word. 
""" dot_atom = DotAtom() if value[0] in CFWS_LEADER: token, value = get_cfws(value) dot_atom.append(token) if value.startswith('=?'): try: token, value = get_encoded_word(value) except errors.HeaderParseError: token, value = get_dot_atom_text(value) else: token, value = get_dot_atom_text(value) dot_atom.append(token) if value and value[0] in CFWS_LEADER: token, value = get_cfws(value) dot_atom.append(token) return dot_atom, value def get_word(value): """word = atom / quoted-string Either atom or quoted-string may start with CFWS. We have to peel off this CFWS first to determine which type of word to parse. Afterward we splice the leading CFWS, if any, into the parsed sub-token. If neither an atom or a quoted-string is found before the next special, a HeaderParseError is raised. The token returned is either an Atom or a QuotedString, as appropriate. This means the 'word' level of the formal grammar is not represented in the parse tree; this is because having that extra layer when manipulating the parse tree is more confusing than it is helpful. """ if value[0] in CFWS_LEADER: leader, value = get_cfws(value) else: leader = None if value[0] == '"': token, value = get_quoted_string(value) elif value[0] in SPECIALS: raise errors.HeaderParseError( "Expected 'atom' or 'quoted-string' but found '{}'".format(value)) else: token, value = get_atom(value) if leader is not None: token[:0] = [leader] return token, value def get_phrase(value): """ phrase = 1*word / obs-phrase obs-phrase = word *(word / "." / CFWS) This means a phrase can be a sequence of words, periods, and CFWS in any order as long as it starts with at least one word. If anything other than words is detected, an ObsoleteHeaderDefect is added to the token's defect list. We also accept a phrase that starts with CFWS followed by a dot; this is registered as an InvalidHeaderDefect, since it is not supported by even the obsolete grammar. """ phrase = Phrase() try: token, value = get_word(value) phrase.append(token) except errors.HeaderParseError: phrase.defects.append(errors.InvalidHeaderDefect( 'phrase does not start with word')) while value and value[0] not in PHRASE_ENDS: if value[0] == '.': phrase.append(DOT) phrase.defects.append(errors.ObsoleteHeaderDefect( "period in 'phrase'")) value = value[1:] else: try: token, value = get_word(value) except errors.HeaderParseError: if value[0] in CFWS_LEADER: token, value = get_cfws(value) phrase.defects.append(errors.ObsoleteHeaderDefect( 'comment found without atom')) else: raise phrase.append(token) return phrase, value def get_local_part(value): """ local-part = dot-atom / quoted-string / obs-local-part """ local_part = LocalPart() leader = None if value[0] in CFWS_LEADER: leader, value = get_cfws(value) if not value: raise errors.HeaderParseError("expected local-part but found '{}'". 
format(value)) try: token, value = get_dot_atom(value) except errors.HeaderParseError: try: token, value = get_word(value) except errors.HeaderParseError: if value[0] != '\\' and value[0] in PHRASE_ENDS: raise token = TokenList() if leader is not None: token[:0] = [leader] local_part.append(token) if value and (value[0] == '\\' or value[0] not in PHRASE_ENDS): obs_local_part, value = get_obs_local_part(str(local_part) + value) if obs_local_part.token_type == 'invalid-obs-local-part': local_part.defects.append(errors.InvalidHeaderDefect( 'local-part is not dot-atom, quoted-string, or obs-local-part') ) else: local_part.defects.append(errors.ObsoleteHeaderDefect( 'local-part is not a dot-atom (contains CFWS)')) local_part[0] = obs_local_part try: local_part.value.encode('ascii') except UnicodeEncodeError: local_part.defects.append(errors.NonASCIILocalPartDefect( 'local-part contains non-ASCII characters)')) return local_part, value def get_obs_local_part(value): """ obs-local-part = word *("." word) """ obs_local_part = ObsLocalPart() last_non_ws_was_dot = False while value and (value[0] == '\\' or value[0] not in PHRASE_ENDS): if value[0] == '.': if last_non_ws_was_dot: obs_local_part.defects.append(errors.InvalidHeaderDefect( "invalid repeated '.'")) obs_local_part.append(DOT) last_non_ws_was_dot = True value = value[1:] continue elif value[0] == '\\': obs_local_part.append(ValueTerminal(value[0], 'misplaced-special')) value = value[1:] obs_local_part.defects.append(errors.InvalidHeaderDefect( "'\\' character outside of quoted-string/ccontent")) last_non_ws_was_dot = False continue if obs_local_part and obs_local_part[-1].token_type != 'dot': obs_local_part.defects.append(errors.InvalidHeaderDefect( "missing '.' between words")) try: token, value = get_word(value) last_non_ws_was_dot = False except errors.HeaderParseError: if value[0] not in CFWS_LEADER: raise token, value = get_cfws(value) obs_local_part.append(token) if obs_local_part[0].token_type == 'dot' or obs_local_part[0 ].token_type == 'cfws' and obs_local_part[1].token_type == 'dot': obs_local_part.defects.append(errors.InvalidHeaderDefect( "Invalid leading '.' in local part")) if obs_local_part[-1].token_type == 'dot' or obs_local_part[-1 ].token_type == 'cfws' and obs_local_part[-2].token_type == 'dot': obs_local_part.defects.append(errors.InvalidHeaderDefect( "Invalid trailing '.' in local part")) if obs_local_part.defects: obs_local_part.token_type = 'invalid-obs-local-part' return obs_local_part, value def get_dtext(value): """ dtext = <printable ascii except \\ [ ]> / obs-dtext obs-dtext = obs-NO-WS-CTL / quoted-pair We allow anything except the excluded characters, but if we find any ASCII other than the RFC defined printable ASCII, a NonPrintableDefect is added to the token's defects list. Quoted pairs are converted to their unquoted values, so what is returned is a ptext token, in this case a ValueTerminal. If there were quoted-printables, an ObsoleteHeaderDefect is added to the returned token's defect list. 
""" ptext, value, had_qp = _get_ptext_to_endchars(value, '[]') ptext = ValueTerminal(ptext, 'ptext') if had_qp: ptext.defects.append(errors.ObsoleteHeaderDefect( 'quoted printable found in domain-literal')) _validate_xtext(ptext) return ptext, value def _check_for_early_dl_end(value, domain_literal): if value: return False domain_literal.append(errors.InvalidHeaderDefect( 'end of input inside domain-literal')) domain_literal.append(ValueTerminal(']', 'domain-literal-end')) return True def get_domain_literal(value): """ domain-literal = [CFWS] "[" *([FWS] dtext) [FWS] "]" [CFWS] """ domain_literal = DomainLiteral() if value[0] in CFWS_LEADER: token, value = get_cfws(value) domain_literal.append(token) if not value: raise errors.HeaderParseError('expected domain-literal') if value[0] != '[': raise errors.HeaderParseError( "expected '[' at start of domain-literal but found '{}'".format (value)) value = value[1:] if _check_for_early_dl_end(value, domain_literal): return domain_literal, value domain_literal.append(ValueTerminal('[', 'domain-literal-start')) if value[0] in WSP: token, value = get_fws(value) domain_literal.append(token) token, value = get_dtext(value) domain_literal.append(token) if _check_for_early_dl_end(value, domain_literal): return domain_literal, value if value[0] in WSP: token, value = get_fws(value) domain_literal.append(token) if _check_for_early_dl_end(value, domain_literal): return domain_literal, value if value[0] != ']': raise errors.HeaderParseError( "expected ']' at end of domain-literal but found '{}'".format( value)) domain_literal.append(ValueTerminal(']', 'domain-literal-end')) value = value[1:] if value and value[0] in CFWS_LEADER: token, value = get_cfws(value) domain_literal.append(token) return domain_literal, value def get_domain(value): """ domain = dot-atom / domain-literal / obs-domain obs-domain = atom *("." atom)) """ domain = Domain() leader = None if value[0] in CFWS_LEADER: leader, value = get_cfws(value) if not value: raise errors.HeaderParseError("expected domain but found '{}'". format(value)) if value[0] == '[': token, value = get_domain_literal(value) if leader is not None: token[:0] = [leader] domain.append(token) return domain, value try: token, value = get_dot_atom(value) except errors.HeaderParseError: token, value = get_atom(value) if leader is not None: token[:0] = [leader] domain.append(token) if value and value[0] == '.': domain.defects.append(errors.ObsoleteHeaderDefect( 'domain is not a dot-atom (contains CFWS)')) if domain[0].token_type == 'dot-atom': domain[:] = domain[0] while value and value[0] == '.': domain.append(DOT) token, value = get_atom(value[1:]) domain.append(token) return domain, value def get_addr_spec(value): """ addr-spec = local-part "@" domain """ addr_spec = AddrSpec() token, value = get_local_part(value) addr_spec.append(token) if not value or value[0] != '@': addr_spec.defects.append(errors.InvalidHeaderDefect( 'add-spec local part with no domain')) return addr_spec, value addr_spec.append(ValueTerminal('@', 'address-at-symbol')) token, value = get_domain(value[1:]) addr_spec.append(token) return addr_spec, value def get_obs_route(value): """ obs-route = obs-domain-list ":" obs-domain-list = *(CFWS / ",") "@" domain *("," [CFWS] ["@" domain]) Returns an obs-route token with the appropriate sub-tokens (that is, there is no obs-domain-list in the parse tree). 
""" obs_route = ObsRoute() while value and (value[0] == ',' or value[0] in CFWS_LEADER): if value[0] in CFWS_LEADER: token, value = get_cfws(value) obs_route.append(token) elif value[0] == ',': obs_route.append(ListSeparator) value = value[1:] if not value or value[0] != '@': raise errors.HeaderParseError( "expected obs-route domain but found '{}'".format(value)) obs_route.append(RouteComponentMarker) token, value = get_domain(value[1:]) obs_route.append(token) while value and value[0] == ',': obs_route.append(ListSeparator) value = value[1:] if not value: break if value[0] in CFWS_LEADER: token, value = get_cfws(value) obs_route.append(token) if value[0] == '@': obs_route.append(RouteComponentMarker) token, value = get_domain(value[1:]) obs_route.append(token) if not value: raise errors.HeaderParseError('end of header while parsing obs-route') if value[0] != ':': raise errors.HeaderParseError( "expected ':' marking end of obs-route but found '{}'".format( value)) obs_route.append(ValueTerminal(':', 'end-of-obs-route-marker')) return obs_route, value[1:] def get_angle_addr(value): """ angle-addr = [CFWS] "<" addr-spec ">" [CFWS] / obs-angle-addr obs-angle-addr = [CFWS] "<" obs-route addr-spec ">" [CFWS] """ angle_addr = AngleAddr() if value[0] in CFWS_LEADER: token, value = get_cfws(value) angle_addr.append(token) if not value or value[0] != '<': raise errors.HeaderParseError("expected angle-addr but found '{}'". format(value)) angle_addr.append(ValueTerminal('<', 'angle-addr-start')) value = value[1:] if value[0] == '>': angle_addr.append(ValueTerminal('>', 'angle-addr-end')) angle_addr.defects.append(errors.InvalidHeaderDefect( 'null addr-spec in angle-addr')) value = value[1:] return angle_addr, value try: token, value = get_addr_spec(value) except errors.HeaderParseError: try: token, value = get_obs_route(value) angle_addr.defects.append(errors.ObsoleteHeaderDefect( 'obsolete route specification in angle-addr')) except errors.HeaderParseError: raise errors.HeaderParseError( "expected addr-spec or obs-route but found '{}'".format(value)) angle_addr.append(token) token, value = get_addr_spec(value) angle_addr.append(token) if value and value[0] == '>': value = value[1:] else: angle_addr.defects.append(errors.InvalidHeaderDefect( "missing trailing '>' on angle-addr")) angle_addr.append(ValueTerminal('>', 'angle-addr-end')) if value and value[0] in CFWS_LEADER: token, value = get_cfws(value) angle_addr.append(token) return angle_addr, value def get_display_name(value): """ display-name = phrase Because this is simply a name-rule, we don't return a display-name token containing a phrase, but rather a display-name token with the content of the phrase. 
""" display_name = DisplayName() token, value = get_phrase(value) display_name.extend(token[:]) display_name.defects = token.defects[:] return display_name, value def get_name_addr(value): """ name-addr = [display-name] angle-addr """ name_addr = NameAddr() leader = None if value[0] in CFWS_LEADER: leader, value = get_cfws(value) if not value: raise errors.HeaderParseError("expected name-addr but found '{}'" .format(leader)) if value[0] != '<': if value[0] in PHRASE_ENDS: raise errors.HeaderParseError("expected name-addr but found '{}'" .format(value)) token, value = get_display_name(value) if not value: raise errors.HeaderParseError("expected name-addr but found '{}'" .format(token)) if leader is not None: token[0][:0] = [leader] leader = None name_addr.append(token) token, value = get_angle_addr(value) if leader is not None: token[:0] = [leader] name_addr.append(token) return name_addr, value def get_mailbox(value): """ mailbox = name-addr / addr-spec """ mailbox = Mailbox() try: token, value = get_name_addr(value) except errors.HeaderParseError: try: token, value = get_addr_spec(value) except errors.HeaderParseError: raise errors.HeaderParseError("expected mailbox but found '{}'" .format(value)) if any(isinstance(x, errors.InvalidHeaderDefect) for x in token.all_defects ): mailbox.token_type = 'invalid-mailbox' mailbox.append(token) return mailbox, value def get_invalid_mailbox(value, endchars): """ Read everything up to one of the chars in endchars. This is outside the formal grammar. The InvalidMailbox TokenList that is returned acts like a Mailbox, but the data attributes are None. """ invalid_mailbox = InvalidMailbox() while value and value[0] not in endchars: if value[0] in PHRASE_ENDS: invalid_mailbox.append(ValueTerminal(value[0], 'misplaced-special') ) value = value[1:] else: token, value = get_phrase(value) invalid_mailbox.append(token) return invalid_mailbox, value def get_mailbox_list(value): """ mailbox-list = (mailbox *("," mailbox)) / obs-mbox-list obs-mbox-list = *([CFWS] ",") mailbox *("," [mailbox / CFWS]) For this routine we go outside the formal grammar in order to improve error handling. We recognize the end of the mailbox list only at the end of the value or at a ';' (the group terminator). This is so that we can turn invalid mailboxes into InvalidMailbox tokens and continue parsing any remaining valid mailboxes. We also allow all mailbox entries to be null, and this condition is handled appropriately at a higher level. 
""" mailbox_list = MailboxList() while value and value[0] != ';': try: token, value = get_mailbox(value) mailbox_list.append(token) except errors.HeaderParseError: leader = None if value[0] in CFWS_LEADER: leader, value = get_cfws(value) if not value or value[0] in ',;': mailbox_list.append(leader) mailbox_list.defects.append(errors.ObsoleteHeaderDefect ('empty element in mailbox-list')) else: token, value = get_invalid_mailbox(value, ',;') if leader is not None: token[:0] = [leader] mailbox_list.append(token) mailbox_list.defects.append(errors.InvalidHeaderDefect( 'invalid mailbox in mailbox-list')) elif value[0] == ',': mailbox_list.defects.append(errors.ObsoleteHeaderDefect( 'empty element in mailbox-list')) else: token, value = get_invalid_mailbox(value, ',;') if leader is not None: token[:0] = [leader] mailbox_list.append(token) mailbox_list.defects.append(errors.InvalidHeaderDefect( 'invalid mailbox in mailbox-list')) if value and value[0] not in ',;': mailbox = mailbox_list[-1] mailbox.token_type = 'invalid-mailbox' token, value = get_invalid_mailbox(value, ',;') mailbox.extend(token) mailbox_list.defects.append(errors.InvalidHeaderDefect( 'invalid mailbox in mailbox-list')) if value and value[0] == ',': mailbox_list.append(ListSeparator) value = value[1:] return mailbox_list, value def get_group_list(value): """ group-list = mailbox-list / CFWS / obs-group-list obs-group-list = 1*([CFWS] ",") [CFWS] """ group_list = GroupList() if not value: group_list.defects.append(errors.InvalidHeaderDefect( 'end of header before group-list')) return group_list, value leader = None if value and value[0] in CFWS_LEADER: leader, value = get_cfws(value) if not value: group_list.defects.append(errors.InvalidHeaderDefect( 'end of header in group-list')) group_list.append(leader) return group_list, value if value[0] == ';': group_list.append(leader) return group_list, value token, value = get_mailbox_list(value) if len(token.all_mailboxes) == 0: if leader is not None: group_list.append(leader) group_list.extend(token) group_list.defects.append(errors.ObsoleteHeaderDefect( 'group-list with empty entries')) return group_list, value if leader is not None: token[:0] = [leader] group_list.append(token) return group_list, value def get_group(value): """ group = display-name ":" [group-list] ";" [CFWS] """ group = Group() token, value = get_display_name(value) if not value or value[0] != ':': raise errors.HeaderParseError( "expected ':' at end of group display name but found '{}'". format(value)) group.append(token) group.append(ValueTerminal(':', 'group-display-name-terminator')) value = value[1:] if value and value[0] == ';': group.append(ValueTerminal(';', 'group-terminator')) return group, value[1:] token, value = get_group_list(value) group.append(token) if not value: group.defects.append(errors.InvalidHeaderDefect( 'end of header in group')) if value[0] != ';': raise errors.HeaderParseError( "expected ';' at end of group but found {}".format(value)) group.append(ValueTerminal(';', 'group-terminator')) value = value[1:] if value and value[0] in CFWS_LEADER: token, value = get_cfws(value) group.append(token) return group, value def get_address(value): """ address = mailbox / group Note that counter-intuitively, an address can be either a single address or a list of addresses (a group). This is why the returned Address object has a 'mailboxes' attribute which treats a single address as a list of length one. 
When you need to differentiate between to two cases, extract the single element, which is either a mailbox or a group token. """ address = Address() try: token, value = get_group(value) except errors.HeaderParseError: try: token, value = get_mailbox(value) except errors.HeaderParseError: raise errors.HeaderParseError("expected address but found '{}'" .format(value)) address.append(token) return address, value def get_address_list(value): """ address_list = (address *("," address)) / obs-addr-list obs-addr-list = *([CFWS] ",") address *("," [address / CFWS]) We depart from the formal grammar here by continuing to parse until the end of the input, assuming the input to be entirely composed of an address-list. This is always true in email parsing, and allows us to skip invalid addresses to parse additional valid ones. """ address_list = AddressList() while value: try: token, value = get_address(value) address_list.append(token) except errors.HeaderParseError as err: leader = None if value[0] in CFWS_LEADER: leader, value = get_cfws(value) if not value or value[0] == ',': address_list.append(leader) address_list.defects.append(errors.ObsoleteHeaderDefect ('address-list entry with no content')) else: token, value = get_invalid_mailbox(value, ',') if leader is not None: token[:0] = [leader] address_list.append(Address([token])) address_list.defects.append(errors.InvalidHeaderDefect( 'invalid address in address-list')) elif value[0] == ',': address_list.defects.append(errors.ObsoleteHeaderDefect( 'empty element in address-list')) else: token, value = get_invalid_mailbox(value, ',') if leader is not None: token[:0] = [leader] address_list.append(Address([token])) address_list.defects.append(errors.InvalidHeaderDefect( 'invalid address in address-list')) if value and value[0] != ',': mailbox = address_list[-1][0] mailbox.token_type = 'invalid-mailbox' token, value = get_invalid_mailbox(value, ',') mailbox.extend(token) address_list.defects.append(errors.InvalidHeaderDefect( 'invalid address in address-list')) if value: address_list.append(ValueTerminal(',', 'list-separator')) value = value[1:] return address_list, value def parse_mime_version(value): """ mime-version = [CFWS] 1*digit [CFWS] "." [CFWS] 1*digit [CFWS] """ mime_version = MIMEVersion() if not value: mime_version.defects.append(errors.HeaderMissingRequiredValue( 'Missing MIME version number (eg: 1.0)')) return mime_version if value[0] in CFWS_LEADER: token, value = get_cfws(value) mime_version.append(token) if not value: mime_version.defects.append(errors.HeaderMissingRequiredValue( 'Expected MIME version number but found only CFWS')) digits = '' while value and value[0] != '.' 
and value[0] not in CFWS_LEADER: digits += value[0] value = value[1:] if not digits.isdigit(): mime_version.defects.append(errors.InvalidHeaderDefect( 'Expected MIME major version number but found {!r}'.format(digits)) ) mime_version.append(ValueTerminal(digits, 'xtext')) else: mime_version.major = int(digits) mime_version.append(ValueTerminal(digits, 'digits')) if value and value[0] in CFWS_LEADER: token, value = get_cfws(value) mime_version.append(token) if not value or value[0] != '.': if mime_version.major is not None: mime_version.defects.append(errors.InvalidHeaderDefect( 'Incomplete MIME version; found only major number')) if value: mime_version.append(ValueTerminal(value, 'xtext')) return mime_version mime_version.append(ValueTerminal('.', 'version-separator')) value = value[1:] if value and value[0] in CFWS_LEADER: token, value = get_cfws(value) mime_version.append(token) if not value: if mime_version.major is not None: mime_version.defects.append(errors.InvalidHeaderDefect( 'Incomplete MIME version; found only major number')) return mime_version digits = '' while value and value[0] not in CFWS_LEADER: digits += value[0] value = value[1:] if not digits.isdigit(): mime_version.defects.append(errors.InvalidHeaderDefect( 'Expected MIME minor version number but found {!r}'.format(digits)) ) mime_version.append(ValueTerminal(digits, 'xtext')) else: mime_version.minor = int(digits) mime_version.append(ValueTerminal(digits, 'digits')) if value and value[0] in CFWS_LEADER: token, value = get_cfws(value) mime_version.append(token) if value: mime_version.defects.append(errors.InvalidHeaderDefect( 'Excess non-CFWS text after MIME version')) mime_version.append(ValueTerminal(value, 'xtext')) return mime_version def get_invalid_parameter(value): """ Read everything up to the next ';'. This is outside the formal grammar. The InvalidParameter TokenList that is returned acts like a Parameter, but the data attributes are None. """ invalid_parameter = InvalidParameter() while value and value[0] != ';': if value[0] in PHRASE_ENDS: invalid_parameter.append(ValueTerminal(value[0], 'misplaced-special')) value = value[1:] else: token, value = get_phrase(value) invalid_parameter.append(token) return invalid_parameter, value def get_ttext(value): """ttext = <matches _ttext_matcher> We allow any non-TOKEN_ENDS in ttext, but add defects to the token's defects list if we find non-ttext characters. We also register defects for *any* non-printables even though the RFC doesn't exclude all of them, because we follow the spirit of RFC 5322. """ m = _non_token_end_matcher(value) if not m: raise errors.HeaderParseError("expected ttext but found '{}'". format(value)) ttext = m.group() value = value[len(ttext):] ttext = ValueTerminal(ttext, 'ttext') _validate_xtext(ttext) return ttext, value def get_token(value): """token = [CFWS] 1*ttext [CFWS] The RFC equivalent of ttext is any US-ASCII chars except space, ctls, or tspecials. We also exclude tabs even though the RFC doesn't. The RFC implies the CFWS but is not explicit about it in the BNF. """ mtoken = Token() if value and value[0] in CFWS_LEADER: token, value = get_cfws(value) mtoken.append(token) if value and value[0] in TOKEN_ENDS: raise errors.HeaderParseError("expected token but found '{}'". 
format(value)) token, value = get_ttext(value) mtoken.append(token) if value and value[0] in CFWS_LEADER: token, value = get_cfws(value) mtoken.append(token) return mtoken, value def get_attrtext(value): """attrtext = 1*(any non-ATTRIBUTE_ENDS character) We allow any non-ATTRIBUTE_ENDS in attrtext, but add defects to the token's defects list if we find non-attrtext characters. We also register defects for *any* non-printables even though the RFC doesn't exclude all of them, because we follow the spirit of RFC 5322. """ m = _non_attribute_end_matcher(value) if not m: raise errors.HeaderParseError('expected attrtext but found {!r}'. format(value)) attrtext = m.group() value = value[len(attrtext):] attrtext = ValueTerminal(attrtext, 'attrtext') _validate_xtext(attrtext) return attrtext, value def get_attribute(value): """ [CFWS] 1*attrtext [CFWS] This version of the BNF makes the CFWS explicit, and as usual we use a value terminal for the actual run of characters. The RFC equivalent of attrtext is the token characters, with the subtraction of '*', "'", and '%'. We include tab in the excluded set just as we do for token. """ attribute = Attribute() if value and value[0] in CFWS_LEADER: token, value = get_cfws(value) attribute.append(token) if value and value[0] in ATTRIBUTE_ENDS: raise errors.HeaderParseError("expected token but found '{}'". format(value)) token, value = get_attrtext(value) attribute.append(token) if value and value[0] in CFWS_LEADER: token, value = get_cfws(value) attribute.append(token) return attribute, value def get_extended_attrtext(value): """attrtext = 1*(any non-ATTRIBUTE_ENDS character plus '%') This is a special parsing routine so that we get a value that includes % escapes as a single string (which we decode as a single string later). """ m = _non_extended_attribute_end_matcher(value) if not m: raise errors.HeaderParseError( 'expected extended attrtext but found {!r}'.format(value)) attrtext = m.group() value = value[len(attrtext):] attrtext = ValueTerminal(attrtext, 'extended-attrtext') _validate_xtext(attrtext) return attrtext, value def get_extended_attribute(value): """ [CFWS] 1*extended_attrtext [CFWS] This is like the non-extended version except we allow % characters, so that we can pick up an encoded value as a single string. """ attribute = Attribute() if value and value[0] in CFWS_LEADER: token, value = get_cfws(value) attribute.append(token) if value and value[0] in EXTENDED_ATTRIBUTE_ENDS: raise errors.HeaderParseError("expected token but found '{}'". format(value)) token, value = get_extended_attrtext(value) attribute.append(token) if value and value[0] in CFWS_LEADER: token, value = get_cfws(value) attribute.append(token) return attribute, value def get_section(value): """ '*' digits The formal BNF is more complicated because leading 0s are not allowed. We check for that and add a defect. We also assume no CFWS is allowed between the '*' and the digits, though the RFC is not crystal clear on that. The caller should already have dealt with leading CFWS. """ section = Section() if not value or value[0] != '*': raise errors.HeaderParseError('Expected section but found {}'. 
format(value)) section.append(ValueTerminal('*', 'section-marker')) value = value[1:] if not value or not value[0].isdigit(): raise errors.HeaderParseError('Expected section number but found {}' .format(value)) digits = '' while value and value[0].isdigit(): digits += value[0] value = value[1:] if digits[0] == '0' and digits != '0': section.defects.append(errors.InvalidHeaderDefect( 'section number has an invalid leading 0')) section.number = int(digits) section.append(ValueTerminal(digits, 'digits')) return section, value def get_value(value): """ quoted-string / attribute """ v = Value() if not value: raise errors.HeaderParseError('Expected value but found end of string') leader = None if value[0] in CFWS_LEADER: leader, value = get_cfws(value) if not value: raise errors.HeaderParseError('Expected value but found only {}'. format(leader)) if value[0] == '"': token, value = get_quoted_string(value) else: token, value = get_extended_attribute(value) if leader is not None: token[:0] = [leader] v.append(token) return v, value def get_parameter(value): """ attribute [section] ["*"] [CFWS] "=" value The CFWS is implied by the RFC but not made explicit in the BNF. This simplified form of the BNF from the RFC is made to conform with the RFC BNF through some extra checks. We do it this way because it makes both error recovery and working with the resulting parse tree easier. """ param = Parameter() token, value = get_attribute(value) param.append(token) if not value or value[0] == ';': param.defects.append(errors.InvalidHeaderDefect( 'Parameter contains name ({}) but no value'.format(token))) return param, value if value[0] == '*': try: token, value = get_section(value) param.sectioned = True param.append(token) except errors.HeaderParseError: pass if not value: raise errors.HeaderParseError('Incomplete parameter') if value[0] == '*': param.append(ValueTerminal('*', 'extended-parameter-marker')) value = value[1:] param.extended = True if value[0] != '=': raise errors.HeaderParseError("Parameter not followed by '='") param.append(ValueTerminal('=', 'parameter-separator')) value = value[1:] leader = None if value and value[0] in CFWS_LEADER: token, value = get_cfws(value) param.append(token) remainder = None appendto = param if param.extended and value and value[0] == '"': qstring, remainder = get_quoted_string(value) inner_value = qstring.stripped_value semi_valid = False if param.section_number == 0: if inner_value and inner_value[0] == "'": semi_valid = True else: token, rest = get_attrtext(inner_value) if rest and rest[0] == "'": semi_valid = True else: try: token, rest = get_extended_attrtext(inner_value) except: pass else: if not rest: semi_valid = True if semi_valid: param.defects.append(errors.InvalidHeaderDefect( 'Quoted string value for extended parameter is invalid')) param.append(qstring) for t in qstring: if t.token_type == 'bare-quoted-string': t[:] = [] appendto = t break value = inner_value else: remainder = None param.defects.append(errors.InvalidHeaderDefect( 'Parameter marked as extended but appears to have a quoted string value that is non-encoded' )) if value and value[0] == "'": token = None else: token, value = get_value(value) if not param.extended or param.section_number > 0: if not value or value[0] != "'": appendto.append(token) if remainder is not None: assert not value, value value = remainder return param, value param.defects.append(errors.InvalidHeaderDefect( 'Apparent initial-extended-value but attribute was not marked as extended or was not initial section' )) if not
value: param.defects.append(errors.InvalidHeaderDefect( 'Missing required charset/lang delimiters')) appendto.append(token) if remainder is None: return param, value else: if token is not None: for t in token: if t.token_type == 'extended-attrtext': break t.token_type == 'attrtext' appendto.append(t) param.charset = t.value if value[0] != "'": raise errors.HeaderParseError( 'Expected RFC2231 char/lang encoding delimiter, but found {!r}' .format(value)) appendto.append(ValueTerminal("'", 'RFC2231 delimiter')) value = value[1:] if value and value[0] != "'": token, value = get_attrtext(value) appendto.append(token) param.lang = token.value if not value or value[0] != "'": raise errors.HeaderParseError( 'Expected RFC2231 char/lang encoding delimiter, but found {}' .format(value)) appendto.append(ValueTerminal("'", 'RFC2231 delimiter')) value = value[1:] if remainder is not None: v = Value() while value: if value[0] in WSP: token, value = get_fws(value) else: token, value = get_qcontent(value) v.append(token) token = v else: token, value = get_value(value) appendto.append(token) if remainder is not None: assert not value, value value = remainder return param, value def parse_mime_parameters(value): """ parameter *( ";" parameter ) That BNF is meant to indicate this routine should only be called after finding and handling the leading ';'. There is no corresponding rule in the formal RFC grammar, but it is more convenient for us for the set of parameters to be treated as its own TokenList. This is 'parse' routine because it consumes the reminaing value, but it would never be called to parse a full header. Instead it is called to parse everything after the non-parameter value of a specific MIME header. """ mime_parameters = MimeParameters() while value: try: token, value = get_parameter(value) mime_parameters.append(token) except errors.HeaderParseError as err: leader = None if value[0] in CFWS_LEADER: leader, value = get_cfws(value) if not value: mime_parameters.append(leader) return mime_parameters if value[0] == ';': if leader is not None: mime_parameters.append(leader) mime_parameters.defects.append(errors.InvalidHeaderDefect( 'parameter entry with no content')) else: token, value = get_invalid_parameter(value) if leader: token[:0] = [leader] mime_parameters.append(token) mime_parameters.defects.append(errors.InvalidHeaderDefect( 'invalid parameter {!r}'.format(token))) if value and value[0] != ';': param = mime_parameters[-1] param.token_type = 'invalid-parameter' token, value = get_invalid_parameter(value) param.extend(token) mime_parameters.defects.append(errors.InvalidHeaderDefect( 'parameter with invalid trailing text {!r}'.format(token))) if value: mime_parameters.append(ValueTerminal(';', 'parameter-separator')) value = value[1:] return mime_parameters def _find_mime_parameters(tokenlist, value): """Do our best to find the parameters in an invalid MIME header """ while value and value[0] != ';': if value[0] in PHRASE_ENDS: tokenlist.append(ValueTerminal(value[0], 'misplaced-special')) value = value[1:] else: token, value = get_phrase(value) tokenlist.append(token) if not value: return tokenlist.append(ValueTerminal(';', 'parameter-separator')) tokenlist.append(parse_mime_parameters(value[1:])) def parse_content_type_header(value): """ maintype "/" subtype *( ";" parameter ) The maintype and substype are tokens. Theoretically they could be checked against the official IANA list + x-token, but we don't do that. 
""" ctype = ContentType() recover = False if not value: ctype.defects.append(errors.HeaderMissingRequiredValue( 'Missing content type specification')) return ctype try: token, value = get_token(value) except errors.HeaderParseError: ctype.defects.append(errors.InvalidHeaderDefect( 'Expected content maintype but found {!r}'.format(value))) _find_mime_parameters(ctype, value) return ctype ctype.append(token) if not value or value[0] != '/': ctype.defects.append(errors.InvalidHeaderDefect('Invalid content type') ) if value: _find_mime_parameters(ctype, value) return ctype ctype.maintype = token.value.strip().lower() ctype.append(ValueTerminal('/', 'content-type-separator')) value = value[1:] try: token, value = get_token(value) except errors.HeaderParseError: ctype.defects.append(errors.InvalidHeaderDefect( 'Expected content subtype but found {!r}'.format(value))) _find_mime_parameters(ctype, value) return ctype ctype.append(token) ctype.subtype = token.value.strip().lower() if not value: return ctype if value[0] != ';': ctype.defects.append(errors.InvalidHeaderDefect( 'Only parameters are valid after content type, but found {!r}'. format(value))) del ctype.maintype, ctype.subtype _find_mime_parameters(ctype, value) return ctype ctype.append(ValueTerminal(';', 'parameter-separator')) ctype.append(parse_mime_parameters(value[1:])) return ctype def parse_content_disposition_header(value): """ disposition-type *( ";" parameter ) """ disp_header = ContentDisposition() if not value: disp_header.defects.append(errors.HeaderMissingRequiredValue( 'Missing content disposition')) return disp_header try: token, value = get_token(value) except errors.HeaderParseError: disp_header.defects.append(errors.InvalidHeaderDefect( 'Expected content disposition but found {!r}'.format(value))) _find_mime_parameters(disp_header, value) return disp_header disp_header.append(token) disp_header.content_disposition = token.value.strip().lower() if not value: return disp_header if value[0] != ';': disp_header.defects.append(errors.InvalidHeaderDefect( 'Only parameters are valid after content disposition, but found {!r}' .format(value))) _find_mime_parameters(disp_header, value) return disp_header disp_header.append(ValueTerminal(';', 'parameter-separator')) disp_header.append(parse_mime_parameters(value[1:])) return disp_header def parse_content_transfer_encoding_header(value): """ mechanism """ cte_header = ContentTransferEncoding() if not value: cte_header.defects.append(errors.HeaderMissingRequiredValue( 'Missing content transfer encoding')) return cte_header try: token, value = get_token(value) except errors.HeaderParseError: cte_header.defects.append(errors.InvalidHeaderDefect( 'Expected content transfer encoding but found {!r}'.format(value))) else: cte_header.append(token) cte_header.cte = token.value.strip().lower() if not value: return cte_header while value: cte_header.defects.append(errors.InvalidHeaderDefect( 'Extra text after content transfer encoding')) if value[0] in PHRASE_ENDS: cte_header.append(ValueTerminal(value[0], 'misplaced-special')) value = value[1:] else: token, value = get_phrase(value) cte_header.append(token) return cte_header
33.65521
113
0.595444
5a41d4a81d09aa759ee41e10f44e8ee66880d35b
166
py
Python
python/testData/inspections/AddCallSuperConflictingTupleParam_after.py
jnthn/intellij-community
8fa7c8a3ace62400c838e0d5926a7be106aa8557
[ "Apache-2.0" ]
2
2019-04-28T07:48:50.000Z
2020-12-11T14:18:08.000Z
python/testData/inspections/AddCallSuperConflictingTupleParam_after.py
Cyril-lamirand/intellij-community
60ab6c61b82fc761dd68363eca7d9d69663cfa39
[ "Apache-2.0" ]
173
2018-07-05T13:59:39.000Z
2018-08-09T01:12:03.000Z
python/testData/inspections/AddCallSuperConflictingTupleParam_after.py
Cyril-lamirand/intellij-community
60ab6c61b82fc761dd68363eca7d9d69663cfa39
[ "Apache-2.0" ]
2
2020-03-15T08:57:37.000Z
2020-04-07T04:48:14.000Z
class A:
    def __init__(self, (a, (b, c)), (d, e)):
        pass


class B(A):
    def __init__(self, (a, b), c, e, d):
        A.__init__(self, (a, (b, c)), (d, e))
23.714286
45
0.451807
0d1b5b34def9efc7a76694484299ff467a28c3d6
2,259
py
Python
apps/categories/tests/test_models.py
ar0ne/mptt-demo
1c1a6c70f1844e1070fe17ff1fb267cae142a97e
[ "MIT" ]
null
null
null
apps/categories/tests/test_models.py
ar0ne/mptt-demo
1c1a6c70f1844e1070fe17ff1fb267cae142a97e
[ "MIT" ]
null
null
null
apps/categories/tests/test_models.py
ar0ne/mptt-demo
1c1a6c70f1844e1070fe17ff1fb267cae142a97e
[ "MIT" ]
null
null
null
""" Unit tests for Models """ from apps.categories.models import Category from apps.categories.tests.conftest import BaseTestCase class TestCategoryModel(BaseTestCase): def test_get_parents(self): self.assertEqual([], list(self.root.get_parents())) self.assertEqual([self.root], list(self.cat1.get_parents())) self.assertEqual([self.cat1, self.root], list(self.cat11.get_parents())) self.assertEqual([self.root], list(self.cat2.get_parents())) def test_get_children(self): self.assertEqual([self.cat1, self.cat2], list(self.root.get_children())) self.assertEqual([], list(self.cat2.get_children())) self.assertEqual([self.cat11, self.cat12], list(self.cat1.get_children())) def test_get_all_children(self): self.assertEqual( [self.cat1, self.cat11, self.cat12, self.cat2], list(self.root.get_all_children()), ) def test_get_siblings(self): self.assertEqual([], list(self.root.get_siblings())) self.assertEqual([self.cat2], list(self.cat1.get_siblings())) self.assertEqual([self.cat1], list(self.cat2.get_siblings())) self.assertEqual([self.cat1, self.cat2], list(self.cat1.get_siblings(True))) self.assertEqual([self.cat1, self.cat2], list(self.cat2.get_siblings(True))) def test_add_node(self): cat3 = Category.add_node("cat3", self.root) self.root.refresh_from_db() self.assertTrue(cat3.parent.lft < cat3.lft < cat3.parent.rgt) self.assertTrue(cat3.parent.lft < cat3.rgt < cat3.parent.rgt) cat31 = Category.add_node("cat31", cat3) cat3.refresh_from_db() self.assertTrue(cat31.parent.parent.lft < cat31.parent.lft < cat31.lft) self.assertTrue(cat31.lft < cat31.parent.rgt < cat31.parent.parent.rgt) self.assertTrue(cat31.parent.parent.lft < cat31.parent.lft < cat31.rgt) self.assertTrue(cat31.rgt < cat31.parent.rgt < cat31.parent.parent.rgt) def test_add_root_node(self): Category.objects.all().delete() new_main = Category.add_node("new_main", None) self.assertEqual(1, new_main.lft) self.assertEqual(2, new_main.rgt) self.assertEqual("new_main", new_main.name)
40.339286
84
0.671979
cf429d32e89d1c97594daca64cfd4b4e1eb792d2
1,809
py
Python
src/tblink_rpc/__main__.py
tblink-rpc/pytblink-rpc
fb3a4d658942107a5882280f082c91d2e3396a35
[ "Apache-2.0" ]
2
2022-03-30T11:57:57.000Z
2022-03-30T12:31:36.000Z
src/tblink_rpc/__main__.py
fvutils/pytblink
7e62355927f8d9558c0b3f95e9eaaa509468131b
[ "Apache-2.0" ]
null
null
null
src/tblink_rpc/__main__.py
fvutils/pytblink
7e62355927f8d9558c0b3f95e9eaaa509468131b
[ "Apache-2.0" ]
null
null
null
'''
Created on Jul 8, 2020

@author: ballance
'''
import argparse
import os
import sys

tblink_dir = os.path.dirname(os.path.abspath(__file__))
hvl_dir = os.path.join(tblink_dir, "hvl")


def files(args):
    sv_dpi_files = [
        os.path.join(hvl_dir, "tblink.sv")
    ]

    files = None
    if args.language == "sv-dpi":
        files = sv_dpi_files
    else:
        pass

    result = ""
    for i,file in enumerate(files):
        if i > 0:
            result += " "
        result += file

    print(result)


def lib(args):
    libpath = None
    for p in sys.path:
        if os.path.exists(os.path.join(p, "libtblink-launcher.so")):
            libpath = os.path.join(p, "libtblink-launcher.so")
            break
    print(libpath)


def getparser():
    parser = argparse.ArgumentParser()

    subparser = parser.add_subparsers()
    subparser.required = True
    subparser.dest = 'command'

    files_cmd = subparser.add_parser("files",
        help="Provides files that need to be compiled")
    files_cmd.set_defaults(func=files)
    files_cmd.add_argument("-language", default="sv-dpi",
        choices=["sv-dpi", "vlog-vpi"])

    lib_cmd = subparser.add_parser("lib",
        help="Get library path")
    lib_cmd.set_defaults(func=lib)

    run_cmd = subparser.add_parser("run",
        help="Launch tblink session -- primarily ued internally")
    run_cmd.add_argument("-host", help="Host to connect to")
    run_cmd.add_argument("-port", help="Port to connect to")
    # run_cmd.set_defaults(func=)

    return parser


def main():
    parser = getparser()
    args = parser.parse_args()
    args.func(args)
    pass


if __name__ == "__main__":
    main()
22.898734
68
0.578773
d58da54af84a877eedd48b43933b6421be32e806
3,379
py
Python
tensorflow_datasets/core/features/text_feature_test.py
rsepassi/datasets
299f482da52aebe910e91053dbb06a36355f4cde
[ "Apache-2.0" ]
1
2020-12-07T14:55:44.000Z
2020-12-07T14:55:44.000Z
tensorflow_datasets/core/features/text_feature_test.py
rsepassi/datasets
299f482da52aebe910e91053dbb06a36355f4cde
[ "Apache-2.0" ]
null
null
null
tensorflow_datasets/core/features/text_feature_test.py
rsepassi/datasets
299f482da52aebe910e91053dbb06a36355f4cde
[ "Apache-2.0" ]
null
null
null
# coding=utf-8
# Copyright 2019 The TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# coding=utf-8
"""Tests for tensorflow_datasets.core.features.text_feature."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import tensorflow as tf
from tensorflow_datasets.core import features
from tensorflow_datasets.core import test_utils
from tensorflow_datasets.core.features.text import text_encoder

tf.compat.v1.enable_eager_execution()


class TextFeatureTest(test_utils.FeatureExpectationsTestCase):

  @property
  def expectations(self):
    nonunicode_text = 'hello world'
    unicode_text = u'你好'

    return [
        test_utils.FeatureExpectation(
            name='text',
            feature=features.Text(),
            shape=(),
            dtype=tf.string,
            tests=[
                # Non-unicode
                test_utils.FeatureExpectationItem(
                    value=nonunicode_text,
                    expected=tf.compat.as_bytes(nonunicode_text),
                ),
                # Unicode
                test_utils.FeatureExpectationItem(
                    value=unicode_text,
                    expected=tf.compat.as_bytes(unicode_text),
                ),
                # Empty string
                test_utils.FeatureExpectationItem(
                    value='',
                    expected=tf.compat.as_bytes(''),
                ),
            ],
        ),
        # Unicode integer-encoded by byte
        test_utils.FeatureExpectation(
            name='text_unicode_encoded',
            feature=features.Text(encoder=text_encoder.ByteTextEncoder()),
            shape=(None,),
            dtype=tf.int64,
            tests=[
                test_utils.FeatureExpectationItem(
                    value=unicode_text,
                    expected=[i + 1 for i in [228, 189, 160, 229, 165, 189]],
                ),
                # Empty string
                test_utils.FeatureExpectationItem(
                    value='',
                    expected=[],
                ),
            ],
        ),
    ]

  def test_text_conversion(self):
    text_f = features.Text(encoder=text_encoder.ByteTextEncoder())
    text = u'你好'
    self.assertEqual(text, text_f.ints2str(text_f.str2ints(text)))

  def test_save_load_metadata(self):
    text_f = features.Text(
        encoder=text_encoder.ByteTextEncoder(additional_tokens=['HI']))
    text = u'HI 你好'
    ids = text_f.str2ints(text)
    self.assertEqual(1, ids[0])

    with test_utils.tmp_dir(self.get_temp_dir()) as data_dir:
      feature_name = 'dummy'
      text_f.save_metadata(data_dir, feature_name)

      new_f = features.Text()
      new_f.load_metadata(data_dir, feature_name)
      self.assertEqual(ids, text_f.str2ints(text))


if __name__ == '__main__':
  test_utils.main()
32.490385
77
0.622078
80b68c18ed9d7192b52c5713c0659d4ebc3a46c9
518
py
Python
apps/wallposts/urlsapi.py
gannetson/sportschooldeopenlucht
0c78e5a95b22a963244112e478119ba60c572141
[ "BSD-3-Clause" ]
1
2019-01-19T06:58:39.000Z
2019-01-19T06:58:39.000Z
apps/wallposts/urlsapi.py
gannetson/sportschooldeopenlucht
0c78e5a95b22a963244112e478119ba60c572141
[ "BSD-3-Clause" ]
null
null
null
apps/wallposts/urlsapi.py
gannetson/sportschooldeopenlucht
0c78e5a95b22a963244112e478119ba60c572141
[ "BSD-3-Clause" ]
null
null
null
from apps.wallposts.views import WallPostDetail
from django.conf.urls import patterns, url
from surlex.dj import surl

from .views import ReactionList, ReactionDetail, WallPostList

urlpatterns = patterns('',
    url(r'^$', WallPostList.as_view(), name='wallpost-list'),
    surl(r'^<pk:#>$', WallPostDetail.as_view(), name='wallpost-detail'),
    url(r'^reactions/$', ReactionList.as_view(), name='wallpost-reaction-list'),
    surl(r'^reactions/<pk:#>$', ReactionDetail.as_view(), name='wallpost-reaction-detail'),
)
43.166667
91
0.723938
9dba93c8973cd68021f6b36ee4bb72c596baf41a
2,332
py
Python
Forward_search.py
alvcorcas/PDDL
4b3ff61c7e2793a6f3e78835e5617a2b283fa95e
[ "MIT" ]
null
null
null
Forward_search.py
alvcorcas/PDDL
4b3ff61c7e2793a6f3e78835e5617a2b283fa95e
[ "MIT" ]
null
null
null
Forward_search.py
alvcorcas/PDDL
4b3ff61c7e2793a6f3e78835e5617a2b283fa95e
[ "MIT" ]
null
null
null
import Heuristics as heur

no_path = 'There is no path from initial state to target'


def forward_search_prego(initial_state, target, actions):
    return forward_search_prego_aux([], [], initial_state, target, actions)


def forward_search_prego_aux(path, visiteds, current, target, actions):
    if current.satisfy(target):
        return path
    applicable = [action for action in actions
                  if current.satisfy_preconditions(action)
                  and current.apply(action) not in visiteds]
    sorted_applicable = sorted(applicable, key=lambda a: heur.prego(
        current.apply(a), target.literals, actions))
    for action in sorted_applicable:
        e = current.apply(action)
        result = forward_search_prego_aux(
            path + [action], visiteds + [e], e, target, actions)
        if result != no_path:
            return result
    return no_path


def forward_search_delta0(initial_state, target, actions):
    return forward_search_delta0_aux([], [], initial_state, target, actions)


def forward_search_delta0_aux(path, visiteds, current, target, actions):
    if current.satisfy(target):
        return path
    applicable = [action for action in actions
                  if current.satisfy_preconditions(action)
                  and current.apply(action) not in visiteds]
    sorted_applicable = sorted(applicable, key=lambda a: heur.delta0(
        current.apply(a), target.literals, actions))
    for action in sorted_applicable:
        e = current.apply(action)
        result = forward_search_delta0_aux(
            path + [action], visiteds + [e], e, target, actions)
        if result != no_path:
            return result
    return no_path


def forward_search_blind(initial_state, target, actions):
    return forward_search_blind_aux([], [], initial_state, target, actions)


def forward_search_blind_aux(path, visiteds, current, target, actions):
    if current.satisfy(target):
        return path
    applicable = [action for action in actions
                  if current.satisfy_preconditions(action)
                  and current.apply(action) not in visiteds]
    for action in applicable:
        e = current.apply(action)
        result = forward_search_blind_aux(
            path + [action], visiteds + [e], e, target, actions)
        if result != no_path:
            return result
    return no_path
32.388889
98
0.680103
cd384fc521f3a87e755b898c1c263ff78b695d40
3,981
py
Python
tests/test_sensfsp_solver.py
voduchuy/pypacmensl
59b845f4a6891a5c7105696053655437fa181356
[ "MIT" ]
null
null
null
tests/test_sensfsp_solver.py
voduchuy/pypacmensl
59b845f4a6891a5c7105696053655437fa181356
[ "MIT" ]
null
null
null
tests/test_sensfsp_solver.py
voduchuy/pypacmensl
59b845f4a6891a5c7105696053655437fa181356
[ "MIT" ]
null
null
null
import unittest
import mpi4py.MPI as mpi
import numpy as np
import pypacmensl.sensitivity.multi_sinks as sensfsp


def tcoeff(t, out):
    out[0] = 1
    out[1] = 1
    out[2] = 1
    out[3] = 1


def dtcoeff(parameter, t, out):
    if parameter == 0:
        out[0] = 1.0
    elif parameter == 1:
        out[1] = 1.0
    elif parameter == 2:
        out[2] = 1.0
    elif parameter == 3:
        out[3] = 1.0


def propensity(reaction, states, outs):
    if reaction == 0:
        outs[:] = np.reciprocal(1 + states[:, 1])
        return
    if reaction == 1:
        outs[:] = states[:, 0]
        return
    if reaction == 2:
        outs[:] = np.reciprocal(1 + states[:, 0])
        return
    if reaction == 3:
        outs[:] = states[:, 1]


def simple_constr(X, out):
    out[:, 0] = X[:, 0]
    out[:, 1] = X[:, 1]


init_bounds = np.array([10, 10])


class TestFspSolver(unittest.TestCase):
    def setUp(self):
        self.stoich_mat = np.array([[1, 0], [-1, 0], [0, 1], [0, -1]])

    def test_serial_constructor(self):
        solver = sensfsp.SensFspSolverMultiSinks(mpi.COMM_SELF)

    def test_set_model(self):
        solver = sensfsp.SensFspSolverMultiSinks(mpi.COMM_WORLD)
        solver.SetModel(num_parameters=4,
                        stoich_matrix=self.stoich_mat,
                        propensity_t=tcoeff,
                        propensity_x=propensity,
                        tv_reactions=list(range(4)),
                        d_propensity_t=dtcoeff,
                        d_propensity_t_sp=[[i] for i in range(4)],
                        d_propensity_x=None
                        )

    def test_set_initial_distribution(self):
        solver = sensfsp.SensFspSolverMultiSinks(mpi.COMM_WORLD)
        solver.SetModel(num_parameters=4,
                        stoich_matrix=self.stoich_mat,
                        propensity_t=tcoeff,
                        propensity_x=propensity,
                        tv_reactions=list(range(4)),
                        d_propensity_t=dtcoeff,
                        d_propensity_t_sp=[[i] for i in range(4)],
                        d_propensity_x=None
                        )
        X0 = np.array([[0, 0]])
        p0 = np.array([1.0])
        s0 = np.array([0.0])
        solver.SetInitialDist(X0, p0, [s0] * 4)

    def test_set_shape(self):
        solver = sensfsp.SensFspSolverMultiSinks(mpi.COMM_WORLD)
        solver.SetModel(num_parameters=4,
                        stoich_matrix=self.stoich_mat,
                        propensity_t=tcoeff,
                        propensity_x=propensity,
                        tv_reactions=list(range(4)),
                        d_propensity_t=dtcoeff,
                        d_propensity_t_sp=[[i] for i in range(4)],
                        d_propensity_x=None
                        )
        solver.SetFspShape(simple_constr, init_bounds)

    def test_solve_serial(self):
        solver = sensfsp.SensFspSolverMultiSinks(mpi.COMM_SELF)
        solver.SetModel(num_parameters=4,
                        stoich_matrix=self.stoich_mat,
                        propensity_t=tcoeff,
                        propensity_x=propensity,
                        tv_reactions=list(range(4)),
                        d_propensity_t=dtcoeff,
                        d_propensity_t_sp=[[i] for i in range(4)],
                        d_propensity_x=None
                        )
        solver.SetFspShape(simple_constr, init_bounds)
        X0 = np.array([[0,0]])
        p0 = np.array([1.0])
        s0 = np.array([0.0])
        solver.SetInitialDist(X0, p0, [s0]*4)
        solution = solver.Solve(10.0, 1.0E-4)
        prob = np.asarray(solution.GetProbViewer())
        self.assertAlmostEqual(prob.sum(), 1.0, 4)
        for i in range(0,4):
            svec = np.asarray(solution.GetSensViewer(i))
            self.assertAlmostEqual(sum(svec), 0.0, 2)
            solution.RestoreSensViewer(i, svec)


if __name__ == '__main__':
    unittest.main()
33.453782
70
0.521728
c49942940c083c70545f111763e934873f5cdbdb
7,711
py
Python
tabular/src/autogluon/tabular/models/knn/knn_model.py
gaozhihan/autogluon
77a04ef19bcaa2a81738e51884c1018aa9e4ff65
[ "Apache-2.0" ]
null
null
null
tabular/src/autogluon/tabular/models/knn/knn_model.py
gaozhihan/autogluon
77a04ef19bcaa2a81738e51884c1018aa9e4ff65
[ "Apache-2.0" ]
null
null
null
tabular/src/autogluon/tabular/models/knn/knn_model.py
gaozhihan/autogluon
77a04ef19bcaa2a81738e51884c1018aa9e4ff65
[ "Apache-2.0" ]
null
null
null
import logging import numpy as np import math import psutil import time from sklearn.neighbors import KNeighborsClassifier, KNeighborsRegressor from autogluon.core.constants import REGRESSION from autogluon.core.utils.exceptions import NotEnoughMemoryError from autogluon.core.features.types import R_CATEGORY, R_OBJECT, S_TEXT_NGRAM, S_TEXT_SPECIAL, S_DATETIME_AS_INT from .knn_utils import FAISSNeighborsClassifier, FAISSNeighborsRegressor from autogluon.core.models.abstract.model_trial import skip_hpo from autogluon.core.models import AbstractModel logger = logging.getLogger(__name__) # TODO: Normalize data! class KNNModel(AbstractModel): """ KNearestNeighbors model (scikit-learn): https://scikit-learn.org/stable/modules/generated/sklearn.neighbors.KNeighborsClassifier.html """ def __init__(self, **kwargs): super().__init__(**kwargs) self._model_type = self._get_model_type() def _get_model_type(self): if self.problem_type == REGRESSION: return KNeighborsRegressor else: return KNeighborsClassifier def _preprocess(self, X, **kwargs): X = super()._preprocess(X, **kwargs) X = X.fillna(0).to_numpy(dtype=np.float32) return X def _set_default_params(self): default_params = { 'weights': 'uniform', 'n_jobs': -1, } for param, val in default_params.items(): self._set_default_param_value(param, val) def _get_default_auxiliary_params(self) -> dict: default_auxiliary_params = super()._get_default_auxiliary_params() extra_auxiliary_params = dict( ignored_type_group_raw=[R_CATEGORY, R_OBJECT], # TODO: Eventually use category features ignored_type_group_special=[S_TEXT_NGRAM, S_TEXT_SPECIAL, S_DATETIME_AS_INT], ) default_auxiliary_params.update(extra_auxiliary_params) return default_auxiliary_params # TODO: Enable HPO for KNN def _get_default_searchspace(self): spaces = {} return spaces def _fit(self, X, y, time_limit=None, sample_weight=None, **kwargs): time_start = time.time() X = self.preprocess(X) self._validate_fit_memory_usage(X=X) # TODO: Can incorporate this into samples, can fit on portion of data to satisfy memory instead of raising exception immediately if sample_weight is not None: # TODO: support logger.log(15, "sample_weight not yet supported for KNNModel, this model will ignore them in training.") num_rows_max = len(X) # FIXME: v0.1 Must store final num rows for refit_full or else will use everything! Worst case refit_full could train far longer than the original model. 
if time_limit is None or num_rows_max <= 10000: self.model = self._model_type(**self.params).fit(X, y) else: self.model = self._fit_with_samples(X=X, y=y, time_limit=time_limit - (time.time() - time_start)) def _validate_fit_memory_usage(self, X): max_memory_usage_ratio = self.params_aux['max_memory_usage_ratio'] model_size_bytes = 4 * X.shape[0] * X.shape[1] # Assuming float32 types expected_final_model_size_bytes = model_size_bytes * 3.6 # Roughly what can be expected of the final KNN model in memory size if expected_final_model_size_bytes > 10000000: # Only worth checking if expected model size is >10MB available_mem = psutil.virtual_memory().available model_memory_ratio = expected_final_model_size_bytes / available_mem if model_memory_ratio > (0.15 * max_memory_usage_ratio): logger.warning(f'\tWarning: Model is expected to require {round(model_memory_ratio * 100, 2)}% of available memory...') if model_memory_ratio > (0.20 * max_memory_usage_ratio): raise NotEnoughMemoryError # don't train full model to avoid OOM error # TODO: Consider making this fully generic and available to all models def _fit_with_samples(self, X, y, time_limit): """ Fit model with samples of the data repeatedly, gradually increasing the amount of data until time_limit is reached or all data is used. X and y must already be preprocessed """ time_start = time.time() sample_growth_factor = 2 # Growth factor of each sample in terms of row count sample_time_growth_factor = 8 # Assume next sample will take 8x longer than previous (Somewhat safe but there are datasets where it is even >8x. num_rows_samples = [] num_rows_max = len(X) num_rows_cur = 10000 while True: num_rows_cur = min(num_rows_cur, num_rows_max) num_rows_samples.append(num_rows_cur) if num_rows_cur == num_rows_max: break num_rows_cur *= sample_growth_factor num_rows_cur = math.ceil(num_rows_cur) if num_rows_cur * 1.5 >= num_rows_max: num_rows_cur = num_rows_max def sample_func(chunk, frac): # Guarantee at least 1 sample (otherwise log_loss would crash or model would return different column counts in pred_proba) n = max(math.ceil(len(chunk) * frac), 1) return chunk.sample(n=n, replace=False, random_state=0) if self.problem_type != REGRESSION: y_df = y.to_frame(name='label').reset_index(drop=True) else: y_df = None time_start_sample_loop = time.time() time_limit_left = time_limit - (time_start_sample_loop - time_start) for i, samples in enumerate(num_rows_samples): if samples != num_rows_max: if self.problem_type == REGRESSION: idx = np.random.choice(num_rows_max, size=samples, replace=False) else: idx = y_df.groupby('label', group_keys=False).apply(sample_func, frac=samples/num_rows_max).index X_samp = X[idx, :] y_samp = y.iloc[idx] else: X_samp = X y_samp = y self.model = self._model_type(**self.params).fit(X_samp, y_samp) time_limit_left_prior = time_limit_left time_fit_end_sample = time.time() time_limit_left = time_limit - (time_fit_end_sample - time_start) time_fit_sample = time_limit_left_prior - time_limit_left time_required_for_next = time_fit_sample * sample_time_growth_factor logger.log(15, f'\t{round(time_fit_sample, 2)}s \t= Train Time (Using {samples}/{num_rows_max} rows) ({round(time_limit_left, 2)}s remaining time)') if time_required_for_next > time_limit_left and i != len(num_rows_samples) - 1: logger.log(20, f'\tNot enough time to train KNN model on all training rows. Fit {samples}/{num_rows_max} rows. 
(Training KNN model on {num_rows_samples[i+1]} rows is expected to take {round(time_required_for_next, 2)}s)') break return self.model # TODO: Add HPO def _hyperparameter_tune(self, **kwargs): return skip_hpo(self, **kwargs) class FAISSModel(KNNModel): def _get_model_type(self): if self.problem_type == REGRESSION: return FAISSNeighborsRegressor else: return FAISSNeighborsClassifier def _set_default_params(self): default_params = { 'index_factory_string': 'Flat', } for param, val in default_params.items(): self._set_default_param_value(param, val) super()._set_default_params()
44.831395
237
0.663338
b4567f3759d2dbbbd2c7692dd6fae7f9db9d74b6
1,216
py
Python
uwasp/middleware.py
tjhall13/uwasp
a574b70b20e41e7fc4c4fdcf81689eafbdcea83f
[ "Apache-2.0" ]
null
null
null
uwasp/middleware.py
tjhall13/uwasp
a574b70b20e41e7fc4c4fdcf81689eafbdcea83f
[ "Apache-2.0" ]
null
null
null
uwasp/middleware.py
tjhall13/uwasp
a574b70b20e41e7fc4c4fdcf81689eafbdcea83f
[ "Apache-2.0" ]
null
null
null
from .websocket import WebSocket
from .handlers import ThreadHandler


class WebSocketMiddleware(object):
    def __init__(self, application, handler_class=ThreadHandler, servers=[]):
        self.application = application
        self.handler_class = handler_class
        self.server_classes = dict(servers)

    def __call__(self, environ, start_response):
        if environ.get('HTTP_UPGRADE', '').lower() == 'websocket':
            # Retrieve websocket server for current path
            server_class = self.server_classes.get(environ['PATH_INFO'])
            if server_class:
                protocol_name = getattr(server_class, 'PROTOCOL_NAME', None)

                # Construct the websocket object and server
                ws = WebSocket()
                server = server_class(ws)

                # Construct the configured handler to handle
                # the async I/O required for a websocket request
                handler = self.handler_class(ws, server)
                return handler.handle(environ, protocol_name)
            else:
                start_response('404 Not Found', [])
                return []
        else:
            return self.application(environ, start_response)
39.225806
77
0.616776
6a26e004d717f6bcd26e64c96748239dd5d1a71a
633
py
Python
article/migrations/0012_works_work_category.py
skylifewww/artdelo
55d235a59d8a3abdf0f904336c1c75a2be903699
[ "MIT" ]
null
null
null
article/migrations/0012_works_work_category.py
skylifewww/artdelo
55d235a59d8a3abdf0f904336c1c75a2be903699
[ "MIT" ]
null
null
null
article/migrations/0012_works_work_category.py
skylifewww/artdelo
55d235a59d8a3abdf0f904336c1c75a2be903699
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*-
# Generated by Django 1.9.5 on 2016-07-06 17:30
from __future__ import unicode_literals

from django.db import migrations
import django.db.models.deletion
import mptt.fields


class Migration(migrations.Migration):

    dependencies = [
        ('article', '0011_auto_20160702_0203'),
    ]

    operations = [
        migrations.AddField(
            model_name='works',
            name='work_category',
            field=mptt.fields.TreeForeignKey(blank=True, default='', on_delete=django.db.models.deletion.CASCADE, related_name='works', to='article.Category', verbose_name='Категории'),
        ),
    ]
27.521739
185
0.671406
9e9f05eff29b8a5e628cdeb48e90759c5b02c565
1,948
py
Python
data/image_folder.py
a11isonliu/contrastive-unpaired-translation
67651ed9877cae121d9398f46094ce8dbc678802
[ "BSD-3-Clause" ]
null
null
null
data/image_folder.py
a11isonliu/contrastive-unpaired-translation
67651ed9877cae121d9398f46094ce8dbc678802
[ "BSD-3-Clause" ]
null
null
null
data/image_folder.py
a11isonliu/contrastive-unpaired-translation
67651ed9877cae121d9398f46094ce8dbc678802
[ "BSD-3-Clause" ]
null
null
null
"""A modified image folder class We modify the official PyTorch image folder (https://github.com/pytorch/vision/blob/master/torchvision/datasets/folder.py) so that this class can load images from both current directory and its subdirectories. """ import torch.utils.data as data from PIL import Image import os import os.path IMG_EXTENSIONS = [ '.jpg', '.JPG', '.jpeg', '.JPEG', '.png', '.PNG', '.ppm', '.PPM', '.bmp', '.BMP', '.tif', '.TIF', '.tiff', '.TIFF', '.npy' ] def is_image_file(filename): return any(filename.endswith(extension) for extension in IMG_EXTENSIONS) def make_dataset(dir, max_dataset_size=float("inf")): images = [] assert os.path.isdir(dir) or os.path.islink(dir), '%s is not a valid directory' % dir for root, _, fnames in sorted(os.walk(dir, followlinks=True)): for fname in fnames: if is_image_file(fname): path = os.path.join(root, fname) images.append(path) return images[:min(max_dataset_size, len(images))] def default_loader(path): return Image.open(path).convert('RGB') class ImageFolder(data.Dataset): def __init__(self, root, transform=None, return_paths=False, loader=default_loader): imgs = make_dataset(root) if len(imgs) == 0: raise(RuntimeError("Found 0 images in: " + root + "\n" "Supported image extensions are: " + ",".join(IMG_EXTENSIONS))) self.root = root self.imgs = imgs self.transform = transform self.return_paths = return_paths self.loader = loader def __getitem__(self, index): path = self.imgs[index] img = self.loader(path) if self.transform is not None: img = self.transform(img) if self.return_paths: return img, path else: return img def __len__(self): return len(self.imgs)
29.074627
122
0.617556
5182f60faa3c9ba570c75cbe0ab80678700f7921
1,321
py
Python
dyndnsc/detector/socket_ip.py
infothrill/python-dyndnsc
7ee6c55b3cca751d7bd7155ac935d4ad13189684
[ "MIT" ]
35
2015-02-13T02:54:49.000Z
2022-03-16T17:03:07.000Z
dyndnsc/detector/socket_ip.py
infothrill/python-dyndnsc
7ee6c55b3cca751d7bd7155ac935d4ad13189684
[ "MIT" ]
121
2015-02-04T15:48:08.000Z
2022-03-27T17:25:58.000Z
dyndnsc/detector/socket_ip.py
infothrill/python-dyndnsc
7ee6c55b3cca751d7bd7155ac935d4ad13189684
[ "MIT" ]
9
2015-04-30T15:48:06.000Z
2018-07-05T12:50:25.000Z
# -*- coding: utf-8 -*- """Module containing logic for socket based detectors.""" import logging from .base import IPDetector, AF_INET6 from ..common.detect_ip import detect_ip, IPV4, IPV6_PUBLIC, GetIpException LOG = logging.getLogger(__name__) class IPDetector_Socket(IPDetector): """Detect IPs used by the system to communicate with outside world.""" configuration_key = "socket" def __init__(self, family=None, *args, **kwargs): """ Initialize. :param family: IP address family (default: INET, possible: INET6) """ super(IPDetector_Socket, self).__init__(*args, family=family, **kwargs) def can_detect_offline(self): """Return False, this detector works offline.""" # unsure about this. detector does not really transmit data to outside, # but unsure if it gives the wanted IPs if system is offline return False def detect(self): """Detect the IP address.""" if self.opts_family == AF_INET6: kind = IPV6_PUBLIC else: # 'INET': kind = IPV4 theip = None try: theip = detect_ip(kind) except GetIpException: LOG.exception("socket detector raised an exception:") self.set_current_value(theip) return theip
29.355556
79
0.635882