Column schema (one row per source file; ranges show min..max, ⌀ marks nullable columns):

- blob_id: string (length 40..40)
- directory_id: string (length 40..40)
- path: string (length 3..616)
- content_id: string (length 40..40)
- detected_licenses: sequence (length 0..112)
- license_type: string (2 classes)
- repo_name: string (length 5..115)
- snapshot_id: string (length 40..40)
- revision_id: string (length 40..40)
- branch_name: string (777 classes)
- visit_date: timestamp[us] (2015-08-06 10:31:46 .. 2023-09-06 10:44:38)
- revision_date: timestamp[us] (1970-01-01 02:38:32 .. 2037-05-03 13:00:00)
- committer_date: timestamp[us] (1970-01-01 02:38:32 .. 2023-09-06 01:08:06)
- github_id: int64 (4.92k .. 681M, ⌀)
- star_events_count: int64 (0 .. 209k)
- fork_events_count: int64 (0 .. 110k)
- gha_license_id: string (22 classes)
- gha_event_created_at: timestamp[us] (2012-06-04 01:52:49 .. 2023-09-14 21:59:50, ⌀)
- gha_created_at: timestamp[us] (2008-05-22 07:58:19 .. 2023-08-21 12:35:19, ⌀)
- gha_language: string (149 classes)
- src_encoding: string (26 classes)
- language: string (1 class)
- is_vendor: bool (2 classes)
- is_generated: bool (2 classes)
- length_bytes: int64 (3 .. 10.2M)
- extension: string (188 classes)
- content: string (length 3 .. 10.2M)
- authors: sequence (length 1..1)
- author_id: string (length 1..132)
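Each row pairs these metadata columns with the file's full text in the `content` cell. A minimal loading sketch, assuming the table ships as a parquet file (the file name below is a placeholder, not taken from this card):

import pandas as pd

# keep only permissively licensed rows and preview where each file lives
df = pd.read_parquet("data.parquet")  # placeholder path
permissive = df[df["license_type"] == "permissive"]
print(permissive[["repo_name", "path", "length_bytes"]].head())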
df776be8e3c5a10d0ea0d5ac96bb71188cc0c541 | be5f4d79910e4a93201664270916dcea51d3b9ee | /fastdownward/experiments/issue627/merge-v3-v5.py | 100f1f2a1136f016756e9f799a53b284019dc988 | [
"MIT",
"GPL-1.0-or-later",
"GPL-3.0-or-later"
] | permissive | mehrdadzakershahrak/Online-Explanation-Generation | 17c3ab727c2a4a60381402ff44e95c0d5fd0e283 | e41ad9b5a390abdaf271562a56105c191e33b74d | refs/heads/master | 2022-12-09T15:49:45.709080 | 2019-12-04T10:23:23 | 2019-12-04T10:23:23 | 184,834,004 | 0 | 0 | MIT | 2022-12-08T17:42:50 | 2019-05-04T00:04:59 | Python | UTF-8 | Python | false | false | 1,574 | py | #! /usr/bin/env python
# -*- coding: utf-8 -*-
from downward import suites
from common_setup_no_benchmarks import IssueConfig, IssueExperiment, get_script_dir
from relativescatter import RelativeScatterPlotReport
import os
def main(revisions=None):
exp = IssueExperiment(benchmarks_dir=".", suite=[])
exp.add_fetcher(
os.path.join(get_script_dir(), "data", "issue627-v3-eval"),
        filter=lambda run: "base" not in run["config"],
)
exp.add_fetcher(
os.path.join(get_script_dir(), "data", "issue627-v5-eval"),
        filter=lambda run: "base" not in run["config"],
)
for config_nick in ['astar-blind', 'astar-lmcut', 'astar-ipdb', 'astar-cegar-original', 'astar-cegar-lm-goals']:
exp.add_report(
RelativeScatterPlotReport(
attributes=["memory"],
filter_config=["issue627-v3-%s" % config_nick,
"issue627-v5-%s" % config_nick],
get_category=lambda run1, run2: run1.get("domain"),
),
outfile='issue627_v3_v5_memory_%s.png' % config_nick
)
exp.add_report(
RelativeScatterPlotReport(
attributes=["total_time"],
filter_config=["issue627-v3-%s" % config_nick,
"issue627-v5-%s" % config_nick],
get_category=lambda run1, run2: run1.get("domain"),
),
outfile='issue627_v3_v5_total_time_%s.png' % config_nick
)
exp()
main(revisions=['issue627-v3', 'issue627-v5'])
| [
"[email protected]"
] | |
1a1b7696f4ce2e13094a1f79e092e53fcc9eb461 | 9e988c0dfbea15cd23a3de860cb0c88c3dcdbd97 | /sdBs/AllRun/galex_j15376-1702/sdB_galex_j15376-1702_lc.py | e2d309905a69f149eca005da69c193f1c0718906 | [] | no_license | tboudreaux/SummerSTScICode | 73b2e5839b10c0bf733808f4316d34be91c5a3bd | 4dd1ffbb09e0a599257d21872f9d62b5420028b0 | refs/heads/master | 2021-01-20T18:07:44.723496 | 2016-08-08T16:49:53 | 2016-08-08T16:49:53 | 65,221,159 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 358 | py | from gPhoton.gAperture import gAperture
def main():
gAperture(band="NUV", skypos=[234.417917,-17.037508], stepsz=30., csvfile="/data2/fleming/GPHOTON_OUTPU/LIGHTCURVES/sdBs/sdB_galex_j15376-1702/sdB_galex_j15376-1702_lc.csv", maxgap=1000., overwrite=True, radius=0.00555556, annulus=[0.005972227,0.0103888972], verbose=3)
if __name__ == "__main__":
main()
| [
"[email protected]"
] | |
1ebeccd76c77fb7295b05092f26a7ad953d07807 | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2330/60796/280647.py | 2ebf0db21d495fb1321ec9d1115f099b73a5cb61 | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,846 | py | import math
N=int(input())
ls=[]
for i in range(N):
ls.append(input().split(","))
ls[i]=[int(x) for x in ls[i]]
r=[]
for i1 in range(len(ls)-3):
for i2 in range(i1+1,len(ls)-2):
for i3 in range(i2+1,len(ls)-1):
for i4 in range(i3+1,len(ls)):
a=ls[i1]
b=ls[i2]
c=ls[i3]
d=ls[i4]
                if (a[0]==c[0] and b[0]==d[0] and (a[1]==b[1] and c[1]==d[1])):
r.append(abs(a[1]-c[1])*abs(b[0]-a[0]))
elif (a[0]==b[0] and c[0]==d[0] and (d[1]==b[1] and a[1]==c[1])):
r.append(abs(a[1]-b[1])*abs(c[0]-a[0]))
elif (a[0]==d[0] and c[0]==b[0] and (d[1]==c[1] and a[1]==b[1])):
r.append(abs(a[0]-b[0])*abs(d[1]-a[1]))
elif (a[0]==b[0] and c[0]==d[0] and (c[1]==b[1] and a[1]==d[1])):
r.append(abs(a[0]-d[0])*abs(b[1]-a[1]))
elif (a[0]==c[0] and b[0]==d[0] and (c[1]==b[1] and a[1]==d[1])):
r.append(abs(a[0]-d[0])*abs(c[1]-a[1]))
elif (a[0]==d[0] and c[0]==b[0] and (d[1]==b[1] and a[1]==c[1])):
r.append(abs(a[0]-c[0])*abs(d[1]-a[1]))
elif (a[0]-b[0])!=0 and (c[0]-d[0])!=0 and (a[0]-c[0])!=0 and (d[0]-b[0])!=0:
if abs((a[1]-b[1])/(a[0]-b[0]))==abs((c[1]-d[1])/(c[0]-d[0])) and abs((a[1]-c[1])/(a[0]-c[0]))==abs((d[1]-b[1])/(d[0]-b[0])):
x=math.sqrt(pow(a[0]-b[0],2)+pow(a[1]-b[1],2))
y=math.sqrt(pow(a[0]-c[0],2)+pow(a[1]-c[1],2))
r.append(x*y)
elif (a[0]-d[0])!=0 and (c[0]-b[0])!=0 and (a[0]-c[0])!=0 and (d[0]-b[0])!=0:
if abs((a[1]-d[1])/(a[0]-d[0]))==abs((b[1]-c[1])/(b[0]-c[0])) and abs((a[1]-c[1])/(a[0]-c[0]))==abs((d[1]-b[1])/(d[0]-b[0])):
x=math.sqrt(pow(a[0]-d[0],2)+pow(a[1]-d[1],2))
y=math.sqrt(pow(a[0]-c[0],2)+pow(a[1]-c[1],2))
r.append(x*y)
elif (a[0] - d[0]) != 0 and (c[0] - b[0]) != 0 and (a[0] - b[0]) != 0 and (d[0] - c[0]) != 0:
if abs((a[1] - d[1]) / (a[0] - d[0])) == abs((b[1] - c[1]) / (b[0] - c[0])) and abs(
(a[1] - b[1]) / (a[0] - b[0])) == abs((d[1] - c[1]) / (d[0] - c[0])):
x = math.sqrt(pow(a[0] - d[0], 2) + pow(a[1] - d[1], 2))
y = math.sqrt(pow(a[0] - b[0], 2) + pow(a[1] - b[1], 2))
r.append(x * y)
if len(r)==0:
print("0.0000")
else:
s=str(min(r))
if not s.__contains__("."):
s=s+".0000"
else:
i=s.index(".")
t=s[i+1:]
while len(t)<4:
            t=t+"0"  # pad the decimal part on the right to 4 places
if len(t)>4:
s=s[:i+1]+t[:4]
print(s) | [
"[email protected]"
] | |
61c8435f832d61befe8894c8dbea7b181fd8b002 | b26c41926fa3a7c2c061132d80e91a2750f2f468 | /tensorflow_probability/python/internal/backend/numpy/gen/linear_operator_adjoint.py | 69c51544cf53762799d4572987b1106ebd7474ad | [
"Apache-2.0"
] | permissive | tensorflow/probability | 22e679a4a883e408f8ef237cda56e3e3dfa42b17 | 42a64ba0d9e0973b1707fcd9b8bd8d14b2d4e3e5 | refs/heads/main | 2023-09-04T02:06:08.174935 | 2023-08-31T20:30:00 | 2023-08-31T20:31:33 | 108,053,674 | 4,055 | 1,269 | Apache-2.0 | 2023-09-13T21:49:49 | 2017-10-23T23:50:54 | Jupyter Notebook | UTF-8 | Python | false | false | 9,797 | py | # Copyright 2020 The TensorFlow Probability Authors. All Rights Reserved.
# @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
# THIS FILE IS AUTO-GENERATED BY `gen_linear_operators.py`.
# DO NOT MODIFY DIRECTLY.
# @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
# pylint: disable=g-import-not-at-top
# pylint: disable=g-direct-tensorflow-import
# pylint: disable=g-bad-import-order
# pylint: disable=unused-import
# pylint: disable=line-too-long
# pylint: disable=reimported
# pylint: disable=g-bool-id-comparison
# pylint: disable=g-statement-before-imports
# pylint: disable=bad-continuation
# pylint: disable=useless-import-alias
# pylint: disable=property-with-parameters
# pylint: disable=trailing-whitespace
# pylint: disable=g-inconsistent-quotes
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Takes the adjoint of a `LinearOperator`."""
from tensorflow_probability.python.internal.backend.numpy import ops
from tensorflow_probability.python.internal.backend.numpy import numpy_array as array_ops
from tensorflow_probability.python.internal.backend.numpy import numpy_math as math_ops
from tensorflow_probability.python.internal.backend.numpy import linalg_impl as linalg
from tensorflow_probability.python.internal.backend.numpy.gen import linear_operator
from tensorflow_probability.python.internal.backend.numpy.gen import linear_operator_util
# from tensorflow.python.util.tf_export import tf_export
__all__ = ["LinearOperatorAdjoint"]
# @tf_export("linalg.LinearOperatorAdjoint")
# @linear_operator.make_composite_tensor
class LinearOperatorAdjoint(linear_operator.LinearOperator):
"""`LinearOperator` representing the adjoint of another operator.
This operator represents the adjoint of another operator.
```python
# Create a 2 x 2 linear operator.
operator = LinearOperatorFullMatrix([[1 - i., 3.], [0., 1. + i]])
operator_adjoint = LinearOperatorAdjoint(operator)
operator_adjoint.to_dense()
==> [[1. + i, 0.]
[3., 1 - i]]
tensor_shape.TensorShape(operator_adjoint.shape)
==> [2, 2]
operator_adjoint.log_abs_determinant()
==> - log(2)
x = ... Shape [2, 4] Tensor
operator_adjoint.matmul(x)
==> Shape [2, 4] Tensor, equal to operator.matmul(x, adjoint=True)
```
#### Performance
The performance of `LinearOperatorAdjoint` depends on the underlying
operators performance.
#### Matrix property hints
This `LinearOperator` is initialized with boolean flags of the form `is_X`,
for `X = non_singular, self_adjoint, positive_definite, square`.
These have the following meaning:
* If `is_X == True`, callers should expect the operator to have the
property `X`. This is a promise that should be fulfilled, but is *not* a
runtime assert. For example, finite floating point precision may result
in these promises being violated.
* If `is_X == False`, callers should expect the operator to not have `X`.
* If `is_X == None` (the default), callers should have no expectation either
way.
"""
def __init__(self,
operator,
is_non_singular=None,
is_self_adjoint=None,
is_positive_definite=None,
is_square=None,
name=None):
r"""Initialize a `LinearOperatorAdjoint`.
`LinearOperatorAdjoint` is initialized with an operator `A`. The `solve`
and `matmul` methods effectively flip the `adjoint` argument. E.g.
```
A = MyLinearOperator(...)
B = LinearOperatorAdjoint(A)
x = [....] # a vector
assert A.matvec(x, adjoint=True) == B.matvec(x, adjoint=False)
```
Args:
operator: `LinearOperator` object.
is_non_singular: Expect that this operator is non-singular.
is_self_adjoint: Expect that this operator is equal to its hermitian
transpose.
is_positive_definite: Expect that this operator is positive definite,
meaning the quadratic form `x^H A x` has positive real part for all
nonzero `x`. Note that we do not require the operator to be
self-adjoint to be positive-definite. See:
https://en.wikipedia.org/wiki/Positive-definite_matrix#Extension_for_non-symmetric_matrices
is_square: Expect that this operator acts like square [batch] matrices.
name: A name for this `LinearOperator`. Default is `operator.name +
"_adjoint"`.
Raises:
ValueError: If `operator.is_non_singular` is False.
"""
parameters = dict(
operator=operator,
is_non_singular=is_non_singular,
is_self_adjoint=is_self_adjoint,
is_positive_definite=is_positive_definite,
is_square=is_square,
name=name,
)
self._operator = operator
# The congruency of is_non_singular and is_self_adjoint was checked in the
# base operator.
combine_hint = (
linear_operator_util.use_operator_or_provided_hint_unless_contradicting)
is_square = combine_hint(
operator, "is_square", is_square,
"An operator is square if and only if its adjoint is square.")
is_non_singular = combine_hint(
operator, "is_non_singular", is_non_singular,
"An operator is non-singular if and only if its adjoint is "
"non-singular.")
is_self_adjoint = combine_hint(
operator, "is_self_adjoint", is_self_adjoint,
"An operator is self-adjoint if and only if its adjoint is "
"self-adjoint.")
is_positive_definite = combine_hint(
operator, "is_positive_definite", is_positive_definite,
"An operator is positive-definite if and only if its adjoint is "
"positive-definite.")
# Initialization.
if name is None:
name = operator.name + "_adjoint"
with ops.name_scope(name):
super(LinearOperatorAdjoint, self).__init__(
dtype=operator.dtype,
is_non_singular=is_non_singular,
is_self_adjoint=is_self_adjoint,
is_positive_definite=is_positive_definite,
is_square=is_square,
parameters=parameters,
name=name)
@property
def operator(self):
"""The operator before taking the adjoint."""
return self._operator
def _linop_adjoint(self) -> linear_operator.LinearOperator:
return self.operator
def _assert_non_singular(self):
return self.operator.assert_non_singular()
def _assert_positive_definite(self):
return self.operator.assert_positive_definite()
def _assert_self_adjoint(self):
return self.operator.assert_self_adjoint()
def _shape(self):
# Rotate last dimension
shape = tensor_shape.TensorShape(self.operator.shape)
return shape[:-2].concatenate([shape[-1], shape[-2]])
def _shape_tensor(self):
# Rotate last dimension
shape = self.operator.shape_tensor()
return prefer_static.concat([
shape[:-2], [shape[-1], shape[-2]]], axis=-1)
def _matmul(self, x, adjoint=False, adjoint_arg=False):
return self.operator.matmul(
x, adjoint=(not adjoint), adjoint_arg=adjoint_arg)
def _matvec(self, x, adjoint=False):
return self.operator.matvec(x, adjoint=(not adjoint))
def _determinant(self):
if self.is_self_adjoint:
return self.operator.determinant()
return math_ops.conj(self.operator.determinant())
def _log_abs_determinant(self):
return self.operator.log_abs_determinant()
def _trace(self):
if self.is_self_adjoint:
return self.operator.trace()
return math_ops.conj(self.operator.trace())
def _solve(self, rhs, adjoint=False, adjoint_arg=False):
return self.operator.solve(
rhs, adjoint=(not adjoint), adjoint_arg=adjoint_arg)
def _solvevec(self, rhs, adjoint=False):
return self.operator.solvevec(rhs, adjoint=(not adjoint))
def _to_dense(self):
if self.is_self_adjoint:
return self.operator.to_dense()
return linalg.adjoint(self.operator.to_dense())
def _add_to_tensor(self, x):
return self.to_dense() + x
def _eigvals(self):
eigvals = self.operator.eigvals()
if not self.operator.is_self_adjoint:
eigvals = math_ops.conj(eigvals)
return eigvals
def _cond(self):
return self.operator.cond()
@property
def _composite_tensor_fields(self):
return ("operator",)
@property
def _experimental_parameter_ndims_to_matrix_ndims(self):
return {"operator": 0}
import numpy as np
from tensorflow_probability.python.internal.backend.numpy import linalg_impl as _linalg
from tensorflow_probability.python.internal.backend.numpy import ops as _ops
from tensorflow_probability.python.internal.backend.numpy.gen import tensor_shape
from tensorflow_probability.python.internal.backend.numpy import private
distribution_util = private.LazyLoader(
"distribution_util", globals(),
"tensorflow_probability.substrates.numpy.internal.distribution_util")
tensorshape_util = private.LazyLoader(
"tensorshape_util", globals(),
"tensorflow_probability.substrates.numpy.internal.tensorshape_util")
prefer_static = private.LazyLoader(
"prefer_static", globals(),
"tensorflow_probability.substrates.numpy.internal.prefer_static")
| [
"[email protected]"
] | |
c5971d938e49b66b654a919ac6e2e69b5337945b | a4a754bb5d2b92707c5b0a7a669246079ab73633 | /8_kyu/derive.py | 6efcdb1118f8b8cb017f87a2a9c1cd42ddd88128 | [] | no_license | halfendt/Codewars | f6e0d81d9b10eb5bc66615eeae082adb093c09b3 | 8fe4ce76824beece0168eb39776a2f9e078f0785 | refs/heads/master | 2023-07-11T13:58:18.069265 | 2021-08-15T18:40:49 | 2021-08-15T18:40:49 | 259,995,259 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 198 | py | def derive(coefficient, exponent):
"""
Take the Derivative Kata
https://www.codewars.com/kata/5963c18ecb97be020b0000a2
"""
return str(coefficient*exponent)+'x^'+str(exponent - 1) | [
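# Sanity check, consistent with the kata statement: the derivative of 7x^8
# is 56x^7.
assert derive(7, 8) == "56x^7"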
"[email protected]"
] | |
808ac04d8b0105120d3c78e0d0dac9ac656771c7 | bbdc377bfe1f94364de4f7edc1cb19942904cb24 | /Manifolds2D.py | 8e27ee58b4ff17cb0e88be881a86cd8e3c763290 | [] | no_license | ctralie/TwistyTakens | e166139f13b25b8a9885dee11b7267017f73dc28 | 9e1200a1ad9e10b31eb0a32b5073854cacdefcc9 | refs/heads/master | 2021-03-19T17:34:37.457455 | 2018-08-28T15:54:22 | 2018-08-28T15:54:22 | 93,522,641 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,285 | py | import numpy as np
from Utilities import *
def getSphereTimeSeries(theta, phi, u, geodesic = False):
N = phi.size
X = np.zeros((N, 3))
X[:, 0] = np.cos(theta)*np.cos(phi)
X[:, 1] = np.sin(theta)*np.cos(phi)
X[:, 2] = np.sin(phi)
d = X.dot(u)
d[d < -1] = -1
d[d > 1] = 1
x = d
if geodesic:
x = np.arccos(d)
return x.flatten()
def getRP2TimeSeries(theta, phi):
N = phi.size
X = np.zeros((N, 3))
X[:, 0] = np.cos(theta)*np.cos(phi)
X[:, 1] = np.sin(theta)*np.cos(phi)
X[:, 2] = np.sin(phi)
u = np.random.randn(3, 1)
u = u/np.sqrt(np.sum(u**2))
d = X.dot(u)
d[d < -1] = -1
d[d > 1] = 1
x = np.arccos(np.abs(d))
return x.flatten()
def getBulgingSphereTimeSeries(theta, phi):
N = phi.size
X = np.zeros((N, 3))
X[:, 0] = np.cos(theta)*(0.5+np.cos(phi))
X[:, 1] = np.sin(theta)*(0.5+np.cos(phi))
X[:, 2] = np.sin(phi)
u = np.random.randn(3, 1)
u = u/np.sqrt(np.sum(u**2))
d = X.dot(u)
d[d < -1] = -1
d[d > 1] = 1
return (X, d.flatten())
def getKleinTimeSeries(T1, slope, eps = 0.02):
"""
Make a Klein bottle time series
Parameters
----------
T1 : int
The number of samples per period on the circle part
slope : float
Slope of the trajectory along principal domain of the Klein bottle
eps : float
Fuzz close to the boundary in the y direction. Or if negative,
the number of periods to complete
"""
NPeriods = 1.0/slope
    N = int(T1*NPeriods)
    print("NPeriods = %i, N = %i"%(NPeriods, N))
    if eps < 0:
        print("Expanding period")
        N = int(N*(-eps))  # np.linspace needs an integer sample count
        y = np.linspace(0, np.pi*(-eps), N)
else:
y = np.linspace(0, np.pi, N)
x = np.arange(N)*2*np.pi/T1
if eps > 0:
idx = (y>eps)*(y<np.pi-eps) #Exlude points close to the boundary
x = x[idx]
y = y[idx]
return np.cos(2*x) + np.cos(x)*np.sin(y) + np.cos(y)
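# Hedged usage sketch (mirrors the call in doKleinExample below): 40 samples
# per period with slope 0.05 walks once across the principal domain.
# x = getKleinTimeSeries(T1=40, slope=0.05)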
def getTorusDistance(x, theta, phi, alpha_theta = 1.0, alpha_phi = 1.0, L1 = False):
"""
Get a distance from points to an observation point x on the torus
Parameters
----------
x : ndarray (2)
Position of observation point (theta, phi)
theta : ndarray (N)
Theta (x) coordinates of points on the flow
phi : ndarray (N)
Phi (y) coordinates of points on the flow
alpha_theta : float
Weight of metric along the x direction
alpha_phi : float
Weight of metric along the y direction
"""
dx = np.abs(x[0]-theta)
dx = np.minimum(dx, np.abs(x[0]+2*np.pi-theta))
dx = np.minimum(dx, np.abs(x[0]-2*np.pi-theta))
dy = np.abs(x[1]-phi)
dy = np.minimum(dy, np.abs(x[1]+2*np.pi-phi))
dy = np.minimum(dy, np.abs(x[1]-2*np.pi-phi))
dx = alpha_theta*dx
dy = alpha_phi*dy
if L1:
dist = dx + dy
else:
dist = np.sqrt(dx**2 + dy**2)
return dist
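# Worked example of the wrap-around metric: theta values 0.1 and 2*pi - 0.1
# are 0.2 apart on the circle, not 2*pi - 0.2.
# getTorusDistance(np.array([0.1, 0.0]), np.array([2*np.pi - 0.1]), np.array([0.0]))
# -> array([0.2])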
def getKleinDistance(x1, theta, phi, alpha_theta = 1.0, alpha_phi = 1.0, L1 = False):
"""
Get a distance from points to an observation point x on the Klein bottle, where
the points are specified on its double cover on [0, 2*pi] x [0, 2*pi] and the
identification is [x, y] ~ [x + pi, -y]
x1 : ndarray (2)
Position of observation point on the double cover (theta, phi)
theta : ndarray (N)
Theta (x) coordinates of points on the flow
phi : ndarray (N)
Phi (y) coordinates of points on the flow
alpha_theta : float
Weight of metric along the x direction
alpha_phi : float
Weight of metric along the y direction
"""
x2 = [x1[0]+np.pi, -x1[1]] #Quotient map
x2 = np.mod(x2, 2*np.pi)
d1 = getTorusDistance(x1, theta, phi, alpha_theta, alpha_phi, L1)
d2 = getTorusDistance(x2, theta, phi, alpha_theta, alpha_phi, L1)
return np.minimum(d1, d2)
def intersectSegments2D(A, B, C, D, countEndpoints = True):
"""
Find the intersection of two lines segments in a numerically stable
way by looking at them parametrically
"""
denomDet = (D[0]-C[0])*(A[1]-B[1]) - (D[1]-C[1])*(A[0]-B[0])
if (denomDet == 0): #Segments are parallel
return np.array([])
num_t = (A[0]-C[0])*(A[1]-B[1]) - (A[1]-C[1])*(A[0]-B[0])
num_s = (D[0]-C[0])*(A[1]-C[1]) - (D[1]-C[1])*(A[0]-C[0])
t = float(num_t) / float(denomDet)
s = float(num_s) / float(denomDet)
if (s < 0 or s > 1):
return np.array([]) #Intersection not within the bounds of segment 1
if (t < 0 or t > 1):
return np.array([]) #Intersection not within the bounds of segment 2
#Don't count intersections that occur at the endpoints of both segments
#if the user so chooses
if ((t == 0 or t == 1) and (s == 0 or s == 1) and (not countEndpoints)):
return np.array([])
ret = A + s*(B-A)
return ret
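# Worked example: the diagonals of the unit square meet at their midpoint,
# i.e. s = t = 0.5 in the parametric form above.
# intersectSegments2D(np.array([0., 0.]), np.array([1., 1.]),
#                     np.array([0., 1.]), np.array([1., 0.]))
# -> array([0.5, 0.5])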
def get2HoledTorusTraj(x0, dx, NPoints):
"""
Come up with a trajectory on the unit octagon representation
of the 2-holed torus
Parameters
----------
x0 : ndarray (2, 1)
Initial position on the 2-holed torus
dx : ndarray (2, 1)
Vector between adjacent points on the trajectory
NPoints : int
Number of points on the trajectory
"""
x0 = np.array(x0)
dx = np.array(dx)
thetas = np.linspace(0, 2*np.pi, 9) - np.pi/8
endpts = np.zeros((9, 2))
endpts[:, 0] = np.cos(thetas)
endpts[:, 1] = np.sin(thetas)
normals = endpts[1::, :] - endpts[0:-1, :]
normals[:, 0], normals[:, 1] = normals[:, 1], -normals[:, 0]
normals = normals/np.sqrt(np.sum(normals**2, 1))[:, None]
width = endpts[0, 0] - endpts[5, 0]
X = [x0]
for i in range(1, NPoints):
x1 = X[i-1]
x2 = x1 + dx
# Check if out of bounds of torus
k = 0
while k < 8:
res = intersectSegments2D(x1, x2, endpts[k, :], endpts[k+1, :])
if res.size > 0:
x1 = res - width*normals[k, :] #Go to other side of octagon
x2 = x1 + (x2 - res)
x1 = x1+1e-10*normals[k, :]
k = 0
continue
k += 1
X.append(x2)
X = np.array(X)
return {'X':X, 'endpts':endpts}
def get2HoledTorusDist(X, x0, endpts):
"""
Compute the distance from a set of points to a chosen point on
the 2-holed torus, using the flat Euclidean metric on the octagon
Parameters
----------
X: ndarray (N, 2)
A set of points inside of the octagon
x0: ndarray (2)
A point to which to measure distances
endpts: ndarray (9, 2)
Endpoints on the octagon model
"""
offsets = endpts[1:9, :] + endpts[0:8, :]
Y = x0 + offsets
Y = np.concatenate((x0[None, :], Y), 0)
XSqr = np.sum(X**2, 1)
YSqr = np.sum(Y**2, 1)
D = XSqr[:, None] + YSqr[None, :] - 2*X.dot(Y.T)
distSqr = np.min(D, 1)
distSqr[distSqr < 0] = 0
return distSqr
def doSphereExample():
np.random.seed(100)
N = 6000
NPeriods = 50
S = np.zeros((N, 3))
theta = np.linspace(0, 2*np.pi*NPeriods, N)
phi = np.pi*np.linspace(-0.5, 0.5, N)
#Observation function
x = getSphereTimeSeries(theta, phi)
#x = getRP2TimeSeries(theta, phi)
#Sliding window
X = getSlidingWindowNoInterp(x, int(N/NPeriods))
Y = plotSlidingWindowResults(x, X)
plt.savefig("SphereTimeSeries.svg", bbox_inches='tight')
Z = np.zeros((Y.shape[0], 4))
Z[:, 0:3] = Y[:, 0:3]
Z[:, 3] = x[0:Z.shape[0]]
savePCOff(Y, "Sphere.off")
def doKleinExample():
x = getKleinTimeSeries(40, 0.05)
plt.plot(x)
plt.show()
if __name__ == '__main__':
x0 = [0.1, 0.1]
dx = 3*np.array([0.02*(1+np.sqrt(5))/2, 0.04])
res = get2HoledTorusTraj(x0, dx, 1000)
endpts, X = res['endpts'], res['X']
c = plt.get_cmap('Spectral')
C = c(np.array(np.round(np.linspace(0, 255, X.shape[0])), dtype=np.int32))
C = C[:, 0:3]
x0 = np.array([0.1, 0.1])
y = get2HoledTorusDist(X, x0, endpts)
plt.subplot(121)
plt.plot(endpts[:, 0], endpts[:, 1])
plt.scatter(x0[0], x0[1], 80, 'k')
plt.scatter(X[:, 0], X[:, 1], 20, c=C)
plt.axis('equal')
plt.subplot(122)
plt.plot(y)
plt.show() | [
"[email protected]"
] | |
4392d1adcce1c93371a6728ecfff29e616948c28 | ec78f8ab63aec0753b9360715a4276a971b78a82 | /py/data_analysis/np/matrix.py | 2df729d2746c2ba43d2ec102e1595d3cf8c1e176 | [] | no_license | anderscui/ml | 4ace7e7b8cf248042d224bd54e81b691963b2e0e | 39238ba6d802df7e8bf1089ef3605cfc83b333ac | refs/heads/master | 2021-06-03T16:09:55.207202 | 2018-11-01T18:50:49 | 2018-11-01T18:50:49 | 23,989,214 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 139 | py | import numpy as np
arr = np.arange(15).reshape((3, 5))
print(arr.T)
print(arr.transpose())
print(np.dot(arr.T, arr))
# also swapaxes()
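print(arr.swapaxes(0, 1))  # for a 2-D array this matches arr.T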
| [
"[email protected]"
] | |
b2d0fc494e361edacb2c59246242262a3668aa8e | 0eaf0d3f0e96a839f2ef37b92d4db5eddf4b5e02 | /abc098/a.py | aeece8b578c7feb1792ec03018003d9edab1c62b | [] | no_license | silphire/atcoder | b7b02798a87048757745d99e8564397d1ca20169 | f214ef92f13bc5d6b290746d5a94e2faad20d8b0 | refs/heads/master | 2023-09-03T17:56:30.885166 | 2023-09-02T14:16:24 | 2023-09-02T14:16:24 | 245,110,029 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 65 | py | a, b = map(int, input().split())
print(max(a + b, a - b, a * b)) | [
"[email protected]"
] | |
eb8ca0533b3576c10c7673e10928c10f18803fac | a1ea4bb213801a2f49e9b3d178f402f108d8a803 | /AI(BE)/bullseyes/bullseyes/settings.py | 0cd8bb3f9a00a7080b4c0388a0e00b09b89ddf1f | [
"MIT"
] | permissive | osamhack2021/AI_WEB_Bullseyes_Bullseyes | 537df4c35550917b963442538926c0b4bbef3cd6 | ec6aa6ce093e93b5666a0fd5ede28585c27a3590 | refs/heads/master | 2023-08-18T10:42:24.212460 | 2021-10-20T02:49:35 | 2021-10-20T02:49:35 | 407,145,903 | 4 | 2 | MIT | 2021-10-17T05:23:18 | 2021-09-16T11:54:11 | JavaScript | UTF-8 | Python | false | false | 4,101 | py | """
Django settings for bullseyes project.
Generated by 'django-admin startproject' using Django 3.1.2.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
from pathlib import Path
import os
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '_&*#@x)6_c7#1y4e65x)+!*75if7gyn4kz469&v2h6aw$om&m3'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['*']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'bullseyes_server',
'rest_framework.authtoken',
'django_filters',
'corsheaders',
]
MIDDLEWARE = [
"corsheaders.middleware.CorsMiddleware",
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'corsheaders.middleware.CorsPostCsrfMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
CORS_ALLOWED_ORIGIN_REGEXES = [
r".*",
]
#CORS_ORIGIN_ALLOW_ALL = True
REST_FRAMEWORK = {
# Use Django's standard `django.contrib.auth` permissions,
# or allow read-only access for unauthenticated users.
'DEFAULT_PERMISSION_CLASSES': [
'rest_framework.permissions.DjangoModelPermissionsOrAnonReadOnly'
],
'DEFAULT_PAGINATION_CLASS': 'rest_framework.pagination.PageNumberPagination',
'PAGE_SIZE': 100,
'DEFAULT_FILTER_BACKENDS':['django_filters.rest_framework.DjangoFilterBackend'],
# 'DATE_INPUT_FORMATS': ['iso-8601', '%Y-%m-%dT%H:%M:%S.%fZ'],
}
ROOT_URLCONF = 'bullseyes.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'bullseyes.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql',
'NAME': 'postgres',
'USER': 'postgres',
'PASSWORD': 'postgres',
'HOST': 'db',
'PORT': 5432,
}
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
MEDIA_URL = '/media/'
| [
"[email protected]"
] | |
391e199af1fa6be6a64f00ab28750cf11324aad2 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02660/s614607751.py | 34c48997173930d6f69893b65345505f7e034156 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 334 | py | N = int(input())
ans = 0
for i in range(2, N):
if i * i > N:
break
e = 0
while N % i == 0:
e += 1
N //= i
if e > 0:
for j in range(1, 10):
if e >= j:
e -= j
ans += 1
else:
break
if N > 1:
ans += 1
print(ans)
| [
"[email protected]"
] | |
33e87fe2280b584f9fab54d1712d053ca31d4dcd | 4d8d542058f91bc2a1fede92a7ebc614b61aed22 | /environments/mujoco/rand_param_envs/gym/envs/debugging/__init__.py | ebdd5b7c14979e41c5700c705b69cfaddace0c6c | [
"MIT"
] | permissive | NagisaZj/varibad | 9ea940e168fea336457636e33f61400d48a18a27 | df7cda81588c62a2a3bee69e4173228701bd7000 | refs/heads/master | 2023-02-07T15:50:47.912644 | 2020-12-27T01:51:10 | 2020-12-27T01:51:10 | 270,474,411 | 0 | 0 | NOASSERTION | 2020-06-08T00:34:09 | 2020-06-08T00:34:08 | null | UTF-8 | Python | false | false | 552 | py | from environments.mujoco.rand_param_envs.gym.envs.debugging.one_round_deterministic_reward import \
OneRoundDeterministicRewardEnv
from environments.mujoco.rand_param_envs.gym.envs.debugging.one_round_nondeterministic_reward import \
OneRoundNondeterministicRewardEnv
from environments.mujoco.rand_param_envs.gym.envs.debugging.two_round_deterministic_reward import \
TwoRoundDeterministicRewardEnv
from environments.mujoco.rand_param_envs.gym.envs.debugging.two_round_nondeterministic_reward import \
TwoRoundNondeterministicRewardEnv
| [
"[email protected]"
] | |
42ccabfd89b1d00cd7df7184e283bdbb70020766 | 41777d4d219ea97b4632f4a8a31ab6c82a60772c | /kubernetes-stubs/config/incluster_config.pyi | 79a0d4354db777d9f57fa699d05bd128f72a24d2 | [
"Apache-2.0"
] | permissive | gordonbondon/kubernetes-typed | 501d9c998c266386dc7f66f522f71ac3ba624d89 | 82995b008daf551a4fe11660018d9c08c69f9e6e | refs/heads/master | 2023-07-18T12:06:04.208540 | 2021-09-05T19:50:05 | 2021-09-05T19:50:05 | 319,183,135 | 24 | 2 | Apache-2.0 | 2021-09-05T19:50:06 | 2020-12-07T02:34:12 | Python | UTF-8 | Python | false | false | 635 | pyi | # Code generated by `stubgen`. DO NOT EDIT.
from .config_exception import ConfigException as ConfigException
from kubernetes.client import Configuration as Configuration
from typing import Any
SERVICE_HOST_ENV_NAME: str
SERVICE_PORT_ENV_NAME: str
SERVICE_TOKEN_FILENAME: str
SERVICE_CERT_FILENAME: str
class InClusterConfigLoader:
def __init__(self, token_filename, cert_filename, try_refresh_token: bool = ..., environ=...) -> None: ...
def load_and_set(self, client_configuration: Any | None = ...) -> None: ...
def load_incluster_config(client_configuration: Any | None = ..., try_refresh_token: bool = ...) -> None: ...
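# Hedged usage sketch of the runtime API these stubs describe (only works
# inside a pod with a mounted service-account token):
# from kubernetes import config, client
# config.load_incluster_config()
# v1 = client.CoreV1Api()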
| [
"[email protected]"
] | |
e325857a904d0df6ed0627ab009f34fc96c74972 | 329cc042bb5829ab26a51d0b3a0bd310f05e0671 | /main.py | 60f84aa47980f4c797b50f2df6697f82314f4908 | [] | no_license | bkhoward/WLC-PSK-Change | 53afe64e767889ce967679d8aeb798745166fa72 | 1b92fd1d5afae4bc64bfc61bc4935c635cca12f0 | refs/heads/master | 2023-03-25T01:33:53.765751 | 2021-03-11T18:59:03 | 2021-03-11T18:59:03 | 345,891,681 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,263 | py | #!/usr/bin/env python
#
# Author: Brian Howard
# Date: 02Feb2021
# Version: 1.0
# Abstract: Create SSH connection to corporate Cisco WLCs and change the PSK used for ONEguest SSID
# - Find the WLAN_ID for the ONEguest SSID
# - Disable the WLAN_ID for ONEguest
# - Modify the existing PSK for the ONEguest SSID
# - Re-Enable the WLAN_ID for ONEguest
# - Save the config
# - Create logfiles for all SSH transactions
#
# Source Files:
# main.py - main python script
# credentials.py - file to store login credentials
# ios_wlan_id-finder.py - logs into an ios host and finds the WLAN_ID associated with the ONEguest SSID
# aireos_wlan_id-finder.py - logs into an aireos host and finds the WLAN_ID associated with the ONEguest SSID
# host_file.py - python list containing ip addresses of Cisco WLCs
# cmd_file.py - python list containing Cisco commands to run within the script
# Note: 'show run | include hostname' must be element 0
#
# Output Files:
# log.txt - log file containing all information from the SSH channel.
# This is an all inclusive file for all hosts connected to
# {hostname}.txt - each host connected to has an individual log file of commands only.
# this log is not as detailed as the log.txt file.
# ------------------------------------------------------------------------------------------------#
# ------------------------------------------------------------------------------------------------#
# Function definitions
# ------------------------------------------------------------------------------------------------#
import logging
import coloredlogs
from netmiko import ConnectHandler
from ntc_templates.parse import parse_output
from host_file import host
from credentials import credentials
from cmd_file import cmd
from pprint import pprint
##### Begin Logging section #####
# Basic logging allows Netmiko detailed logging of the ssh stream written to a file
logging.basicConfig(filename='log.txt', level=logging.DEBUG, datefmt='%d-%b-%y %H:%M:%S')
logger = logging.getLogger('Netmiko')
# Create a console handler object for the console Stream
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
# # Create a ColoredFormatter to use as formatter for the Console Handler
formatter = coloredlogs.ColoredFormatter(fmt='%(asctime)s - %(message)s', datefmt='%d-%b-%y %H:%M:%S')
ch.setFormatter(formatter)
# assign console handler to logger
logger.addHandler(ch)
# ##### End Logging section #####
if __name__ == '__main__':
# Capture new PSK
print()
PSK = input("Please enter New PSK: ")
print()
for wlc in host:
logfile = wlc['hostname'] + '.log'
# Netmiko SSH connection
ssh_connect = ConnectHandler(ip=wlc['ip'], username=credentials['username'], password=credentials['password'],
device_type=wlc['device_type'], session_log=logfile)
# Netmiko connection sends show command
# use_textfsm automatically looks in the \venv\Lib\site-packages\ntc_templates\templates directory
# for a template matching the device type + command name to convert the unstructured output of the show
# command to structured data (list of dictionaries)
# Note: ntc_templates and fsmtext are automatically installed with Netmiko
show_wlan_raw = ssh_connect.send_command(cmd['get_wlan'])
show_wlan = parse_output(platform=wlc['device_type'], command="show wlan sum", data=show_wlan_raw)
for wlan in show_wlan:
if wlan['ssid'] == 'ONEguest':
print()
print('*******************************************************************************')
print()
# Connect to host and Show current state of WLANs
logger.critical('Connecting to ' + wlc['hostname'])
logger.warning(ssh_connect.send_command(cmd['get_wlan']))
# Disable ONEguest WLAN and Show current state of WLANs
logger.critical('Disabling WLAN on ' + wlan['ssid'] + ' for WLAN-ID: ' + wlan['wlanid'])
if wlc['device_type'] == 'cisco_wlc_ssh':
ssh_connect.send_command(cmd['aireos_wlan_disable'] + ' ' + wlan['wlanid'])
logger.warning(ssh_connect.send_command(cmd['get_wlan']))
print()
else:
# change to wlan profile sub menu for ONEguest SSID and shutdown SSID
# send_config_set automatically enters config mode, executes a list of commands,
# then exits config mode. Note if only one command is in the list it does not stay in config mode
ssh_connect.send_config_set(['wlan ' + wlan['profile'], cmd['ios_shutdown']])
logger.warning(ssh_connect.send_command(cmd['get_wlan']))
print()
# Change PSK
logger.critical('Changing PSK on ' + wlc['hostname'] + ' for WLAN-ID: ' + wlan['wlanid'])
if wlc['device_type'] == 'cisco_wlc_ssh':
ssh_connect.send_command(cmd['aireos_psk'] + ' ' + PSK + ' ' + wlan['wlanid'])
logger.warning('New PSK is: ' + PSK)
print()
else:
ssh_connect.enable()
# change to wlan profile sub menu for ONEguest SSID and chnage PSK
ssh_connect.send_config_set(['wlan ' + wlan['profile'], cmd['ios_psk'] + ' ' + PSK])
logger.warning('New PSK is: ' + PSK)
print()
# Enable ONEguest WLAN and Show current state of WLANs
logger.critical('Enabling WLAN on ' + wlan['ssid'] + ' for WLAN-ID: ' + wlan['wlanid'])
if wlc['device_type'] == 'cisco_wlc_ssh':
ssh_connect.send_command(cmd['aireos_wlan_enable'] + ' ' + wlan['wlanid'])
logger.warning(ssh_connect.send_command(cmd['get_wlan']))
print()
else:
ssh_connect.enable()
# change to wlan profile sub menu for ONEguest SSID and enable it
ssh_connect.send_config_set(['wlan ' + wlan['profile'], cmd['ios_no_shutdown']])
logger.warning(ssh_connect.send_command(cmd['get_wlan']))
print()
# Save Config
logger.critical('Saving Config on host: ' + wlc['hostname'])
if wlc['device_type'] == 'cisco_wlc_ssh':
ssh_connect.save_config(cmd['aireos_save'], confirm_response='y')
print()
print('*******************************************************************************')
print()
else:
ssh_connect.save_config()
print()
print('*******************************************************************************')
print()
ssh_connect.disconnect()
| [
"[email protected]"
] | |
102450eccb8fcad7b0362df30fb062da3054d97a | 779291cb83ec3cab36d8bb66ed46b3afd4907f95 | /migration/rnaseq-wf_cleanup.py | 7a26326b9cd9e55a223e034fa72a8b9827c72f1c | [] | no_license | Shengqian95/ncbi_remap | ac3258411fda8e9317f3cdf951cc909cc0f1946e | 3f2099058bce5d1670a672a69c13efd89d538cd1 | refs/heads/master | 2023-05-22T06:17:57.900135 | 2020-11-01T17:16:54 | 2020-11-01T17:16:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,369 | py | import os
import re
import shutil
from pathlib import Path
CLEAN_UP = os.environ.get("CLEAN_UP", False)  # env values are strings, so any non-empty value enables deletion
SRR_PATTERN = re.compile(r"^[SED]RR\d+$")
TARGETS = [
"../output/rnaseq-wf/aln_stats/{srx}.parquet",
"../output/rnaseq-wf/gene_counts/{srx}.parquet",
"../output/rnaseq-wf/junction_counts/{srx}.parquet",
"../output/rnaseq-wf/intergenic_counts/{srx}.parquet",
"../output/rnaseq-wf/segment_counts/{srx}.parquet",
"../output/rnaseq-wf/fusion_counts/{srx}.parquet",
"../output/rnaseq-wf/flybase_bigwigs/{srx}.flybase.first.bw",
"../output/rnaseq-wf/flybase_bigwigs/{srx}.flybase.second.bw",
"../output/rnaseq-wf/ucsc_bigwigs/{srx}.first.bw",
"../output/rnaseq-wf/ucsc_bigwigs/{srx}.second.bw",
"../output/rnaseq-wf/samples/{srx}/{srx}.bam",
"../output/rnaseq-wf/samples/{srx}/{srx}.bam.bai",
]
def main():
for srx_path in Path("../output/rnaseq-wf/samples").iterdir():
srx = srx_path.name
remove_temp(srx)
if (
Path(f"../output/rnaseq-wf/atropos_bad/{srx}").exists()
or Path(f"../output/rnaseq-wf/alignment_bad/{srx}").exists()
):
remove_srx_folder(srx)
continue
if all(check_target(target.format(srx=srx)) for target in TARGETS):
Path(f"../output/rnaseq-wf/done/{srx}").touch()
remove_srr_folders(srx)
remove_processed_files(srx)
remove_misc_files(srx)
def remove_temp(srx: str):
for pth in Path(f"../output/rnaseq-wf/samples/{srx}").glob("*.tmp"):
pth.unlink()
def remove_srx_folder(srx: str):
pth = Path(f"../output/rnaseq-wf/samples/{srx}")
if pth.exists() and CLEAN_UP:
shutil.rmtree(pth)
elif pth.exists():
print("Removing SRX Folder:", pth, sep="\t")
def check_target(file_name: str):
if Path(file_name).exists():
return True
print("Missing Target:", file_name, sep="\t")
def remove_srr_folders(srx: str):
for pth in Path(f"../output/rnaseq-wf/samples/{srx}").iterdir():
if pth.is_dir() and re.match(SRR_PATTERN, pth.name):
if CLEAN_UP:
shutil.rmtree(pth)
else:
print("Removing SRR Folder:", pth, sep="\t")
def remove_file(file_name: str):
pth = Path(file_name)
if pth.exists() and CLEAN_UP:
pth.unlink()
elif pth.exists():
print("Removing File:", pth, sep="\t")
def remove_processed_files(srx: str):
remove_file(f"../output/rnaseq-wf/samples/{srx}/{srx}.bam.samtools.stats")
remove_file(f"../output/rnaseq-wf/samples/{srx}/{srx}.bam.bamtools.stats")
remove_file(f"../output/rnaseq-wf/samples/{srx}/{srx}.bam.counts")
remove_file(f"../output/rnaseq-wf/samples/{srx}/{srx}.bam.counts.jcounts")
remove_file(f"../output/rnaseq-wf/samples/{srx}/{srx}.bam.intergenic.counts")
remove_file(f"../output/rnaseq-wf/samples/{srx}/{srx}.bam.exon_fusions.counts")
remove_file(f"../output/rnaseq-wf/samples/{srx}/{srx}.bam.exon_segments.counts")
def remove_misc_files(srx: str):
remove_file(f"../output/rnaseq-wf/samples/{srx}/{srx}.trim.clean.tsv")
remove_file(f"../output/rnaseq-wf/samples/{srx}/{srx}.hisat2.bam.tsv")
remove_file(f"../output/rnaseq-wf/samples/{srx}/{srx}.bam.samtools.idxstats")
remove_file(f"../output/rnaseq-wf/samples/{srx}/{srx}.counts")
remove_file(f"../output/rnaseq-wf/samples/{srx}/{srx}.bam.counts.summary")
remove_file(f"../output/rnaseq-wf/samples/{srx}/{srx}.bam.counts.log")
remove_file(f"../output/rnaseq-wf/samples/{srx}/{srx}.bam.intergenic.counts.jcounts")
remove_file(f"../output/rnaseq-wf/samples/{srx}/{srx}.bam.intergenic.counts.summary")
remove_file(f"../output/rnaseq-wf/samples/{srx}/{srx}.bam.intergenic.counts.log")
remove_file(f"../output/rnaseq-wf/samples/{srx}/{srx}.bam.exon_fusions.counts.jcounts")
remove_file(f"../output/rnaseq-wf/samples/{srx}/{srx}.bam.exon_fusions.counts.summary")
remove_file(f"../output/rnaseq-wf/samples/{srx}/{srx}.bam.exon_fusions.counts.log")
remove_file(f"../output/rnaseq-wf/samples/{srx}/{srx}.bam.exon_segments.counts.jcounts")
remove_file(f"../output/rnaseq-wf/samples/{srx}/{srx}.bam.exon_segments.counts.summary")
remove_file(f"../output/rnaseq-wf/samples/{srx}/{srx}.bam.exon_segments.counts.log")
if __name__ == "__main__":
main()
| [
"[email protected]"
] | |
9b8e1d93b2f68bc34a67adea4f49a273d934c106 | a6b8263a42b96f317b818b3ba7e45bb8cb4458f6 | /shipsnake/__main__.py | e0db2bc14affd6ceb307dfe465e53a7f63042a48 | [
"MIT"
] | permissive | cole-wilson/test-ship | 5002add3b7f84162a064fcc4496f82a512fe4ff3 | 95f2ff585efd7564e60caad9a4806939923bc525 | refs/heads/master | 2023-01-30T01:52:55.111219 | 2020-12-07T05:18:12 | 2020-12-07T05:18:12 | 319,211,654 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,108 | py | if __name__ != '__main__':
print("Please run shipsnake as script or command, not module.")
import toml
import os
import sys
import glob
import shutil
# mode = sys.argv[1]
# mode="upload"
version = ""
if len(sys.argv) == 1:
print("Provide a mode:\n\tshipsnake [wizard | build | dev | upload]")
sys.exit(0)
mode = sys.argv[1]
if len(sys.argv) < 3 and mode in ["upload",'build']:
print("Provide a version:\n\tshipsnake "+mode+" <version>")
sys.exit(0)
if len(sys.argv)>2:
version = sys.argv[2]
if mode=="dev" and version=="":
version = "dev_build"
if os.getenv('TEST_SNAKE')=="TRUE":
os.chdir('tester')
if mode == "wizard":
import wizard
wizard.main()
elif mode in ["build","dev","upload"]:
	open('.'+os.sep+'.gitignore','w+').write('*'+os.sep+'__pycache__')
if not os.path.isfile('.'+os.sep+'shipsnake.toml'):
print('Please create a config file with `shipsnake wizard` first.')
sys.exit(0)
with open('.'+os.sep+'shipsnake.toml') as datafile:
data = toml.loads(datafile.read())
	prefix = os.path.dirname(os.path.abspath(__file__))  # assumption: `prefix` was undefined here; templates presumably ship beside this script
	with open(prefix+os.sep+'setup.py.template') as datafile:
template = datafile.read()
setup = template.format(
**data,
version = version,
entry_points = [data["short_name"]+"="+data["short_name"]+".__main__"] if data["file"]!="" else [""]
)
open('setup.py','w+').write(setup)
source_dir = os.getcwd()
target_dir = data["short_name"]+os.sep
types = ('*.py',*data["data_files"])
file_names = []
for files in types:
file_names.extend(glob.glob(files))
if not os.path.isdir(target_dir):
os.mkdir(target_dir)
for file_name in file_names:
if file_name in ["setup.py","shipsnake.toml"]:
continue
shutil.move(os.path.join(source_dir, file_name), target_dir+os.sep+file_name)
open(target_dir+'__init__.py','w+').write('')
if data['file']!="" and not os.path.isfile(data['short_name']+os.sep+'__main__.py'):
try:
os.rename(data['short_name']+os.sep+data['file'],data['short_name']+os.sep+'__main__.py')
open(data['short_name']+os.sep+data['file'],'w+').write('# Please edit __main__.py for the main code. Thanks!\n(you can delete this file.)')
except FileNotFoundError:
pass
try:
shutil.rmtree('dist')
except:
pass
try:
os.mkdir('bin')
except:
pass
open("bin"+os.sep+data['short_name'],'w+').write(f"#!"+os.sep+"usr"+os.sep+"bin"+os.sep+f"env bash\npython3 -m {data['short_name']} $@ || echo 'Error. Please re-install shipsnake with:\\n`pip3 install shipsnake --upgrade`'")
if mode == "build" or mode=="upload":
os.system('python3 .'+os.sep+'setup.py sdist bdist_wheel')
try:
shutil.rmtree('build')
except:
pass
elif mode == "dev":
os.system('python3 .'+os.sep+'setup.py develop')
for x in glob.glob('*.egg-info'):
shutil.rmtree(x)
else:
print(f'Illegeal option `{mode}`')
sys.exit(0)
if mode=="upload":
print("Please make sure that you have a https://pypi.org/ account.")
try:
import twine
except:
input('Press enter to continue installing `twine`. Press ctrl+x to exit.')
os.system('python3 -m pip install --user --upgrade twine || python3 -m pip install --upgrade twine')
os.system('python3 -m twine upload dist'+os.sep+'*')
| [
"[email protected]"
] | |
199390424fddb7b89b231c304224800f4fb4fb79 | 3a1c1373d8f1617485893dea46323c9d07dedc4d | /python_algo/프로그래머스/20210429_다리를 지나는 트럭.py | b1fd3a58b2856feedb64be0b86420485b12daf0c | [] | no_license | GaYoung87/Algorithm | 28b95c3eed054454a06a14d1a255ea1d57486b22 | 59abce98ff14879bc88b72ef2e562ce55dae5335 | refs/heads/master | 2023-08-31T07:52:31.487648 | 2023-08-29T15:09:04 | 2023-08-29T15:09:04 | 199,405,835 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 296 | py | def solution(bridge_length, weight, truck_weights):
    from collections import deque
    # one loop iteration == one second; 0 marks an empty bridge slot
    bridge = deque([0] * bridge_length)
    bridge_sum = 0
    time = 0
    trucks = deque(truck_weights)
    while trucks:
        time += 1
        bridge_sum -= bridge.popleft()
        if bridge_sum + trucks[0] <= weight:
            bridge_sum += trucks[0]
            bridge.append(trucks.popleft())
        else:
            bridge.append(0)
    return time + bridge_length  # the last truck still has to cross
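# Hedged sanity checks against the commonly published samples for this
# problem:
# solution(2, 10, [7, 4, 5, 6]) -> 8
# solution(100, 100, [10]) -> 101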
"[email protected]"
] | |
b86b6586ed3da7fa83a3a45383ce369cf1633df0 | 99d436394e47571160340c95d527ecadaae83541 | /algorithms_questions/ch17_shortest_path/q39_2.py | 841ee2b4be8737f7003f3b85123dcedfc6c83627 | [] | no_license | LeeSeok-Jun/Algorithms | b47ba4de5580302e9e2399bcf85d245ebeb1b93d | 0e8573bd03c50df3f89dd0ee9eed9cf8716ef8d8 | refs/heads/main | 2023-03-02T06:47:20.939235 | 2021-02-08T05:18:24 | 2021-02-08T05:18:24 | 299,840,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 969 | py | """
Mars Exploration (화성 탐사) - 3rd attempt
"""
# Time limit for this attempt: 40 minutes
# 2021/01/26 16:06 ~ 16:16
# Correct!
import heapq
import sys
input = sys.stdin.readline
dr = [-1, 1, 0, 0]
dc = [0, 0, -1, 1]
for tc in range(int(input())):
n = int(input())
graph = []
for _ in range(n):
graph.append(list(map(int, input().split())))
INF = int(1e9)
distance = [[INF] * n for _ in range(n)]
distance[0][0] = graph[0][0]
q = []
heapq.heappush(q, (graph[0][0], 0, 0))
while q:
dist, r, c = heapq.heappop(q)
if distance[r][c] < dist:
continue
for i in range(4):
nr = r + dr[i]
nc = c + dc[i]
if nr < 0 or nr >= n or nc < 0 or nc >= n:
continue
cost = dist + graph[nr][nc]
if cost < distance[nr][nc]:
distance[nr][nc] = cost
heapq.heappush(q, (cost, nr, nc))
print(distance[n-1][n-1]) | [
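# Illustrative check: for the 3x3 map [[5, 5, 4], [3, 9, 1], [3, 2, 7]] the
# cheapest top-left to bottom-right path is 5 -> 3 -> 3 -> 2 -> 7, cost 20.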
"[email protected]"
] | |
c45eb5f1c3777c3c501733e0224bf45deaa1c22e | d6589ff7cf647af56938a9598f9e2e674c0ae6b5 | /waf-openapi-20190910/setup.py | ecd45e94dd0948313db72333337965dd00c423a0 | [
"Apache-2.0"
] | permissive | hazho/alibabacloud-python-sdk | 55028a0605b1509941269867a043f8408fa8c296 | cddd32154bb8c12e50772fec55429a9a97f3efd9 | refs/heads/master | 2023-07-01T17:51:57.893326 | 2021-08-02T08:55:22 | 2021-08-02T08:55:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,604 | py | # -*- coding: utf-8 -*-
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
"""
import os
from setuptools import setup, find_packages
"""
setup module for alibabacloud_waf-openapi20190910.
Created on 25/04/2021
@author: Alibaba Cloud SDK
"""
PACKAGE = "alibabacloud_waf_openapi20190910"
NAME = "alibabacloud_waf-openapi20190910" or "alibabacloud-package"
DESCRIPTION = "Alibaba Cloud waf-openapi (20190910) SDK Library for Python"
AUTHOR = "Alibaba Cloud SDK"
AUTHOR_EMAIL = "[email protected]"
URL = "https://github.com/aliyun/alibabacloud-python-sdk"
VERSION = __import__(PACKAGE).__version__
REQUIRES = [
"alibabacloud_tea_util>=0.3.3, <1.0.0",
"alibabacloud_tea_openapi>=0.2.4, <1.0.0",
"alibabacloud_endpoint_util>=0.0.3, <1.0.0"
]
LONG_DESCRIPTION = ''
if os.path.exists('./README.md'):
with open("README.md", encoding='utf-8') as fp:
LONG_DESCRIPTION = fp.read()
setup(
name=NAME,
version=VERSION,
description=DESCRIPTION,
long_description=LONG_DESCRIPTION,
long_description_content_type='text/markdown',
author=AUTHOR,
author_email=AUTHOR_EMAIL,
license="Apache License 2.0",
url=URL,
keywords=["alibabacloud","waf","openapi20190910"],
packages=find_packages(exclude=["tests*"]),
include_package_data=True,
platforms="any",
install_requires=REQUIRES,
python_requires=">=3.6",
classifiers=(
"Development Status :: 4 - Beta",
"Intended Audience :: Developers",
"License :: OSI Approved :: Apache Software License",
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.6",
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
"Topic :: Software Development"
)
)
| [
"[email protected]"
] | |
c642bda474582d7a38bff7dcb5c49dbe6fc93d0c | 0b9470f9a839d87b21fd575421b5223afb4573c6 | /07day/01-捕获输入异常.py | 676dc64ff5bbe99306b430ca519aeb1cedf9871d | [] | no_license | ZiHaoYa/1808 | 351356b4fa920a5075899c8abdce24a61502097f | 891582547fef4c6fd4fd4132da033e48e069901f | refs/heads/master | 2020-03-30T06:20:46.898840 | 2018-09-29T08:56:53 | 2018-09-29T08:56:53 | 147,603,769 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 251 | py | try:
    number = int(input("Please enter a number: "))
except Exception as ret:
    print("Invalid input")
    print(ret)
number = input("Please enter a number: ")
if number.isdigit():
    print("It is a number")
    number = int(number)
else:
    print("Invalid input")
| [
"[email protected]"
] | |
1d034b6b06e94315ceda06e8a8cc67681b8b3e9e | 6a7d8b67aad59c51dafdfb8bcffd53864a3d65b0 | /LeetCode/toeplitzMatrix.py | ef8e74f26346b2369d234c7f7ba1f11b002541a5 | [] | no_license | dicao425/algorithmExercise | 8bba36c1a08a232678e5085d24bac1dbee7e5364 | 36cb33af758b1d01da35982481a8bbfbee5c2810 | refs/heads/master | 2021-10-07T08:56:18.030583 | 2018-12-04T05:59:17 | 2018-12-04T05:59:17 | 103,611,760 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 451 | py | #!/usr/bin/python
import sys
class Solution(object):
def isToeplitzMatrix(self, matrix):
"""
:type matrix: List[List[int]]
:rtype: bool
"""
m = len(matrix)
n = len(matrix[0])
for l in range(m-1):
if matrix[l][:n-1] != matrix[l+1][1:]:
return False
return True
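# Example (LeetCode 766 sample): every top-left to bottom-right diagonal is
# constant.
# Solution().isToeplitzMatrix([[1, 2, 3, 4], [5, 1, 2, 3], [9, 5, 1, 2]]) -> True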
def main():
aa = Solution()
return 0
if __name__ == "__main__":
sys.exit(main()) | [
"[email protected]"
] | |
e8e98afdde5ea6d6625b1b737aa624cfc45ca24c | 386a5b505d77c9798aaab78495d0f00c349cf660 | /Prognos Project/Working/Latiket Jaronde Git/DJango examples/DynamicUrls/DynamicUrls/urls.py | 35264c0f4b24f8a9bef2b8a7b45683a883428d3a | [] | no_license | namratarane20/MachineLearning | 2da2c87217618d124fd53f607c20641ba44fb0b7 | b561cc74733b655507242cbbf13ea09a2416b9e2 | refs/heads/master | 2023-01-20T18:54:15.662179 | 2020-03-09T14:12:44 | 2020-03-09T14:12:44 | 237,597,461 | 0 | 0 | null | 2023-01-05T12:37:12 | 2020-02-01T10:22:20 | Python | UTF-8 | Python | false | false | 801 | py | """DynamicUrls URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
urlpatterns = [
path('admin/', admin.site.urls),
path('', include("urlDemo.urls")),
]
| [
"[email protected]"
] | |
0a6f6a24a5c718849def667cd7b9fda3075dad7b | 62e58c051128baef9452e7e0eb0b5a83367add26 | /edifact/D05B/REBORDD05BUN.py | cff49e05af4ddd814da48c06f274ce8bd1247a17 | [] | no_license | dougvanhorn/bots-grammars | 2eb6c0a6b5231c14a6faf194b932aa614809076c | 09db18d9d9bd9d92cefbf00f1c0de1c590fe3d0d | refs/heads/master | 2021-05-16T12:55:58.022904 | 2019-05-17T15:22:23 | 2019-05-17T15:22:23 | 105,274,633 | 0 | 0 | null | 2017-09-29T13:21:21 | 2017-09-29T13:21:21 | null | UTF-8 | Python | false | false | 1,871 | py | #Generated by bots open source edi translator from UN-docs.
from bots.botsconfig import *
from edifact import syntax
from recordsD05BUN import recorddefs
structure = [
{ID: 'UNH', MIN: 1, MAX: 1, LEVEL: [
{ID: 'GEI', MIN: 1, MAX: 6},
{ID: 'NAD', MIN: 1, MAX: 9, LEVEL: [
{ID: 'CTA', MIN: 0, MAX: 1},
{ID: 'COM', MIN: 0, MAX: 5},
{ID: 'RFF', MIN: 0, MAX: 9},
]},
{ID: 'DTM', MIN: 1, MAX: 6},
{ID: 'FTX', MIN: 0, MAX: 6},
{ID: 'ARD', MIN: 1, MAX: 999, LEVEL: [
{ID: 'CUX', MIN: 1, MAX: 1},
{ID: 'GEI', MIN: 0, MAX: 5},
{ID: 'LOC', MIN: 0, MAX: 1},
{ID: 'DTM', MIN: 0, MAX: 5},
{ID: 'FTX', MIN: 0, MAX: 3},
{ID: 'RFF', MIN: 1, MAX: 9},
{ID: 'REL', MIN: 1, MAX: 999, LEVEL: [
{ID: 'RFF', MIN: 0, MAX: 9},
{ID: 'GEI', MIN: 0, MAX: 7},
{ID: 'LOC', MIN: 0, MAX: 9},
{ID: 'NAD', MIN: 0, MAX: 7},
{ID: 'DTM', MIN: 0, MAX: 9},
{ID: 'FTX', MIN: 0, MAX: 6},
{ID: 'PCD', MIN: 0, MAX: 99, LEVEL: [
{ID: 'NAD', MIN: 0, MAX: 1},
]},
{ID: 'MOA', MIN: 0, MAX: 99, LEVEL: [
{ID: 'GEI', MIN: 0, MAX: 2},
{ID: 'PCD', MIN: 0, MAX: 3},
{ID: 'DTM', MIN: 0, MAX: 2},
{ID: 'RFF', MIN: 0, MAX: 9, LEVEL: [
{ID: 'DTM', MIN: 0, MAX: 3},
{ID: 'COM', MIN: 0, MAX: 1},
]},
{ID: 'CUX', MIN: 0, MAX: 1},
]},
]},
]},
{ID: 'UNS', MIN: 1, MAX: 1},
{ID: 'MOA', MIN: 0, MAX: 99, LEVEL: [
{ID: 'RFF', MIN: 0, MAX: 99, LEVEL: [
{ID: 'DTM', MIN: 0, MAX: 3},
{ID: 'COM', MIN: 0, MAX: 1},
]},
{ID: 'PCD', MIN: 0, MAX: 3},
]},
{ID: 'UNT', MIN: 1, MAX: 1},
]},
]
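# Reading the grammar (standard bots-translator convention, not specific to
# this generated file): each entry gives a segment ID, its MIN/MAX occurrence
# counts, and LEVEL introduces the segments nested under it.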
| [
"[email protected]"
] | |
d08dcdc3b0c9dc63dfaf73fa44457a1d7af97a27 | 65b6e843df4c2e8b9abed79b33be24eba1686fa2 | /absence/wsgi.py | 2d6983105aada3e12ea2bdcaac1b9b198f064d05 | [] | no_license | The-Super-Stack/abs-backend | 563fba90b36f45a0bac82aa5ace7c7d079309b09 | d9335ec0a9fe9fdfa1d416d8277c11c2ac23cb5a | refs/heads/main | 2023-08-14T13:33:05.631317 | 2021-10-11T06:18:44 | 2021-10-11T06:18:44 | 415,801,507 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 391 | py | """
WSGI config for absence project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'absence.settings')
application = get_wsgi_application()
| [
"[email protected]"
] | |
e005b0cdb5dbaf2b1440c37359feb6c7e0913af5 | 0a0693bed6e81febdee9502ebde4ee1156b105e6 | /venv/Scripts/pip-script.py | 916f0b6ff9d40eb7462fc7c5097aa1c89d6bf8c5 | [] | no_license | laoyouqing/sms_middleground | 3755d206bfa9ade24a94f1981cb60c0b393e3767 | dbafd3410802135f13e68de43cbc5b0246cb981f | refs/heads/master | 2022-12-11T03:52:51.737971 | 2019-08-21T05:59:55 | 2019-08-21T05:59:55 | 203,516,088 | 0 | 0 | null | 2022-12-08T05:20:56 | 2019-08-21T05:55:43 | Python | UTF-8 | Python | false | false | 392 | py | #!E:\sms_middleground\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==19.0.3','console_scripts','pip'
__requires__ = 'pip==19.0.3'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==19.0.3', 'console_scripts', 'pip')()
)
| [
"[email protected]"
] | |
9422d64a7fd3b7c00aadf3fc9f3fb39087611d8b | 3c000380cbb7e8deb6abf9c6f3e29e8e89784830 | /venv/Lib/site-packages/cobra/modelimpl/qosm/ingrpktshist1qtr.py | 52d1c6228888f3285d4b421d6b525367a8d9933a | [] | no_license | bkhoward/aciDOM | 91b0406f00da7aac413a81c8db2129b4bfc5497b | f2674456ecb19cf7299ef0c5a0887560b8b315d0 | refs/heads/master | 2023-03-27T23:37:02.836904 | 2021-03-26T22:07:54 | 2021-03-26T22:07:54 | 351,855,399 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 18,745 | py | # coding=UTF-8
# **********************************************************************
# Copyright (c) 2013-2020 Cisco Systems, Inc. All rights reserved
# written by zen warriors, do not modify!
# **********************************************************************
from cobra.mit.meta import ClassMeta
from cobra.mit.meta import StatsClassMeta
from cobra.mit.meta import CounterMeta
from cobra.mit.meta import PropMeta
from cobra.mit.meta import Category
from cobra.mit.meta import SourceRelationMeta
from cobra.mit.meta import NamedSourceRelationMeta
from cobra.mit.meta import TargetRelationMeta
from cobra.mit.meta import DeploymentPathMeta, DeploymentCategory
from cobra.model.category import MoCategory, PropCategory, CounterCategory
from cobra.mit.mo import Mo
# ##################################################
class IngrPktsHist1qtr(Mo):
"""
A class that represents historical statistics for ingress packets in a 1 quarter sampling interval. This class updates every day.
"""
meta = StatsClassMeta("cobra.model.qosm.IngrPktsHist1qtr", "ingress packets")
counter = CounterMeta("drop", CounterCategory.COUNTER, "packets", "ingress drop packets")
counter._propRefs[PropCategory.IMPLICIT_CUMULATIVE] = "dropCum"
counter._propRefs[PropCategory.IMPLICIT_PERIODIC] = "dropPer"
counter._propRefs[PropCategory.IMPLICIT_MIN] = "dropMin"
counter._propRefs[PropCategory.IMPLICIT_MAX] = "dropMax"
counter._propRefs[PropCategory.IMPLICIT_AVG] = "dropAvg"
counter._propRefs[PropCategory.IMPLICIT_SUSPECT] = "dropSpct"
counter._propRefs[PropCategory.IMPLICIT_THRESHOLDED] = "dropThr"
counter._propRefs[PropCategory.IMPLICIT_TREND] = "dropTr"
counter._propRefs[PropCategory.IMPLICIT_RATE] = "dropRate"
meta._counters.append(counter)
counter = CounterMeta("admit", CounterCategory.COUNTER, "packets", "ingress admit packets")
counter._propRefs[PropCategory.IMPLICIT_CUMULATIVE] = "admitCum"
counter._propRefs[PropCategory.IMPLICIT_PERIODIC] = "admitPer"
counter._propRefs[PropCategory.IMPLICIT_MIN] = "admitMin"
counter._propRefs[PropCategory.IMPLICIT_MAX] = "admitMax"
counter._propRefs[PropCategory.IMPLICIT_AVG] = "admitAvg"
counter._propRefs[PropCategory.IMPLICIT_SUSPECT] = "admitSpct"
counter._propRefs[PropCategory.IMPLICIT_THRESHOLDED] = "admitThr"
counter._propRefs[PropCategory.IMPLICIT_TREND] = "admitTr"
counter._propRefs[PropCategory.IMPLICIT_RATE] = "admitRate"
meta._counters.append(counter)
meta.moClassName = "qosmIngrPktsHist1qtr"
meta.rnFormat = "HDqosmIngrPkts1qtr-%(index)s"
meta.category = MoCategory.STATS_HISTORY
meta.label = "historical ingress packets stats in 1 quarter"
meta.writeAccessMask = 0x100000000000001
meta.readAccessMask = 0x100000000000001
meta.isDomainable = False
meta.isReadOnly = True
meta.isConfigurable = False
meta.isDeletable = False
meta.isContextRoot = True
meta.parentClasses.add("cobra.model.qosm.IfClass")
meta.superClasses.add("cobra.model.stats.Item")
meta.superClasses.add("cobra.model.stats.Hist")
meta.superClasses.add("cobra.model.qosm.IngrPktsHist")
meta.rnPrefixes = [
('HDqosmIngrPkts1qtr-', True),
]
prop = PropMeta("str", "admitAvg", "admitAvg", 10928, PropCategory.IMPLICIT_AVG)
prop.label = "ingress admit packets average value"
prop.isOper = True
prop.isStats = True
meta.props.add("admitAvg", prop)
prop = PropMeta("str", "admitCum", "admitCum", 10924, PropCategory.IMPLICIT_CUMULATIVE)
prop.label = "ingress admit packets cumulative"
prop.isOper = True
prop.isStats = True
meta.props.add("admitCum", prop)
prop = PropMeta("str", "admitMax", "admitMax", 10927, PropCategory.IMPLICIT_MAX)
prop.label = "ingress admit packets maximum value"
prop.isOper = True
prop.isStats = True
meta.props.add("admitMax", prop)
prop = PropMeta("str", "admitMin", "admitMin", 10926, PropCategory.IMPLICIT_MIN)
prop.label = "ingress admit packets minimum value"
prop.isOper = True
prop.isStats = True
meta.props.add("admitMin", prop)
prop = PropMeta("str", "admitPer", "admitPer", 10925, PropCategory.IMPLICIT_PERIODIC)
prop.label = "ingress admit packets periodic"
prop.isOper = True
prop.isStats = True
meta.props.add("admitPer", prop)
prop = PropMeta("str", "admitRate", "admitRate", 10932, PropCategory.IMPLICIT_RATE)
prop.label = "ingress admit packets rate"
prop.isOper = True
prop.isStats = True
meta.props.add("admitRate", prop)
prop = PropMeta("str", "admitSpct", "admitSpct", 10929, PropCategory.IMPLICIT_SUSPECT)
prop.label = "ingress admit packets suspect count"
prop.isOper = True
prop.isStats = True
meta.props.add("admitSpct", prop)
prop = PropMeta("str", "admitThr", "admitThr", 10930, PropCategory.IMPLICIT_THRESHOLDED)
prop.label = "ingress admit packets thresholded flags"
prop.isOper = True
prop.isStats = True
prop.defaultValue = 0
prop.defaultValueStr = "unspecified"
prop._addConstant("avgCrit", "avg-severity-critical", 2199023255552)
prop._addConstant("avgHigh", "avg-crossed-high-threshold", 68719476736)
prop._addConstant("avgLow", "avg-crossed-low-threshold", 137438953472)
prop._addConstant("avgMajor", "avg-severity-major", 1099511627776)
prop._addConstant("avgMinor", "avg-severity-minor", 549755813888)
prop._addConstant("avgRecovering", "avg-recovering", 34359738368)
prop._addConstant("avgWarn", "avg-severity-warning", 274877906944)
prop._addConstant("cumulativeCrit", "cumulative-severity-critical", 8192)
prop._addConstant("cumulativeHigh", "cumulative-crossed-high-threshold", 256)
prop._addConstant("cumulativeLow", "cumulative-crossed-low-threshold", 512)
prop._addConstant("cumulativeMajor", "cumulative-severity-major", 4096)
prop._addConstant("cumulativeMinor", "cumulative-severity-minor", 2048)
prop._addConstant("cumulativeRecovering", "cumulative-recovering", 128)
prop._addConstant("cumulativeWarn", "cumulative-severity-warning", 1024)
prop._addConstant("lastReadingCrit", "lastreading-severity-critical", 64)
prop._addConstant("lastReadingHigh", "lastreading-crossed-high-threshold", 2)
prop._addConstant("lastReadingLow", "lastreading-crossed-low-threshold", 4)
prop._addConstant("lastReadingMajor", "lastreading-severity-major", 32)
prop._addConstant("lastReadingMinor", "lastreading-severity-minor", 16)
prop._addConstant("lastReadingRecovering", "lastreading-recovering", 1)
prop._addConstant("lastReadingWarn", "lastreading-severity-warning", 8)
prop._addConstant("maxCrit", "max-severity-critical", 17179869184)
prop._addConstant("maxHigh", "max-crossed-high-threshold", 536870912)
prop._addConstant("maxLow", "max-crossed-low-threshold", 1073741824)
prop._addConstant("maxMajor", "max-severity-major", 8589934592)
prop._addConstant("maxMinor", "max-severity-minor", 4294967296)
prop._addConstant("maxRecovering", "max-recovering", 268435456)
prop._addConstant("maxWarn", "max-severity-warning", 2147483648)
prop._addConstant("minCrit", "min-severity-critical", 134217728)
prop._addConstant("minHigh", "min-crossed-high-threshold", 4194304)
prop._addConstant("minLow", "min-crossed-low-threshold", 8388608)
prop._addConstant("minMajor", "min-severity-major", 67108864)
prop._addConstant("minMinor", "min-severity-minor", 33554432)
prop._addConstant("minRecovering", "min-recovering", 2097152)
prop._addConstant("minWarn", "min-severity-warning", 16777216)
prop._addConstant("periodicCrit", "periodic-severity-critical", 1048576)
prop._addConstant("periodicHigh", "periodic-crossed-high-threshold", 32768)
prop._addConstant("periodicLow", "periodic-crossed-low-threshold", 65536)
prop._addConstant("periodicMajor", "periodic-severity-major", 524288)
prop._addConstant("periodicMinor", "periodic-severity-minor", 262144)
prop._addConstant("periodicRecovering", "periodic-recovering", 16384)
prop._addConstant("periodicWarn", "periodic-severity-warning", 131072)
prop._addConstant("rateCrit", "rate-severity-critical", 36028797018963968)
prop._addConstant("rateHigh", "rate-crossed-high-threshold", 1125899906842624)
prop._addConstant("rateLow", "rate-crossed-low-threshold", 2251799813685248)
prop._addConstant("rateMajor", "rate-severity-major", 18014398509481984)
prop._addConstant("rateMinor", "rate-severity-minor", 9007199254740992)
prop._addConstant("rateRecovering", "rate-recovering", 562949953421312)
prop._addConstant("rateWarn", "rate-severity-warning", 4503599627370496)
prop._addConstant("trendCrit", "trend-severity-critical", 281474976710656)
prop._addConstant("trendHigh", "trend-crossed-high-threshold", 8796093022208)
prop._addConstant("trendLow", "trend-crossed-low-threshold", 17592186044416)
prop._addConstant("trendMajor", "trend-severity-major", 140737488355328)
prop._addConstant("trendMinor", "trend-severity-minor", 70368744177664)
prop._addConstant("trendRecovering", "trend-recovering", 4398046511104)
prop._addConstant("trendWarn", "trend-severity-warning", 35184372088832)
prop._addConstant("unspecified", None, 0)
meta.props.add("admitThr", prop)
prop = PropMeta("str", "admitTr", "admitTr", 10931, PropCategory.IMPLICIT_TREND)
prop.label = "ingress admit packets trend"
prop.isOper = True
prop.isStats = True
meta.props.add("admitTr", prop)
prop = PropMeta("str", "childAction", "childAction", 4, PropCategory.CHILD_ACTION)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("deleteAll", "deleteall", 16384)
prop._addConstant("deleteNonPresent", "deletenonpresent", 8192)
prop._addConstant("ignore", "ignore", 4096)
meta.props.add("childAction", prop)
prop = PropMeta("str", "cnt", "cnt", 16212, PropCategory.REGULAR)
prop.label = "Number of Collections During this Interval"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("cnt", prop)
prop = PropMeta("str", "dn", "dn", 1, PropCategory.DN)
prop.label = "None"
prop.isDn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("dn", prop)
prop = PropMeta("str", "dropAvg", "dropAvg", 10955, PropCategory.IMPLICIT_AVG)
prop.label = "ingress drop packets average value"
prop.isOper = True
prop.isStats = True
meta.props.add("dropAvg", prop)
prop = PropMeta("str", "dropCum", "dropCum", 10951, PropCategory.IMPLICIT_CUMULATIVE)
prop.label = "ingress drop packets cumulative"
prop.isOper = True
prop.isStats = True
meta.props.add("dropCum", prop)
prop = PropMeta("str", "dropMax", "dropMax", 10954, PropCategory.IMPLICIT_MAX)
prop.label = "ingress drop packets maximum value"
prop.isOper = True
prop.isStats = True
meta.props.add("dropMax", prop)
prop = PropMeta("str", "dropMin", "dropMin", 10953, PropCategory.IMPLICIT_MIN)
prop.label = "ingress drop packets minimum value"
prop.isOper = True
prop.isStats = True
meta.props.add("dropMin", prop)
prop = PropMeta("str", "dropPer", "dropPer", 10952, PropCategory.IMPLICIT_PERIODIC)
prop.label = "ingress drop packets periodic"
prop.isOper = True
prop.isStats = True
meta.props.add("dropPer", prop)
prop = PropMeta("str", "dropRate", "dropRate", 10959, PropCategory.IMPLICIT_RATE)
prop.label = "ingress drop packets rate"
prop.isOper = True
prop.isStats = True
meta.props.add("dropRate", prop)
prop = PropMeta("str", "dropSpct", "dropSpct", 10956, PropCategory.IMPLICIT_SUSPECT)
prop.label = "ingress drop packets suspect count"
prop.isOper = True
prop.isStats = True
meta.props.add("dropSpct", prop)
prop = PropMeta("str", "dropThr", "dropThr", 10957, PropCategory.IMPLICIT_THRESHOLDED)
prop.label = "ingress drop packets thresholded flags"
prop.isOper = True
prop.isStats = True
prop.defaultValue = 0
prop.defaultValueStr = "unspecified"
prop._addConstant("avgCrit", "avg-severity-critical", 2199023255552)
prop._addConstant("avgHigh", "avg-crossed-high-threshold", 68719476736)
prop._addConstant("avgLow", "avg-crossed-low-threshold", 137438953472)
prop._addConstant("avgMajor", "avg-severity-major", 1099511627776)
prop._addConstant("avgMinor", "avg-severity-minor", 549755813888)
prop._addConstant("avgRecovering", "avg-recovering", 34359738368)
prop._addConstant("avgWarn", "avg-severity-warning", 274877906944)
prop._addConstant("cumulativeCrit", "cumulative-severity-critical", 8192)
prop._addConstant("cumulativeHigh", "cumulative-crossed-high-threshold", 256)
prop._addConstant("cumulativeLow", "cumulative-crossed-low-threshold", 512)
prop._addConstant("cumulativeMajor", "cumulative-severity-major", 4096)
prop._addConstant("cumulativeMinor", "cumulative-severity-minor", 2048)
prop._addConstant("cumulativeRecovering", "cumulative-recovering", 128)
prop._addConstant("cumulativeWarn", "cumulative-severity-warning", 1024)
prop._addConstant("lastReadingCrit", "lastreading-severity-critical", 64)
prop._addConstant("lastReadingHigh", "lastreading-crossed-high-threshold", 2)
prop._addConstant("lastReadingLow", "lastreading-crossed-low-threshold", 4)
prop._addConstant("lastReadingMajor", "lastreading-severity-major", 32)
prop._addConstant("lastReadingMinor", "lastreading-severity-minor", 16)
prop._addConstant("lastReadingRecovering", "lastreading-recovering", 1)
prop._addConstant("lastReadingWarn", "lastreading-severity-warning", 8)
prop._addConstant("maxCrit", "max-severity-critical", 17179869184)
prop._addConstant("maxHigh", "max-crossed-high-threshold", 536870912)
prop._addConstant("maxLow", "max-crossed-low-threshold", 1073741824)
prop._addConstant("maxMajor", "max-severity-major", 8589934592)
prop._addConstant("maxMinor", "max-severity-minor", 4294967296)
prop._addConstant("maxRecovering", "max-recovering", 268435456)
prop._addConstant("maxWarn", "max-severity-warning", 2147483648)
prop._addConstant("minCrit", "min-severity-critical", 134217728)
prop._addConstant("minHigh", "min-crossed-high-threshold", 4194304)
prop._addConstant("minLow", "min-crossed-low-threshold", 8388608)
prop._addConstant("minMajor", "min-severity-major", 67108864)
prop._addConstant("minMinor", "min-severity-minor", 33554432)
prop._addConstant("minRecovering", "min-recovering", 2097152)
prop._addConstant("minWarn", "min-severity-warning", 16777216)
prop._addConstant("periodicCrit", "periodic-severity-critical", 1048576)
prop._addConstant("periodicHigh", "periodic-crossed-high-threshold", 32768)
prop._addConstant("periodicLow", "periodic-crossed-low-threshold", 65536)
prop._addConstant("periodicMajor", "periodic-severity-major", 524288)
prop._addConstant("periodicMinor", "periodic-severity-minor", 262144)
prop._addConstant("periodicRecovering", "periodic-recovering", 16384)
prop._addConstant("periodicWarn", "periodic-severity-warning", 131072)
prop._addConstant("rateCrit", "rate-severity-critical", 36028797018963968)
prop._addConstant("rateHigh", "rate-crossed-high-threshold", 1125899906842624)
prop._addConstant("rateLow", "rate-crossed-low-threshold", 2251799813685248)
prop._addConstant("rateMajor", "rate-severity-major", 18014398509481984)
prop._addConstant("rateMinor", "rate-severity-minor", 9007199254740992)
prop._addConstant("rateRecovering", "rate-recovering", 562949953421312)
prop._addConstant("rateWarn", "rate-severity-warning", 4503599627370496)
prop._addConstant("trendCrit", "trend-severity-critical", 281474976710656)
prop._addConstant("trendHigh", "trend-crossed-high-threshold", 8796093022208)
prop._addConstant("trendLow", "trend-crossed-low-threshold", 17592186044416)
prop._addConstant("trendMajor", "trend-severity-major", 140737488355328)
prop._addConstant("trendMinor", "trend-severity-minor", 70368744177664)
prop._addConstant("trendRecovering", "trend-recovering", 4398046511104)
prop._addConstant("trendWarn", "trend-severity-warning", 35184372088832)
prop._addConstant("unspecified", None, 0)
meta.props.add("dropThr", prop)
prop = PropMeta("str", "dropTr", "dropTr", 10958, PropCategory.IMPLICIT_TREND)
prop.label = "ingress drop packets trend"
prop.isOper = True
prop.isStats = True
meta.props.add("dropTr", prop)
prop = PropMeta("str", "index", "index", 7102, PropCategory.REGULAR)
prop.label = "History Index"
prop.isConfig = True
prop.isAdmin = True
prop.isCreateOnly = True
prop.isNaming = True
meta.props.add("index", prop)
prop = PropMeta("str", "lastCollOffset", "lastCollOffset", 111, PropCategory.REGULAR)
prop.label = "Collection Length"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("lastCollOffset", prop)
prop = PropMeta("str", "modTs", "modTs", 7, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 0
prop.defaultValueStr = "never"
prop._addConstant("never", "never", 0)
meta.props.add("modTs", prop)
prop = PropMeta("str", "repIntvEnd", "repIntvEnd", 110, PropCategory.REGULAR)
prop.label = "Reporting End Time"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("repIntvEnd", prop)
prop = PropMeta("str", "repIntvStart", "repIntvStart", 109, PropCategory.REGULAR)
prop.label = "Reporting Start Time"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("repIntvStart", prop)
prop = PropMeta("str", "rn", "rn", 2, PropCategory.RN)
prop.label = "None"
prop.isRn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("rn", prop)
prop = PropMeta("str", "status", "status", 3, PropCategory.STATUS)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("created", "created", 2)
prop._addConstant("deleted", "deleted", 8)
prop._addConstant("modified", "modified", 4)
meta.props.add("status", prop)
meta.namingProps.append(getattr(meta.props, "index"))
def __init__(self, parentMoOrDn, index, markDirty=True, **creationProps):
namingVals = [index]
Mo.__init__(self, parentMoOrDn, markDirty, *namingVals, **creationProps)
# End of package file
# ##################################################
| [
"[email protected]"
] | |
dd84a5790a2a78e6f48019faaa8ff6e1469c0763 | 1ab7b3f2aa63de8488ce7c466a67d367771aa1f2 | /Ricardo_OS/Python_backend/venv/lib/python3.8/site-packages/werkzeug/wrappers/accept.py | 9605e637dc682aa6fb376053cb9a80387c566377 | [
"MIT"
] | permissive | icl-rocketry/Avionics | 9d39aeb11aba11115826fd73357b415026a7adad | 95b7a061eabd6f2b607fba79e007186030f02720 | refs/heads/master | 2022-07-30T07:54:10.642930 | 2022-07-10T12:19:10 | 2022-07-10T12:19:10 | 216,184,670 | 9 | 1 | MIT | 2022-06-27T10:17:06 | 2019-10-19T09:57:07 | C++ | UTF-8 | Python | false | false | 429 | py | import typing as t
import warnings
class AcceptMixin:
def __init__(self, *args: t.Any, **kwargs: t.Any) -> None:
warnings.warn(
"'AcceptMixin' is deprecated and will be removed in"
" Werkzeug 2.1. 'Request' now includes the functionality"
" directly.",
DeprecationWarning,
stacklevel=2,
)
super().__init__(*args, **kwargs) # type: ignore
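# Illustrative only (hypothetical subclass, not defined in this file): mixing
# the shim into a request class still works, but instantiation now emits the
# DeprecationWarning above.
#
#   from werkzeug.wrappers import Request
#
#   class MyRequest(AcceptMixin, Request):
#       pass
#
#   MyRequest(environ)  # DeprecationWarning: 'AcceptMixin' is deprecated ...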
| [
"[email protected]"
] | |
94f2d43841d6de8c61172de178f2cf83ea40e303 | b8a3e758eff2922ff6abc77947d879e3f6d1afa3 | /ws_moveit/build/moveit_resources/catkin_generated/pkg.develspace.context.pc.py | 1db6f9fe77fc24ca7b6f4cd26bc3b8b329be1584 | [] | no_license | rrowlands/ros-baxter-coffee | ab7a496186591e709f88ccfd3b9944428e652f3e | 32473c3012b7ec4f91194069303c85844cf1aae7 | refs/heads/master | 2016-09-05T20:58:20.428241 | 2013-12-02T23:10:44 | 2013-12-02T23:10:44 | 14,313,406 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 523 | py | # generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "/home/p/peth8881/robotics/ws_moveit/build/moveit_resources/include".split(';') if "/home/p/peth8881/robotics/ws_moveit/build/moveit_resources/include" != "" else []
PROJECT_CATKIN_DEPENDS = "".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "moveit_resources"
PROJECT_SPACE_DIR = "/home/p/peth8881/robotics/ws_moveit/devel"
PROJECT_VERSION = "0.5.0"
| [
"[email protected]"
] | |
7617908ed4d5cf7e73e7be822d4dd57e8b9b26c4 | 6e1549257568a0ca81b3fc5864e2e1fa65171b06 | /salarydk/models/inline_object84.py | d01ba31c4e2de0ed3ff37022127b34bb38a3e313 | [] | no_license | tdwizard/salarydk | 19d3453de8fbdd886a0189dbf232f98de971e18a | dcf5040101b3e576f1068ea104148651e5c66511 | refs/heads/master | 2023-08-05T05:40:09.561288 | 2021-09-24T09:41:43 | 2021-09-24T09:41:43 | 409,910,180 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,024 | py | # coding: utf-8
"""
Salary.dk API
This is the public API for Salary.dk. # General Our API is a JSON-based, REST-like API. Our webapp uses the exact same API, so everything you can do in our webapp, you can do through our API. However, we are slowly opening up the API, so not all endpoints are documented here yet. Only the endpoints documented here are stable. If there is some functionality you would like to access through our API, please contact us. The API is located at https://api.salary.dk. All requests must use TLS. In order to use the API on behalf of other users than yourself, you need to register as an API client. You do this by sending an e-mail to [email protected] with the name and purpose of your client. API-keys for each account can be obtained once logged in to Salary, under the settings for the Company. All endpoints are documented to be able to return the 500 error code. We strive to not return this error code, so if you do encounter this error code, it might mean there is an error on our side. In this case, do not hesitate to contact us. # Versioning, upgrade and deprecation policy Our API might change over time. In order to ensure a stable API, we follow these rules when changing the API. New fields might be added at any time to any response or as non-required parameters to any input. When adding input fields, we ensure the default behaviour when not supplying the field is the same as the previous version. In these cases, the version of an endpoint is not increased, since it is backwards compatible. Since we might add new fields to responses, be sure to use a JSON parser in your implementation. This ensures that any extra fields added are ignored by your implementation. We might add entirely new endpoints at any time. If we need to change an existing endpoint without being able to make it backwards compatible, we will add a new version of the endpoint, and mark the old as deprecated but still functional. We will then contact any users of the deprecated endpoint and ensure an upgrade is performed. Once all consumers have moved to the new endpoint version, the old one will be removed. We will not at any point change the meaning of any existing field, nor will we remove any field or endpoint without following the above deprecated procedure. However, we might add new types to existing enums at any time. # Cross-Origin Resource Sharing This API features Cross-Origin Resource Sharing (CORS) implemented in compliance with [W3C spec](https://www.w3.org/TR/cors/) - and that allows cross-domain communication from the browser. All responses have a wildcard same-origin which makes them completely public and accessible to everyone, including any code on any site, as long as the proper access token is passed. # Authentication All request require an access token. There are two ways to obtain an access token: * Logging in as a user. (this endpoint is not yet publicly available). * Using an API-key: [endpoint](#operation/APIClientLogin) Using one of these methods, you will obtain an access token. In all subsequest requests, this access token should be passed in the Authorization header. The access token is valid for around one hour, after which a new token should be obtained. You do not need to dispose of access tokens once created. They have a limited lifetime, and Salary.dk will automatically expire old ones. For some endpoints, the authorizedUserQuery security definition is used. This allows for passing the access token as a query parameter where it is not possible to pass it as a header. 
In particular, this is used for downloading files. <!-- ReDoc-Inject: <security-definitions> --> # noqa: E501
The version of the OpenAPI document: 1.0.0
Contact: [email protected]
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from salarydk.configuration import Configuration
class InlineObject84(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'company_id': 'str',
'force': 'bool',
'month': 'date'
}
attribute_map = {
'company_id': 'companyID',
'force': 'force',
'month': 'month'
}
def __init__(self, company_id=None, force=None, month=None, local_vars_configuration=None): # noqa: E501
"""InlineObject84 - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._company_id = None
self._force = None
self._month = None
self.discriminator = None
self.company_id = company_id
if force is not None:
self.force = force
self.month = month
@property
def company_id(self):
"""Gets the company_id of this InlineObject84. # noqa: E501
The company to perform a zero tax report for. # noqa: E501
:return: The company_id of this InlineObject84. # noqa: E501
:rtype: str
"""
return self._company_id
@company_id.setter
def company_id(self, company_id):
"""Sets the company_id of this InlineObject84.
The company to perform a zero tax report for. # noqa: E501
:param company_id: The company_id of this InlineObject84. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and company_id is None: # noqa: E501
raise ValueError("Invalid value for `company_id`, must not be `None`") # noqa: E501
self._company_id = company_id
@property
def force(self):
"""Gets the force of this InlineObject84. # noqa: E501
If true, force a zero tax report to run, even though internal checking says it should not run. # noqa: E501
:return: The force of this InlineObject84. # noqa: E501
:rtype: bool
"""
return self._force
@force.setter
def force(self, force):
"""Sets the force of this InlineObject84.
If true, force a zero tax report to run, even though internal checking says it should not run. # noqa: E501
:param force: The force of this InlineObject84. # noqa: E501
:type: bool
"""
self._force = force
@property
def month(self):
"""Gets the month of this InlineObject84. # noqa: E501
The date for the 1st of the month to perform a zero tax report for. # noqa: E501
:return: The month of this InlineObject84. # noqa: E501
:rtype: date
"""
return self._month
@month.setter
def month(self, month):
"""Sets the month of this InlineObject84.
The date for the 1st of the month to perform a zero tax report for. # noqa: E501
:param month: The month of this InlineObject84. # noqa: E501
:type: date
"""
if self.local_vars_configuration.client_side_validation and month is None: # noqa: E501
raise ValueError("Invalid value for `month`, must not be `None`") # noqa: E501
self._month = month
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, InlineObject84):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, InlineObject84):
return True
return self.to_dict() != other.to_dict()
| [
"[email protected]"
] | |
4b2039b2583b2258d2f0fea69a7ad4fcde28256d | 46ac0965941d06fde419a6f216db2a653a245dbd | /sdks/python/appcenter_sdk/models/AzureSubscriptionPatchRequest.py | 12120015c7f4061d120ebe159a0c58a00ab14fa1 | [
"MIT",
"Unlicense"
] | permissive | b3nab/appcenter-sdks | 11f0bab00d020abb30ee951f7656a3d7ed783eac | bcc19c998b5f648a147f0d6a593dd0324e2ab1ea | refs/heads/master | 2022-01-27T15:06:07.202852 | 2019-05-19T00:12:43 | 2019-05-19T00:12:43 | 187,386,747 | 0 | 3 | MIT | 2022-01-22T07:57:59 | 2019-05-18T17:29:21 | Python | UTF-8 | Python | false | false | 3,336 | py | # coding: utf-8
"""
App Center Client
Microsoft Visual Studio App Center API # noqa: E501
OpenAPI spec version: preview
Contact: [email protected]
Project Repository: https://github.com/b3nab/appcenter-sdks
"""
import pprint
import re # noqa: F401
import six
class AzureSubscriptionPatchRequest(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'is_billing': 'boolean'
}
attribute_map = {
'is_billing': 'is_billing'
}
def __init__(self, is_billing=None): # noqa: E501
"""AzureSubscriptionPatchRequest - a model defined in Swagger""" # noqa: E501
self._is_billing = None
self.discriminator = None
self.is_billing = is_billing
@property
def is_billing(self):
"""Gets the is_billing of this AzureSubscriptionPatchRequest. # noqa: E501
If the subscription is used for billing # noqa: E501
:return: The is_billing of this AzureSubscriptionPatchRequest. # noqa: E501
:rtype: boolean
"""
return self._is_billing
@is_billing.setter
def is_billing(self, is_billing):
"""Sets the is_billing of this AzureSubscriptionPatchRequest.
If the subscription is used for billing # noqa: E501
:param is_billing: The is_billing of this AzureSubscriptionPatchRequest. # noqa: E501
:type: boolean
"""
if is_billing is None:
raise ValueError("Invalid value for `is_billing`, must not be `None`") # noqa: E501
self._is_billing = is_billing
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, AzureSubscriptionPatchRequest):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| [
"[email protected]"
] | |
1d4ebcce0118f05541c3c6d3e01ae58b51dcc55a | 9df2fb0bc59ab44f026b0a2f5ef50c72b2fb2ceb | /sdk/paloaltonetworks/azure-mgmt-paloaltonetworksngfw/generated_samples/certificate_object_global_rulestack_delete_maximum_set_gen.py | 1f902b1ba35bfded952bc53f1fceaa215a018896 | [
"MIT",
"LGPL-2.1-or-later",
"LicenseRef-scancode-generic-cla"
] | permissive | openapi-env-test/azure-sdk-for-python | b334a2b65eeabcf9b7673879a621abb9be43b0f6 | f61090e96094cfd4f43650be1a53425736bd8985 | refs/heads/main | 2023-08-30T14:22:14.300080 | 2023-06-08T02:53:04 | 2023-06-08T02:53:04 | 222,384,897 | 1 | 0 | MIT | 2023-09-08T08:38:48 | 2019-11-18T07:09:24 | Python | UTF-8 | Python | false | false | 1,697 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
from azure.mgmt.paloaltonetworksngfw import PaloAltoNetworksNgfwMgmtClient
"""
# PREREQUISITES
pip install azure-identity
pip install azure-mgmt-paloaltonetworksngfw
# USAGE
python certificate_object_global_rulestack_delete_maximum_set_gen.py
Before run the sample, please set the values of the client ID, tenant ID and client secret
of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
AZURE_CLIENT_SECRET. For more info about how to get the value, please see:
https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
"""
def main():
client = PaloAltoNetworksNgfwMgmtClient(
credential=DefaultAzureCredential(),
subscription_id="SUBSCRIPTION_ID",
)
response = client.certificate_object_global_rulestack.begin_delete(
global_rulestack_name="praval",
name="armid1",
).result()
print(response)
# x-ms-original-file: specification/paloaltonetworks/resource-manager/PaloAltoNetworks.Cloudngfw/preview/2022-08-29-preview/examples/CertificateObjectGlobalRulestack_Delete_MaximumSet_Gen.json
if __name__ == "__main__":
main()
| [
"[email protected]"
] | |
6b3f498770a1dfc3845ef9db19d864e6ef3dbe55 | f98a2875e0cdc84341fe8e37b11336368a257fe7 | /agents/product.py | a86acbd28d5e0c5f8555128eb791223b5eb52c56 | [
"MIT"
] | permissive | anhnguyendepocen/PolicySpace2 | eaa83533b7ad599af677ce69353841e665b447d0 | d9a450e47651885ed103d3217dbedec484456d07 | refs/heads/master | 2023-08-28T04:55:40.834445 | 2021-10-21T18:50:03 | 2021-10-21T18:50:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 298 | py |
class Product:
def __init__(self, product_id, quantity, price):
self.product_id = product_id
self.quantity = quantity
self.price = price
def __repr__(self):
return 'Product ID: %d, Quantity: %d, Price: %.1f' % (self.product_id, self.quantity, self.price)
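if __name__ == '__main__':
    # Minimal usage sketch (illustrative, not part of the original module);
    # printing relies on __repr__ above.
    print(Product(1, 10, 2.5))  # Product ID: 1, Quantity: 10, Price: 2.5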
| [
"[email protected]"
] | |
934c1811d723d3bdea5bbf35168370e4e8d8215e | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03145/s047914434.py | 5d829eaafb0ffcbf83b9454c4e85b5e4fd118c6a | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 82 | py | a,b,c=map(int,input().split())
aa = min(a, b)           # the smaller of a and b
bb = min(c, max(a, b))   # the smaller of c and the larger of a and b
# aa and bb are now the two smallest sides; in a right triangle the hypotenuse
# is the longest side, so these are the legs.
print((aa * bb) // 2)
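# Worked example (illustrative): input "3 4 5" gives aa = 3, bb = 4 (5 is the
# hypotenuse), so the program prints 3 * 4 // 2 = 6.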
| [
"[email protected]"
] | |
d8b55cb94184067e2e3d57f95ab20936d5d86e5e | c200119f4180ddc17dcaeb87d8bad6399442a529 | /tests/src/miniblog/settings.py | 4a49139d88b610ef74757746972a748e912302d5 | [] | no_license | marekmalek/django-observer | 3f4ae6ba1482f649d4495a95b95d4ec74f8222f2 | 3b9e4aeaaa9cd4cc4af7a245a185fb18e89e181a | refs/heads/master | 2021-01-18T06:59:34.588359 | 2012-08-31T16:31:25 | 2012-08-31T16:31:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,009 | py | # Django settings for weblog project.
import os
import sys
ROOT=os.path.join(os.path.dirname(__file__), '../../')
app_path=os.path.realpath(os.path.join(ROOT, '../'))
if app_path not in sys.path:
sys.path.insert(0, app_path)
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
# ('Your Name', '[email protected]'),
)
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(ROOT, 'database.db'),
'USER': '',
'PASSWORD': '',
'HOST': '',
'PORT': '',
}
}
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'America/Chicago'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale
USE_L10N = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = ''
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = ''
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = '/static/'
# URL prefix for admin static files -- CSS, JavaScript and images.
# Make sure to use a trailing slash.
# Examples: "http://foo.com/static/admin/", "/static/admin/".
ADMIN_MEDIA_PREFIX = '/static/admin/'
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
os.path.join(ROOT, 'static'),
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = '4et6(22#@lgie4wogk)6um6^jklpkk0!z-l%uj&kvs*u2xrvfj%'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
)
ROOT_URLCONF = 'miniblog.urls'
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
os.path.join(ROOT, 'templates'),
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.admin',
'miniblog.autocmd',
'miniblog.blogs',
)
FIXTURE_DIRS = (
os.path.join(ROOT, 'fixtures'),
)
LOGIN_REDIRECT_URL = '/'
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'handlers': {
'mail_admins': {
'level': 'ERROR',
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
| [
"[email protected]"
] | |
cb0fbdb98f51edb323e77ac971a051e4e5dbf795 | 3cda2dc11e1b7b96641f61a77b3afde4b93ac43f | /test/training_service/config/metrics_test/trial.py | 43e3ac1b4d66f7bd96f307c7314cbfb226ab1cdc | [
"MIT"
] | permissive | Eurus-Holmes/nni | 6da51c352e721f0241c7fd26fa70a8d7c99ef537 | b84d25bec15ece54bf1703b1acb15d9f8919f656 | refs/heads/master | 2023-08-23T10:45:54.879054 | 2023-08-07T02:39:54 | 2023-08-07T02:39:54 | 163,079,164 | 3 | 2 | MIT | 2023-08-07T12:35:54 | 2018-12-25T12:04:16 | Python | UTF-8 | Python | false | false | 818 | py | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import time
import json
import argparse
import nni
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("--dict_metrics", action='store_true')
args = parser.parse_args()
if args.dict_metrics:
result_file = 'expected_metrics_dict.json'
else:
result_file = 'expected_metrics.json'
nni.get_next_parameter()
with open(result_file, 'r') as f:
m = json.load(f)
time.sleep(5)
for v in m['intermediate_result']:
time.sleep(1)
print('report_intermediate_result:', v)
nni.report_intermediate_result(v)
time.sleep(1)
print('report_final_result:', m['final_result'])
nni.report_final_result(m['final_result'])
print('done')
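    # Shape of the metrics file implied by the reads above (values illustrative,
    # not taken from the repository's fixtures):
    #   {"intermediate_result": [0.1, 0.2, 0.3], "final_result": 0.9}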
| [
"[email protected]"
] | |
b0504f6fbbe712366d92f283d5cbb43334f0bf11 | e4cbd82358ba5e8b4d4bacefa054e4ecda2d1517 | /config/settings_base.py | 622ff272b323de19ec535a9f28658818391172f6 | [] | no_license | mziegler/UssdDjangoDemo | a69ca95010443e5925fdf181904da05e9938bcc3 | 9b29eb562a7832aa6a033daf1bee8d99746ee93b | refs/heads/master | 2020-07-21T18:16:40.325034 | 2017-07-01T00:42:40 | 2017-07-01T00:42:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,480 | py | """
Django settings for djangoUSSD project.
Generated by 'django-admin startproject' using Django 1.9.4.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.9/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(PROJECT_DIR, ...)
PROJECT_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
CONFIG_DIR = os.path.join(PROJECT_DIR,'config')
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'q@^7+k@94i7&x58y(czx*&zw7g+x2i!7%hwmj^fr$qey(a^%e9'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
# Site Apps
'UssdHttp',
'UssdDemo',
]
MIDDLEWARE_CLASSES = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'config.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': ['UssdHttp/simulator/static'],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'config.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(CONFIG_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Session Settings
SESSION_SERIALIZER = 'django.contrib.sessions.serializers.PickleSerializer'
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = (
os.path.join(PROJECT_DIR,'UssdHttp/simulator/static'),
)
| [
"[email protected]"
] | |
2d507377a10d3350cc729739daf540151c9c4dc8 | 2e4169290bf115e62cebe1a51ce1dc1528bc2cd2 | /trunk/vlist/vlist.py | 703c11e359dcaf87b186f2001be0c4794c72d3e8 | [] | no_license | BGCX067/ezwidgets-svn-to-git | 6c96bb408369316d395f6c8836b8e7be063ae0d8 | 2864f45bc3e9d87b940b34d0fa6ce64e712c2df8 | refs/heads/master | 2021-01-13T09:49:25.511902 | 2015-12-28T14:19:53 | 2015-12-28T14:19:53 | 48,833,330 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,324 | py | #----------------------------------------------------------------------------
# Name: vlist.py
# Purpose: virtual list with mix-in ColumnSorter class
#
# Author: Egor Zindy
#
# Created: 26-June-2005
# Licence: public domain
#----------------------------------------------------------------------------
import wx
import wx.lib.mixins.listctrl as listmix
class VirtualList(wx.ListCtrl, listmix.ColumnSorterMixin):
def __init__(self, parent,columns,style=0):
wx.ListCtrl.__init__( self, parent, -1,
style=wx.LC_REPORT|wx.LC_VIRTUAL|style)
listmix.ColumnSorterMixin.__init__(self, len(columns))
self.itemDataMap={}
self.il = wx.ImageList(16, 16)
self.SetImageList(self.il, wx.IMAGE_LIST_SMALL)
self.il_symbols={}
#adding some art (sm_up and sm_dn are used by ColumnSorterMixin
#symbols can be added to self.il using SetSymbols
symbols={"sm_up":wx.ART_GO_UP,"sm_dn":wx.ART_GO_DOWN}
self.SetSymbols(symbols)
#building the columns
self.SetColumns(columns)
#---------------------------------------------------
# These methods are callbacks for implementing the
# "virtualness" of the list...
def OnGetItemText(self, item, col):
index=self.itemIndexMap[item]
s = self.itemDataMap[index][col]
return s
def OnGetItemImage(self, item):
return -1
def OnGetItemAttr(self, item):
return None
#---------------------------------------------------
    # These methods are used by the ColumnSorterMixin,
# see wx/lib/mixins/listctrl.py
def GetListCtrl(self):
return self
def GetSortImages(self):
return self.il_symbols["sm_dn"],self.il_symbols["sm_up"]
def SortItems(self,sorter=None):
r"""\brief a SortItem which works with virtual lists
The sorter is not actually used (should it?)
"""
#These are actually defined in ColumnSorterMixin
#col is the column which was clicked on and
#the sort flag is False for descending (Z->A)
#and True for ascending (A->Z).
col=self._col
#creating pairs [column item defined by col, key]
items=[]
for k,v in self.itemDataMap.items():
items.append([v[col],k])
#sort the pairs by value (first element), then by key (second element).
#Multiple same values are okay, because the keys are unique.
items.sort()
#getting the keys associated with each sorted item in a list
k=[key for value, key in items]
#False is descending (starting from last)
if self._colSortFlag[col]==False:
k.reverse()
#storing the keys as self.itemIndexMap (is used in OnGetItemText,Image,ItemAttr).
self.itemIndexMap=k
#redrawing the list
self.Refresh()
#---------------------------------------------------
# These methods should be used to interact with the
# controler
def SetItemMap(self,itemMap):
r"""\brief sets the items to be displayed in the control
\param itemMap a dictionary {id1:("item1","item2",...), id2:("item1","item2",...), ...} and ids are unique
"""
l=len(itemMap)
self.itemDataMap=itemMap
self.SetItemCount(l)
#This regenerates self.itemIndexMap and redraws the ListCtrl
self.SortItems()
def SetColumns(self,columns):
r"""\brief adds columns to the control
\param columns a list of columns (("name1",width1),("name2",width2),...)
"""
i=0
for name,s in columns:
self.InsertColumn(i, name)
self.SetColumnWidth(i, s)
i+=1
def SetSymbols(self,symbols,provider=wx.ART_TOOLBAR):
r"""\brief adds symbols to self.ImageList
Symbols are provided by the ArtProvider
\param symbols a dictionary {"name1":wx.ART_ADD_BOOKMARK,"name2":wx.ART_DEL_BOOKMARK,...}
\param provider an optional provider
"""
for k,v in symbols.items():
self.il_symbols[k]=self.il.Add(wx.ArtProvider_GetBitmap(v,provider,(16,16)))
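# Minimal usage sketch (illustrative comment, not part of the original module):
#
#   columns = (("Name", 160), ("Size", 80))
#   lst = VirtualList(parent, columns)            # 'parent' is any wx container
#   lst.SetItemMap({1: ("foo.txt", "12 KB"),
#                   2: ("bar.txt", "3 KB")})      # keys are unique ids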
| [
"[email protected]"
] | |
5f7b5a15c9442a8a6d69e574837dd9b9db1641db | 329bf886f90cdcc5b083d2ab47c529f5df95767b | /survey/views.py | 7a2375bebe6f6c999d7383dd539267dda614e1e5 | [] | no_license | leliel12/otree_saral | f4a16073479836df36789a58a311a8dc0e2fd7f5 | d4c91e1b9451460a656f270fe9f540bf811a9a32 | refs/heads/master | 2021-01-10T08:39:39.278589 | 2015-10-26T00:53:29 | 2015-10-26T00:53:29 | 43,258,370 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 697 | py | # -*- coding: utf-8 -*-
from __future__ import division
from . import models
from ._builtin import Page, WaitPage
from otree.common import Currency as c, currency_range
from .models import Constants
class Question(Page):
form_model = models.Player
form_fields = ["name", "age", "email", "gender", "major",
"location_of_your_partners_influence_your_decisions",
"working_in_a_location_of_their_choice_more_less_to_the_team",
"partners_in_location_their_choice_worked_harder_than_the_lab",
"I_work_best_in", "risks_in_everyday_life", "risks_in_financial_decision"]
page_sequence = [Question]
| [
"[email protected]"
] | |
879d3e52d3b63ee8f078a3a5f876d4b96ca5aba3 | 3dc60bbcb27600ffe7baa4e6187fe2c71bb7b5ab | /Python/to-lower-case.py | ca69091671f3380ba24c1920aca7d39718fe6f48 | [
"MIT"
] | permissive | phucle2411/LeetCode | 33f3cc69fada711545af4c7366eda5d250625120 | ba84c192fb9995dd48ddc6d81c3153488dd3c698 | refs/heads/master | 2022-01-14T16:49:50.116398 | 2019-06-12T23:41:29 | 2019-06-12T23:41:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 203 | py | # https://leetcode.com/problems/to-lower-case/submissions/
class Solution:
def toLowerCase(self, str):
"""
:type str: str
:rtype: str
"""
return str.lower()
| [
"[email protected]"
] | |
d0cfe89b6ef336648599b637bbfbfa48e759b3f5 | 19cec240505e27546cb9b10104ecb16cc2454702 | /linux/test/python/stat.py | 92948f7462ef0f06b81fcd9bffeaa35ac9a7e81c | [] | no_license | imosts/flume | 1a9b746c5f080c826c1f316a8008d8ea1b145a89 | a17b987c5adaa13befb0fd74ac400c8edbe62ef5 | refs/heads/master | 2021-01-10T09:43:03.931167 | 2016-03-09T12:09:53 | 2016-03-09T12:09:53 | 53,101,798 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 124 | py |
import flume.flmos as flmo
import sys
for f in sys.argv[1:]:
    ls = flmo.stat_file(f)
print "%s => %s" % (f, ls)
| [
"imosts"
] | imosts |
8b6b1b686c656f460928930a4a0b4fa4374f8ad9 | 18e48f22f88fe80ce54d12fdbf9d05a7ca5bd65a | /0x11-python-network_1/7-error_code.py | ad1698d9aff563aff2ccaec553148dfecf84b193 | [] | no_license | SantiagoHerreG/holbertonschool-higher_level_programming | 426c4bc9bc080a81b72d2f740c8ed2eb365023eb | ca2612ef3be92a60764d584cf39de3a2ba310f84 | refs/heads/master | 2020-07-22T19:33:48.507287 | 2020-02-14T04:34:00 | 2020-02-14T04:34:00 | 207,305,022 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 342 | py | #!/usr/bin/python3
"""takes in a URL, sends a request to the URL and displays the body of
the response"""
import requests
import sys
if __name__ == "__main__":
url = sys.argv[1]
res = requests.get(url)
if res.status_code == requests.codes.ok:
print(res.text)
else:
print("Error code:", res.status_code)
| [
"[email protected]"
] | |
a800f9d568a1d7598f3cae018badde0c06ea9409 | 8578ae5be776b49559fa95ce30f6b45b6a82b73a | /test/functional/p2p_fingerprint.py | 0a572d97cfb88494d434474850b03427f50dd5ed | [
"MIT"
] | permissive | devcoin/core | 3f9f177bd9d5d2cc54ff95a981cfe88671206ae2 | f67e8b058b4316dd491615dc3f8799a45f396f4a | refs/heads/master | 2023-05-25T03:42:03.998451 | 2023-05-24T07:59:22 | 2023-05-24T08:02:14 | 21,529,485 | 16 | 13 | MIT | 2022-01-07T17:04:18 | 2014-07-05T22:42:13 | C | UTF-8 | Python | false | false | 5,061 | py | #!/usr/bin/env python3
# Copyright (c) 2017-2020 The Bitcoin Core and Devcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test various fingerprinting protections.
If a stale block more than a month old or its header is requested by a peer,
the node should pretend that it does not have it to avoid fingerprinting.
"""
import time
from test_framework.blocktools import (create_block, create_coinbase)
from test_framework.messages import CInv, MSG_BLOCK
from test_framework.p2p import (
P2PInterface,
msg_headers,
msg_block,
msg_getdata,
msg_getheaders,
p2p_lock,
)
from test_framework.test_framework import DevcoinTestFramework
from test_framework.util import (
assert_equal,
)
class P2PFingerprintTest(DevcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 1
# Build a chain of blocks on top of given one
def build_chain(self, nblocks, prev_hash, prev_height, prev_median_time):
blocks = []
for _ in range(nblocks):
coinbase = create_coinbase(prev_height + 1)
block_time = prev_median_time + 1
block = create_block(int(prev_hash, 16), coinbase, block_time)
block.solve()
blocks.append(block)
prev_hash = block.hash
prev_height += 1
prev_median_time = block_time
return blocks
# Send a getdata request for a given block hash
def send_block_request(self, block_hash, node):
msg = msg_getdata()
msg.inv.append(CInv(MSG_BLOCK, block_hash))
node.send_message(msg)
# Send a getheaders request for a given single block hash
def send_header_request(self, block_hash, node):
msg = msg_getheaders()
msg.hashstop = block_hash
node.send_message(msg)
# Checks that stale blocks timestamped more than a month ago are not served
# by the node while recent stale blocks and old active chain blocks are.
# This does not currently test that stale blocks timestamped within the
# last month but that have over a month's worth of work are also withheld.
def run_test(self):
node0 = self.nodes[0].add_p2p_connection(P2PInterface())
# Set node time to 60 days ago
self.nodes[0].setmocktime(int(time.time()) - 60 * 24 * 60 * 60)
# Generating a chain of 10 blocks
block_hashes = self.nodes[0].generatetoaddress(10, self.nodes[0].get_deterministic_priv_key().address)
# Create longer chain starting 2 blocks before current tip
height = len(block_hashes) - 2
block_hash = block_hashes[height - 1]
block_time = self.nodes[0].getblockheader(block_hash)["mediantime"] + 1
new_blocks = self.build_chain(5, block_hash, height, block_time)
# Force reorg to a longer chain
node0.send_message(msg_headers(new_blocks))
node0.wait_for_getdata([x.sha256 for x in new_blocks])
for block in new_blocks:
node0.send_and_ping(msg_block(block))
# Check that reorg succeeded
assert_equal(self.nodes[0].getblockcount(), 13)
stale_hash = int(block_hashes[-1], 16)
# Check that getdata request for stale block succeeds
self.send_block_request(stale_hash, node0)
node0.wait_for_block(stale_hash, timeout=3)
# Check that getheader request for stale block header succeeds
self.send_header_request(stale_hash, node0)
node0.wait_for_header(hex(stale_hash), timeout=3)
# Longest chain is extended so stale is much older than chain tip
self.nodes[0].setmocktime(0)
block_hash = int(self.nodes[0].generatetoaddress(1, self.nodes[0].get_deterministic_priv_key().address)[-1], 16)
assert_equal(self.nodes[0].getblockcount(), 14)
node0.wait_for_block(block_hash, timeout=3)
# Request for very old stale block should now fail
with p2p_lock:
node0.last_message.pop("block", None)
self.send_block_request(stale_hash, node0)
node0.sync_with_ping()
assert "block" not in node0.last_message
# Request for very old stale block header should now fail
with p2p_lock:
node0.last_message.pop("headers", None)
self.send_header_request(stale_hash, node0)
node0.sync_with_ping()
assert "headers" not in node0.last_message
# Verify we can fetch very old blocks and headers on the active chain
block_hash = int(block_hashes[2], 16)
self.send_block_request(block_hash, node0)
self.send_header_request(block_hash, node0)
node0.sync_with_ping()
self.send_block_request(block_hash, node0)
node0.wait_for_block(block_hash, timeout=3)
self.send_header_request(block_hash, node0)
node0.wait_for_header(hex(block_hash), timeout=3)
if __name__ == '__main__':
P2PFingerprintTest().main()
| [
"[email protected]"
] | |
1c1dcd8bc185c5981370cc6412b274be30918a26 | d2c4934325f5ddd567963e7bd2bdc0673f92bc40 | /tests/model_control/detailed/transf_RelativeDifference/model_control_one_enabled_RelativeDifference_PolyTrend_Seasonal_Minute_MLP.py | 613e814baf612b7e6b06d1d12091e2333056e4bd | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | jmabry/pyaf | 797acdd585842474ff4ae1d9db5606877252d9b8 | afbc15a851a2445a7824bf255af612dc429265af | refs/heads/master | 2020-03-20T02:14:12.597970 | 2018-12-17T22:08:11 | 2018-12-17T22:08:11 | 137,104,552 | 0 | 0 | BSD-3-Clause | 2018-12-17T22:08:12 | 2018-06-12T17:15:43 | Python | UTF-8 | Python | false | false | 172 | py | import pyaf.tests.model_control.test_ozone_custom_models_enabled as testmod
testmod.build_model(['RelativeDifference'], ['PolyTrend'], ['Seasonal_Minute'], ['MLP']) | [
"[email protected]"
] | |
59378ed1c249261ad53db470074838f10644f261 | 3381d3d1b70bd88374e75d90197d0202945bbade | /authentication/views.py | 1ea82fb55ee882e019ac000c9ca14bf94b9c33ca | [] | no_license | PHONGLEX/djangorestframework_quizapi | 30d5011b67a484a525c94071672f29ed2b0cb700 | c9f7b4ebdc00188533a0a5f44c13594011729fa4 | refs/heads/master | 2023-08-02T00:58:35.647091 | 2021-10-01T09:17:05 | 2021-10-01T09:17:05 | 412,402,225 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,554 | py | import jsonpickle
import jwt
from rest_framework import generics, status
from rest_framework.views import APIView
from rest_framework_simplejwt.tokens import RefreshToken
from rest_framework.response import Response
from rest_framework.permissions import IsAuthenticated
from django.contrib import auth
from django.utils.http import urlsafe_base64_encode, urlsafe_base64_decode
from django.utils.encoding import smart_str, force_bytes, DjangoUnicodeDecodeError
from django.contrib.auth.tokens import PasswordResetTokenGenerator
from django.contrib.sites.shortcuts import get_current_site
from django.conf import settings
from django.urls import reverse
from drf_yasg import openapi
from drf_yasg.utils import swagger_auto_schema
from .models import User
from .serializers import *
from .tasks import send_email_task
class RegisterView(generics.GenericAPIView):
serializer_class = RegisterSerializer
def post(self, request):
serializer = self.serializer_class(data=request.data)
serializer.is_valid(raise_exception=True)
serializer.save()
user_data = serializer.data
user = User.objects.get(email=user_data['email'])
token = RefreshToken.for_user(user)
site = get_current_site(request).domain
url = reverse('email-verify')
link = 'http://' + site + url + "?token=" + str(token)
body = "Hi" + user.email + "\n Please use the link below to verify your account " + link
data = {
'subject': "Verify your account",
"body": body,
"to": user.email
}
send_email_task.delay(data)
return Response({'message': "We've sent you an email to verify your account"}, status=status.HTTP_201_CREATED)
class EmailVerificationView(generics.GenericAPIView):
serializer_class = EmailVerificationSerializer
token_param = openapi.Parameter('token', openapi.IN_QUERY, description="token param", type=openapi.TYPE_STRING)
@swagger_auto_schema(manual_parameters=[token_param])
def get(self, request):
token = request.GET.get('token')
try:
            payload = jwt.decode(token, settings.SECRET_KEY, algorithms=["HS256"])  # PyJWT expects a list of allowed algorithms
user = User.objects.get(id=payload['user_id'])
if not user.is_verified:
user.is_verified = True
user.save()
return Response({"message":"Successfully activate"}, status=status.HTTP_200_OK)
except jwt.exceptions.DecodeError as e:
return Response({"error": "Invalid token, please request a new one"}, status=status.HTTP_400_BAD_REQUEST)
except jwt.exceptions.ExpiredSignatureError as e:
return Response({"error": "Token is expired, please request a new one"}, status=status.HTTP_400_BAD_REQUEST)
class LoginView(generics.GenericAPIView):
serializer_class = LoginSerializer
def post(self, request):
serializer = self.serializer_class(data=request.data)
serializer.is_valid(raise_exception=True)
return Response(serializer.data, status=status.HTTP_200_OK)
class ResetPasswordView(generics.GenericAPIView):
serializer_class = ResetPasswordSerializer
def post(self, request):
email = request.data.get('email')
user = User.objects.filter(email=email)
if user.exists():
user = user[0]
uidb64 = urlsafe_base64_encode(force_bytes(jsonpickle.encode(user)))
token = PasswordResetTokenGenerator().make_token(user)
url = reverse('reset-password-confirm', kwargs={
'uidb64': uidb64,
'token': token
})
site = get_current_site(request).domain
link = 'http://' + site + url
body = "Hi " + user.email + "\n Please use the link below to reset your password " + link
data = {
'subject': "Reset your password",
"body": body,
"to": user.email
}
send_email_task.delay(data)
return Response({'message': "We've sent you an email to reset your password"}, status=status.HTTP_200_OK)
class CheckPasswordResetTokenView(APIView):
def post(self, request, uidb64, token):
try:
obj = smart_str(urlsafe_base64_decode(uidb64))
user = jsonpickle.decode(obj)
if not PasswordResetTokenGenerator().check_token(user, token):
return Response({'error': "Invalid token, please request a new one"}, status=status.HTTP_400_BAD_REQUEST)
return Response({'success': True, 'uidb64': uidb64, 'token': token}, status=status.HTTP_200_OK)
except Exception as e:
            return Response({'error': "Invalid token, please request a new one"}, status=status.HTTP_400_BAD_REQUEST)
class SetNewPasswordView(generics.GenericAPIView):
serializer_class = SetNewPasswordSerializer
def patch(self, request):
serializer = self.serializer_class(data=request.data)
serializer.is_valid(raise_exception=True)
return Response({"message": "Changed password successfully"}, status=status.HTTP_200_OK)
class LogoutView(generics.GenericAPIView):
serializer_class = LogoutSerializer
permission_classes = (IsAuthenticated,)
def post(self, request):
serializer = self.serializer_class(data=request.data)
serializer.is_valid(raise_exception=True)
serializer.save()
return Response(status=status.HTTP_204_NO_CONTENT)
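# Hedged URL-wiring sketch: the route names 'email-verify' and
# 'reset-password-confirm' are reversed in the views above, so the project's
# urls.py is assumed to contain entries like these (paths are illustrative):
#   path('email-verify/', EmailVerificationView.as_view(), name='email-verify'),
#   path('reset-password-confirm/<uidb64>/<token>/',
#        CheckPasswordResetTokenView.as_view(), name='reset-password-confirm'),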
| [
"[email protected]"
] | |
30d040917850dbfe213295e61066ca08ae2f4ddd | 509fc176af52f46ce62f54a6f63c7c27b1bd0c2c | /djangofiltertest/djangofiltertest/apps/posts_areas/api_v1/views.py | d95acb524992ab1ca2a3395a52d48a793ab3f132 | [
"MIT"
] | permissive | gonzaloamadio/django-filter-test | 8b16fdb989a8141ba5852cd4804148cb6b153e86 | 7b9dbc36ca248e2113deaac03e824b123a31a4ba | refs/heads/master | 2022-12-10T11:35:07.684916 | 2019-01-24T09:19:21 | 2019-01-24T09:19:21 | 167,159,577 | 0 | 0 | MIT | 2022-12-08T01:33:33 | 2019-01-23T09:54:40 | Python | UTF-8 | Python | false | false | 270 | py | from posts_areas.api_v1.serializers import PostAreaSerializer
from posts_areas.models import PostArea
from djangofiltertest.libs.views import APIViewSet
class PostAreaViewSet(APIViewSet):
queryset = PostArea.objects.all()
serializer_class = PostAreaSerializer
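# Hedged wiring sketch (assumes APIViewSet is router-compatible like a DRF
# ModelViewSet; the URL prefix below is illustrative):
#   from rest_framework.routers import DefaultRouter
#   router = DefaultRouter()
#   router.register(r'post-areas', PostAreaViewSet)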
| [
"[email protected]"
] | |
dc90c334f8f9314e070b2c504c81d5c4b72155a3 | bc9f66258575dd5c8f36f5ad3d9dfdcb3670897d | /lib/googlecloudsdk/core/util/tokenizer.py | 1a403b82516d25b5b6213598941a3ba5f7672ed2 | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | google-cloud-sdk-unofficial/google-cloud-sdk | 05fbb473d629195f25887fc5bfaa712f2cbc0a24 | 392abf004b16203030e6efd2f0af24db7c8d669e | refs/heads/master | 2023-08-31T05:40:41.317697 | 2023-08-23T18:23:16 | 2023-08-23T18:23:16 | 335,182,594 | 9 | 2 | NOASSERTION | 2022-10-29T20:49:13 | 2021-02-02T05:47:30 | Python | UTF-8 | Python | false | false | 2,291 | py | # -*- coding: utf-8 -*- #
# Copyright 2013 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A utility for tokenizing strings."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import io
__all__ = ['Literal', 'Separator', 'Tokenize']
_ESCAPE_CHAR = '\\'
class Literal(str):
pass
class Separator(str):
pass
def Tokenize(string, separators):
"""Tokenizes the given string based on a list of separator strings.
This is similar to splitting the string based on separators, except
that this function retains the separators. The separators are
wrapped in Separator objects and everything else is wrapped in
Literal objects.
For example, Tokenize('a:b,c:d', [':', ',']) returns [Literal('a'),
Separator(':'), Literal('b'), Separator(','), Literal('c'),
Separator(':'), Literal('d')].
Args:
string: str, The string to partition.
separators: [str], A list of strings on which to partition.
Raises:
ValueError: If an unterminated escape sequence is at the
end of the input.
Returns:
[tuple], A list of strings which can be of types Literal or
Separator.
"""
tokens = []
curr = io.StringIO()
buf = io.StringIO(string)
while True:
c = buf.read(1)
if not c:
break
elif c == _ESCAPE_CHAR:
c = buf.read(1)
if c:
curr.write(c)
continue
else:
raise ValueError('illegal escape sequence at index {0}: {1}'.format(
buf.tell() - 1, string))
elif c in separators:
tokens.append(Literal(curr.getvalue()))
tokens.append(Separator(c))
curr = io.StringIO()
else:
curr.write(c)
tokens.append(Literal(curr.getvalue()))
return tokens
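# Escape-handling sketch (input string is illustrative): a backslash protects
# the following character from being treated as a separator.
#   Tokenize(r'a\:b:c', [':'])
#   -> [Literal('a:b'), Separator(':'), Literal('c')]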
| [
"[email protected]"
] | |
ea11bf784f41f2baf536fbb111241ab1f1165160 | 66c7b0da6ee27ddce0943945503cdecf199f77a2 | /rllib/util/parameter_decay.py | 2df23cd677dcb3091464bf29c075df7a3d8bd9ee | [
"MIT"
] | permissive | tzahishimkin/extended-hucrl | 07609f9e9f9436121bcc64ff3190c966183a2cd9 | c144aeecba5f35ccfb4ec943d29d7092c0fa20e3 | refs/heads/master | 2023-07-09T22:57:28.682494 | 2021-08-24T08:50:16 | 2021-08-24T08:50:16 | 383,819,908 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,056 | py | """Implementation of a Parameter decay class."""
from abc import ABCMeta
import torch.jit
import torch.nn as nn
from rllib.util.neural_networks.utilities import inverse_softplus
class ParameterDecay(nn.Module, metaclass=ABCMeta):
"""Abstract class that implements the decay of a parameter."""
def __init__(self, start, end=None, decay=None):
super().__init__()
if not isinstance(start, torch.Tensor):
start = torch.tensor(start)
self.start = nn.Parameter(start, requires_grad=False)
if end is None:
end = start
if not isinstance(end, torch.Tensor):
end = torch.tensor(end)
self.end = nn.Parameter(end, requires_grad=False)
if decay is None:
decay = 1.0
if not isinstance(decay, torch.Tensor):
decay = torch.tensor(decay)
self.decay = nn.Parameter(decay, requires_grad=False)
self.step = 0
@torch.jit.export
def update(self):
"""Update parameter."""
self.step += 1
class Constant(ParameterDecay):
"""Constant parameter."""
def forward(self):
"""See `ParameterDecay.__call__'."""
return self.start
class Learnable(ParameterDecay):
"""Learnable parameter."""
positive: bool
def __init__(self, val, positive: bool = False):
self.positive = positive
if self.positive:
val = inverse_softplus(val).item()
super().__init__(val)
self.start.requires_grad = True
self.positive = positive
def forward(self):
"""See `ParameterDecay.__call__'."""
if self.positive:
return torch.nn.functional.softplus(self.start) + 1e-4
else:
return self.start
class ExponentialDecay(ParameterDecay):
"""Exponential decay of parameter."""
def forward(self):
"""See `ParameterDecay.__call__'."""
decay = torch.exp(-torch.tensor(1.0 * self.step) / self.decay)
return self.end + (self.start - self.end) * decay
class PolynomialDecay(ParameterDecay):
"""Polynomial Decay of a parameter.
It returns the minimum between start and end / step ** decay.
"""
def forward(self):
"""See `ParameterDecay.__call__'."""
return min(self.start, self.end / torch.tensor(self.step + 1.0) ** self.decay)
class LinearDecay(ParameterDecay):
"""Linear decay of parameter."""
def forward(self):
"""See `ParameterDecay.__call__'."""
return max(self.end, self.start - self.decay * self.step)
class LinearGrowth(ParameterDecay):
"""Linear decay of parameter."""
def forward(self):
"""See `ParameterDecay.__call__'."""
return min(self.end, self.start + self.decay * self.step)
class OUNoise(ParameterDecay):
"""Ornstein-Uhlenbeck Noise process.
Parameters
----------
mean: Tensor
Mean of OU process.
std_deviation: Tensor
Standard Deviation of OU Process.
theta: float
Parameter of OU Process.
dt: float
Time discretization.
dim: Tuple
Dimensions of noise.
References
----------
https://www.wikipedia.org/wiki/Ornstein-Uhlenbeck_process.
"""
def __init__(self, mean=0, std_deviation=0.2, theta=0.15, dt=1e-2, dim=(1,)):
if not isinstance(mean, torch.Tensor):
mean = mean * torch.ones(dim)
self.mean = mean
if not isinstance(std_deviation, torch.Tensor):
std_deviation = std_deviation * torch.ones(dim)
self.std_dev = std_deviation
self.theta = theta
self.dt = dt
super().__init__(start=torch.zeros_like(mean))
def forward(self):
"""Compute Ornstein-Uhlenbeck sample."""
x_prev = self.start.data
x = (
x_prev
+ self.theta * (self.mean - x_prev) * self.dt
+ self.std_dev
* torch.sqrt(torch.tensor(self.dt))
* torch.randn(self.mean.shape)
)
self.start.data = x
return x
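# Hedged usage sketch (parameter values are illustrative):
#   noise = OUNoise(mean=0.0, std_deviation=0.2, dim=(2,))
#   samples = [noise() for _ in range(5)]  # temporally correlated noise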
| [
"[email protected]"
] | |
8e1f9eeaa8eb59e4b8fd5822047b9e320adc32db | e2c120b55ab149557679e554c1b0c55126e70593 | /python/imagej/tests/test_ImgLib2_ImgFactory.py | 6b95c5b2582e670492dc615ed54d7090c3ee9152 | [] | no_license | acardona/scripts | 30e4ca2ac87b9463e594beaecd6da74a791f2c22 | 72a18b70f9a25619b2dbf33699a7dc1421ad22c6 | refs/heads/master | 2023-07-27T14:07:37.457914 | 2023-07-07T23:13:40 | 2023-07-07T23:14:00 | 120,363,431 | 4 | 5 | null | 2023-05-02T11:20:49 | 2018-02-05T21:21:13 | Python | UTF-8 | Python | false | false | 859 | py | from net.imglib2.img.array import ArrayImgFactory
from net.imglib2.type.numeric.integer import UnsignedByteType, UnsignedShortType
from net.imglib2.util import Intervals
# An 8-bit 256x256x256 volume
img = ArrayImgFactory(UnsignedByteType()).create([256, 256, 256])
# Another image of the same type and dimensions, but empty
img2 = img.factory().create([img.dimension(d) for d in xrange(img.numDimensions())])
# Same, but easier reading of the image dimensions
img3 = img.factory().create(Intervals.dimensionsAsLongArray(img))
# Same, but use an existing img as an Interval from which to read out the dimensions
img4 = img.factory().create(img)
# Now we change the type: same kind of image and same dimensions,
# but crucially a different pixel type (16-bit) via a new ImgFactory
imgShorts = img.factory().imgFactory(UnsignedShortType()).create(img)
| [
"[email protected]"
] | |
89e2d90ba4eedda9c8b3ce40056dde57e0048c0c | e60487a8f5aad5aab16e671dcd00f0e64379961b | /python_stack/Algos/leetcode_30days/max_subarray.py | 05464124d8c978cb2d1c61f8ef20653a3b199cf1 | [] | no_license | reenadangi/python | 4fde31737e5745bc5650d015e3fa4354ce9e87a9 | 568221ba417dda3be7f2ef1d2f393a7dea6ccb74 | refs/heads/master | 2021-08-18T08:25:40.774877 | 2021-03-27T22:20:17 | 2021-03-27T22:20:17 | 247,536,946 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,473 | py | # Given an integer array nums, find the contiguous subarray (containing at least one number) which has the largest sum and return its sum.
# Example:
# Input: [-2,1,-3,4,-1,2,1,-5,4],
# Output: 6
# Explanation: [4,-1,2,1] has the largest sum = 6.
def findMaxIndex(nums):
    # Helper (unused by the functions below): index of the maximum element
    maxIndex = 0
    for i in range(1, len(nums)):
        if nums[maxIndex] < nums[i]:
            maxIndex = i
    return maxIndex
def maxCrossingSum(nums, l, m, h):
    # Include elements on left of mid.
    sm = 0
    left_sum = float('-inf')  # -inf avoids the asymmetric magic sentinels (-10000 / -1000)
    for i in range(m, l - 1, -1):
        sm = sm + nums[i]
        if sm > left_sum:
            left_sum = sm
    # Include elements on right of mid
    sm = 0
    right_sum = float('-inf')
    for i in range(m + 1, h + 1):
        sm = sm + nums[i]
        if sm > right_sum:
            right_sum = sm
    # Return sum of elements on left and right of mid
    return left_sum + right_sum
def max_subArray_divide(nums, lowest, highest):
    if lowest == highest:
        return nums[lowest]
    # Find middle point, then recurse on both halves and the crossing case
    mid = (lowest + highest) // 2
    left_sum = max_subArray_divide(nums, lowest, mid)
    right_sum = max_subArray_divide(nums, mid + 1, highest)
    cross_sum = maxCrossingSum(nums, lowest, mid, highest)
    return max(left_sum, right_sum, cross_sum)
def max_subArray(nums):
    # divide and conquer
    return max_subArray_divide(nums, 0, len(nums) - 1)
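# Alternative one-pass approach (Kadane's algorithm), included as a hedged
# sketch for comparison with the divide-and-conquer version above; it runs
# in O(n) rather than O(n log n).
def max_subArray_kadane(nums):
    best = cur = nums[0]
    for x in nums[1:]:
        cur = max(x, cur + x)  # extend the running subarray or restart at x
        best = max(best, cur)
    return best
print(max_subArray_kadane([-2, 1, -3, 4, -1, 2, 1, -5, 4]))  # expected: 6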
print(max_subArray([-2, 1, -3, 4, -1, 2, 1, -5, 4])) | [
"[email protected]"
] | |
8c4d47286298016368282b45a4cb4e2dc67954f7 | f27c49458bde84048e6008da8c52ca0f1ae711ce | /code/07-data-structures/simple_dict/playground.py | b8e36ebaf6c98c36c3e8c2912fe99193322d5f38 | [
"MIT"
] | permissive | talkpython/python-for-absolute-beginners-course | 54b0f48b5edbf7755de6ca688a8e737ba16dc2fc | 1930dab0a91526863dc92c3e05fe3c7ec63480e1 | refs/heads/master | 2022-11-24T03:02:32.759177 | 2022-11-08T14:30:08 | 2022-11-08T14:30:08 | 225,979,578 | 2,287 | 1,059 | MIT | 2022-11-07T19:45:15 | 2019-12-05T00:02:31 | Python | UTF-8 | Python | false | false | 547 | py | # Data structures
# 1. Dictionaries
# 2. Lists / arrays [1,1,7,11]
# 3. Sets
# Lists
lst = [1, 1, 11, 7]
print(lst)
lst.append(2)
print(lst)
lst.remove(11)
print(lst)
lst.sort()
print(lst)
# Sets:
st = {1, 1, 11, 7}
st.add(1)
st.add(1)
st.add(11)
print(st)
# Dictionaries
d = {
'bob': 0,
'sarah': 0,
'defeated_by': {'paper', 'wolf'},
'defeats': {'scissors', 'sponge'}
}
print(d['bob'])
d['bob'] += 1
print(d['bob'])
print(d)
d['michael'] = 7
print(d)
print(f"You are defeated by {d['defeated_by']}")
print(d.get('other', 42))
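# A common follow-up (illustrative): iterate over key/value pairs
# for name, value in d.items():
#     print(name, value)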
| [
"[email protected]"
] | |
8dc61e64bb66988a363127243cb1b02813e86140 | a6a78f59f442c18449befc89be2b193e37b695d6 | /ivi/rigol/rigolDP800.py | fd988186043295e67c8db82281a56f6215da0aef | [
"MIT"
] | permissive | hohe/python-ivi | fa0b4b1232f4fca92bd046d2ae322e49959f8a83 | 0fe6d7d5aaf9ebc97085f73e25b0f3051ba996b6 | refs/heads/master | 2021-01-21T08:55:35.470107 | 2013-12-23T09:27:02 | 2013-12-23T09:27:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,380 | py | """
Python Interchangeable Virtual Instrument Library
Copyright (c) 2012 Alex Forencich
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from .. import ivi
from .. import dcpwr
from .. import scpi
TrackingType = set(['floating'])
TriggerSourceMapping = {
'immediate': 'imm',
'bus': 'bus'}
class rigolDP800(scpi.dcpwr.Base, scpi.dcpwr.Trigger, scpi.dcpwr.SoftwareTrigger,
scpi.dcpwr.Measurement):
"Rigol DP800 series IVI DC power supply driver"
def __init__(self, *args, **kwargs):
super(rigolDP800, self).__init__(*args, **kwargs)
self._instrument_id = 'Rigol Technologies,DP800'
self._output_count = 3
self._output_range = [[(8.0, 5.0)], [(30.0, 2.0)], [(-30.0, 2.0)]]
self._output_range_name = [['P8V'], ['P30V'], ['N30V']]
self._output_ovp_max = [8.8, 33.0, -33.0]
self._output_ocp_max = [5.5, 2.2, 2.2]
self._output_voltage_max = [8.0, 30.0, -30.0]
self._output_current_max = [5.0, 2.0, 2.0]
self._memory_size = 10
self._identity_description = "Rigol DP800 series DC power supply driver"
self._identity_identifier = ""
self._identity_revision = ""
self._identity_vendor = ""
self._identity_instrument_manufacturer = "Rigol Technologies"
self._identity_instrument_model = ""
self._identity_instrument_firmware_revision = ""
self._identity_specification_major_version = 3
self._identity_specification_minor_version = 0
self._identity_supported_instrument_models = ['DP831A', 'DP832', 'DP832A']
ivi.add_method(self, 'memory.save',
self._memory_save)
ivi.add_method(self, 'memory.recall',
self._memory_recall)
self._init_outputs()
def _memory_save(self, index):
index = int(index)
if index < 1 or index > self._memory_size:
            raise ivi.OutOfRangeException()
if not self._driver_operation_simulate:
self._write("*sav %d" % index)
def _memory_recall(self, index):
index = int(index)
if index < 1 or index > self._memory_size:
            raise ivi.OutOfRangeException()
if not self._driver_operation_simulate:
self._write("*rcl %d" % index)
| [
"[email protected]"
] | |
3efb3fa0f33c9db9e23e81ccddbd12529703f1e8 | ddb8c14775dfbe9424691dabf1617273d118d317 | /catkin_ws/build/msg_check/catkin_generated/pkg.installspace.context.pc.py | d23c59c9aa6dcd0265af7cbe01246235712f19cc | [] | no_license | rishabhdevyadav/fastplanneroctomap | e8458aeb1f2d3b126d27dc57011c87ae4567687a | de9d7e49cb1004f3b01b7269dd398cf264ed92b4 | refs/heads/main | 2023-05-12T22:12:27.865900 | 2021-05-26T19:25:31 | 2021-05-26T19:25:31 | 356,674,577 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 542 | py | # generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "${prefix}/include;/usr/include/eigen3".split(';') if "${prefix}/include;/usr/include/eigen3" != "" else []
PROJECT_CATKIN_DEPENDS = "geometry_msgs;mav_msgs;nav_msgs;roscpp;rospy;sensor_msgs;message_runtime".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "-lmsg_check".split(';') if "-lmsg_check" != "" else []
PROJECT_NAME = "msg_check"
PROJECT_SPACE_DIR = "/home/rishabh/catkin_ws/install"
PROJECT_VERSION = "2.1.2"
| [
"[email protected]"
] | |
0e02e78b9bd8be2a809d040cede78b8f52514e05 | 3b84c4b7b16ccfd0154f8dcb75ddbbb6636373be | /google-cloud-sdk/lib/googlecloudsdk/third_party/apis/cloudfunctions/v2beta/cloudfunctions_v2beta_client.py | fcd7b507b5f649128645efa4b619ae74c347b2c0 | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | twistedpair/google-cloud-sdk | 37f04872cf1ab9c9ce5ec692d2201a93679827e3 | 1f9b424c40a87b46656fc9f5e2e9c81895c7e614 | refs/heads/master | 2023-08-18T18:42:59.622485 | 2023-08-15T00:00:00 | 2023-08-15T12:14:05 | 116,506,777 | 58 | 24 | null | 2022-02-14T22:01:53 | 2018-01-06T18:40:35 | Python | UTF-8 | Python | false | false | 21,995 | py | """Generated client library for cloudfunctions version v2beta."""
# NOTE: This file is autogenerated and should not be edited by hand.
from __future__ import absolute_import
from apitools.base.py import base_api
from googlecloudsdk.third_party.apis.cloudfunctions.v2beta import cloudfunctions_v2beta_messages as messages
class CloudfunctionsV2beta(base_api.BaseApiClient):
"""Generated client library for service cloudfunctions version v2beta."""
MESSAGES_MODULE = messages
BASE_URL = 'https://cloudfunctions.googleapis.com/'
MTLS_BASE_URL = 'https://cloudfunctions.mtls.googleapis.com/'
_PACKAGE = 'cloudfunctions'
_SCOPES = ['https://www.googleapis.com/auth/cloud-platform']
_VERSION = 'v2beta'
_CLIENT_ID = 'CLIENT_ID'
_CLIENT_SECRET = 'CLIENT_SECRET'
_USER_AGENT = 'google-cloud-sdk'
_CLIENT_CLASS_NAME = 'CloudfunctionsV2beta'
_URL_VERSION = 'v2beta'
_API_KEY = None
def __init__(self, url='', credentials=None,
get_credentials=True, http=None, model=None,
log_request=False, log_response=False,
credentials_args=None, default_global_params=None,
additional_http_headers=None, response_encoding=None):
"""Create a new cloudfunctions handle."""
url = url or self.BASE_URL
super(CloudfunctionsV2beta, self).__init__(
url, credentials=credentials,
get_credentials=get_credentials, http=http, model=model,
log_request=log_request, log_response=log_response,
credentials_args=credentials_args,
default_global_params=default_global_params,
additional_http_headers=additional_http_headers,
response_encoding=response_encoding)
self.projects_locations_functions = self.ProjectsLocationsFunctionsService(self)
self.projects_locations_operations = self.ProjectsLocationsOperationsService(self)
self.projects_locations_runtimes = self.ProjectsLocationsRuntimesService(self)
self.projects_locations = self.ProjectsLocationsService(self)
self.projects = self.ProjectsService(self)
class ProjectsLocationsFunctionsService(base_api.BaseApiService):
"""Service class for the projects_locations_functions resource."""
_NAME = 'projects_locations_functions'
def __init__(self, client):
super(CloudfunctionsV2beta.ProjectsLocationsFunctionsService, self).__init__(client)
self._upload_configs = {
}
def Create(self, request, global_params=None):
r"""Creates a new function. If a function with the given name already exists in the specified project, the long running operation will return `ALREADY_EXISTS` error.
Args:
request: (CloudfunctionsProjectsLocationsFunctionsCreateRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Operation) The response message.
"""
config = self.GetMethodConfig('Create')
return self._RunMethod(
config, request, global_params=global_params)
Create.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v2beta/projects/{projectsId}/locations/{locationsId}/functions',
http_method='POST',
method_id='cloudfunctions.projects.locations.functions.create',
ordered_params=['parent'],
path_params=['parent'],
query_params=['functionId'],
relative_path='v2beta/{+parent}/functions',
request_field='function',
request_type_name='CloudfunctionsProjectsLocationsFunctionsCreateRequest',
response_type_name='Operation',
supports_download=False,
)
def Delete(self, request, global_params=None):
r"""Deletes a function with the given name from the specified project. If the given function is used by some trigger, the trigger will be updated to remove this function.
Args:
request: (CloudfunctionsProjectsLocationsFunctionsDeleteRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Operation) The response message.
"""
config = self.GetMethodConfig('Delete')
return self._RunMethod(
config, request, global_params=global_params)
Delete.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v2beta/projects/{projectsId}/locations/{locationsId}/functions/{functionsId}',
http_method='DELETE',
method_id='cloudfunctions.projects.locations.functions.delete',
ordered_params=['name'],
path_params=['name'],
query_params=[],
relative_path='v2beta/{+name}',
request_field='',
request_type_name='CloudfunctionsProjectsLocationsFunctionsDeleteRequest',
response_type_name='Operation',
supports_download=False,
)
def GenerateDownloadUrl(self, request, global_params=None):
r"""Returns a signed URL for downloading deployed function source code. The URL is only valid for a limited period and should be used within 30 minutes of generation. For more information about the signed URL usage see: https://cloud.google.com/storage/docs/access-control/signed-urls.
Args:
request: (CloudfunctionsProjectsLocationsFunctionsGenerateDownloadUrlRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(GenerateDownloadUrlResponse) The response message.
"""
config = self.GetMethodConfig('GenerateDownloadUrl')
return self._RunMethod(
config, request, global_params=global_params)
GenerateDownloadUrl.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v2beta/projects/{projectsId}/locations/{locationsId}/functions/{functionsId}:generateDownloadUrl',
http_method='POST',
method_id='cloudfunctions.projects.locations.functions.generateDownloadUrl',
ordered_params=['name'],
path_params=['name'],
query_params=[],
relative_path='v2beta/{+name}:generateDownloadUrl',
request_field='generateDownloadUrlRequest',
request_type_name='CloudfunctionsProjectsLocationsFunctionsGenerateDownloadUrlRequest',
response_type_name='GenerateDownloadUrlResponse',
supports_download=False,
)
def GenerateUploadUrl(self, request, global_params=None):
r"""Returns a signed URL for uploading a function source code. For more information about the signed URL usage see: https://cloud.google.com/storage/docs/access-control/signed-urls. Once the function source code upload is complete, the used signed URL should be provided in CreateFunction or UpdateFunction request as a reference to the function source code. When uploading source code to the generated signed URL, please follow these restrictions: * Source file type should be a zip file. * No credentials should be attached - the signed URLs provide access to the target bucket using internal service identity; if credentials were attached, the identity from the credentials would be used, but that identity does not have permissions to upload files to the URL. When making a HTTP PUT request, these two headers need to be specified: * `content-type: application/zip` And this header SHOULD NOT be specified: * `Authorization: Bearer YOUR_TOKEN`.
Args:
request: (CloudfunctionsProjectsLocationsFunctionsGenerateUploadUrlRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(GenerateUploadUrlResponse) The response message.
"""
config = self.GetMethodConfig('GenerateUploadUrl')
return self._RunMethod(
config, request, global_params=global_params)
GenerateUploadUrl.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v2beta/projects/{projectsId}/locations/{locationsId}/functions:generateUploadUrl',
http_method='POST',
method_id='cloudfunctions.projects.locations.functions.generateUploadUrl',
ordered_params=['parent'],
path_params=['parent'],
query_params=[],
relative_path='v2beta/{+parent}/functions:generateUploadUrl',
request_field='generateUploadUrlRequest',
request_type_name='CloudfunctionsProjectsLocationsFunctionsGenerateUploadUrlRequest',
response_type_name='GenerateUploadUrlResponse',
supports_download=False,
)
def Get(self, request, global_params=None):
r"""Returns a function with the given name from the requested project.
Args:
request: (CloudfunctionsProjectsLocationsFunctionsGetRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Function) The response message.
"""
config = self.GetMethodConfig('Get')
return self._RunMethod(
config, request, global_params=global_params)
Get.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v2beta/projects/{projectsId}/locations/{locationsId}/functions/{functionsId}',
http_method='GET',
method_id='cloudfunctions.projects.locations.functions.get',
ordered_params=['name'],
path_params=['name'],
query_params=[],
relative_path='v2beta/{+name}',
request_field='',
request_type_name='CloudfunctionsProjectsLocationsFunctionsGetRequest',
response_type_name='Function',
supports_download=False,
)
def GetIamPolicy(self, request, global_params=None):
r"""Gets the access control policy for a resource. Returns an empty policy if the resource exists and does not have a policy set.
Args:
request: (CloudfunctionsProjectsLocationsFunctionsGetIamPolicyRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Policy) The response message.
"""
config = self.GetMethodConfig('GetIamPolicy')
return self._RunMethod(
config, request, global_params=global_params)
GetIamPolicy.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v2beta/projects/{projectsId}/locations/{locationsId}/functions/{functionsId}:getIamPolicy',
http_method='GET',
method_id='cloudfunctions.projects.locations.functions.getIamPolicy',
ordered_params=['resource'],
path_params=['resource'],
query_params=['options_requestedPolicyVersion'],
relative_path='v2beta/{+resource}:getIamPolicy',
request_field='',
request_type_name='CloudfunctionsProjectsLocationsFunctionsGetIamPolicyRequest',
response_type_name='Policy',
supports_download=False,
)
def List(self, request, global_params=None):
r"""Returns a list of functions that belong to the requested project.
Args:
request: (CloudfunctionsProjectsLocationsFunctionsListRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(ListFunctionsResponse) The response message.
"""
config = self.GetMethodConfig('List')
return self._RunMethod(
config, request, global_params=global_params)
List.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v2beta/projects/{projectsId}/locations/{locationsId}/functions',
http_method='GET',
method_id='cloudfunctions.projects.locations.functions.list',
ordered_params=['parent'],
path_params=['parent'],
query_params=['filter', 'orderBy', 'pageSize', 'pageToken'],
relative_path='v2beta/{+parent}/functions',
request_field='',
request_type_name='CloudfunctionsProjectsLocationsFunctionsListRequest',
response_type_name='ListFunctionsResponse',
supports_download=False,
)
def Patch(self, request, global_params=None):
r"""Updates existing function.
Args:
request: (CloudfunctionsProjectsLocationsFunctionsPatchRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Operation) The response message.
"""
config = self.GetMethodConfig('Patch')
return self._RunMethod(
config, request, global_params=global_params)
Patch.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v2beta/projects/{projectsId}/locations/{locationsId}/functions/{functionsId}',
http_method='PATCH',
method_id='cloudfunctions.projects.locations.functions.patch',
ordered_params=['name'],
path_params=['name'],
query_params=['updateMask'],
relative_path='v2beta/{+name}',
request_field='function',
request_type_name='CloudfunctionsProjectsLocationsFunctionsPatchRequest',
response_type_name='Operation',
supports_download=False,
)
def SetIamPolicy(self, request, global_params=None):
r"""Sets the access control policy on the specified resource. Replaces any existing policy. Can return `NOT_FOUND`, `INVALID_ARGUMENT`, and `PERMISSION_DENIED` errors.
Args:
request: (CloudfunctionsProjectsLocationsFunctionsSetIamPolicyRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Policy) The response message.
"""
config = self.GetMethodConfig('SetIamPolicy')
return self._RunMethod(
config, request, global_params=global_params)
SetIamPolicy.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v2beta/projects/{projectsId}/locations/{locationsId}/functions/{functionsId}:setIamPolicy',
http_method='POST',
method_id='cloudfunctions.projects.locations.functions.setIamPolicy',
ordered_params=['resource'],
path_params=['resource'],
query_params=[],
relative_path='v2beta/{+resource}:setIamPolicy',
request_field='setIamPolicyRequest',
request_type_name='CloudfunctionsProjectsLocationsFunctionsSetIamPolicyRequest',
response_type_name='Policy',
supports_download=False,
)
def TestIamPermissions(self, request, global_params=None):
r"""Returns permissions that a caller has on the specified resource. If the resource does not exist, this will return an empty set of permissions, not a `NOT_FOUND` error. Note: This operation is designed to be used for building permission-aware UIs and command-line tools, not for authorization checking. This operation may "fail open" without warning.
Args:
request: (CloudfunctionsProjectsLocationsFunctionsTestIamPermissionsRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(TestIamPermissionsResponse) The response message.
"""
config = self.GetMethodConfig('TestIamPermissions')
return self._RunMethod(
config, request, global_params=global_params)
TestIamPermissions.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v2beta/projects/{projectsId}/locations/{locationsId}/functions/{functionsId}:testIamPermissions',
http_method='POST',
method_id='cloudfunctions.projects.locations.functions.testIamPermissions',
ordered_params=['resource'],
path_params=['resource'],
query_params=[],
relative_path='v2beta/{+resource}:testIamPermissions',
request_field='testIamPermissionsRequest',
request_type_name='CloudfunctionsProjectsLocationsFunctionsTestIamPermissionsRequest',
response_type_name='TestIamPermissionsResponse',
supports_download=False,
)
class ProjectsLocationsOperationsService(base_api.BaseApiService):
"""Service class for the projects_locations_operations resource."""
_NAME = 'projects_locations_operations'
def __init__(self, client):
super(CloudfunctionsV2beta.ProjectsLocationsOperationsService, self).__init__(client)
self._upload_configs = {
}
def Get(self, request, global_params=None):
r"""Gets the latest state of a long-running operation. Clients can use this method to poll the operation result at intervals as recommended by the API service.
Args:
request: (CloudfunctionsProjectsLocationsOperationsGetRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Operation) The response message.
"""
config = self.GetMethodConfig('Get')
return self._RunMethod(
config, request, global_params=global_params)
Get.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v2beta/projects/{projectsId}/locations/{locationsId}/operations/{operationsId}',
http_method='GET',
method_id='cloudfunctions.projects.locations.operations.get',
ordered_params=['name'],
path_params=['name'],
query_params=[],
relative_path='v2beta/{+name}',
request_field='',
request_type_name='CloudfunctionsProjectsLocationsOperationsGetRequest',
response_type_name='Operation',
supports_download=False,
)
def List(self, request, global_params=None):
r"""Lists operations that match the specified filter in the request. If the server doesn't support this method, it returns `UNIMPLEMENTED`. NOTE: the `name` binding allows API services to override the binding to use different resource name schemes, such as `users/*/operations`. To override the binding, API services can add a binding such as `"/v1/{name=users/*}/operations"` to their service configuration. For backwards compatibility, the default name includes the operations collection id, however overriding users must ensure the name binding is the parent resource, without the operations collection id.
Args:
request: (CloudfunctionsProjectsLocationsOperationsListRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(ListOperationsResponse) The response message.
"""
config = self.GetMethodConfig('List')
return self._RunMethod(
config, request, global_params=global_params)
List.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v2beta/projects/{projectsId}/locations/{locationsId}/operations',
http_method='GET',
method_id='cloudfunctions.projects.locations.operations.list',
ordered_params=['name'],
path_params=['name'],
query_params=['filter', 'pageSize', 'pageToken'],
relative_path='v2beta/{+name}/operations',
request_field='',
request_type_name='CloudfunctionsProjectsLocationsOperationsListRequest',
response_type_name='ListOperationsResponse',
supports_download=False,
)
class ProjectsLocationsRuntimesService(base_api.BaseApiService):
"""Service class for the projects_locations_runtimes resource."""
_NAME = 'projects_locations_runtimes'
def __init__(self, client):
super(CloudfunctionsV2beta.ProjectsLocationsRuntimesService, self).__init__(client)
self._upload_configs = {
}
def List(self, request, global_params=None):
r"""Returns a list of runtimes that are supported for the requested project.
Args:
request: (CloudfunctionsProjectsLocationsRuntimesListRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(ListRuntimesResponse) The response message.
"""
config = self.GetMethodConfig('List')
return self._RunMethod(
config, request, global_params=global_params)
List.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v2beta/projects/{projectsId}/locations/{locationsId}/runtimes',
http_method='GET',
method_id='cloudfunctions.projects.locations.runtimes.list',
ordered_params=['parent'],
path_params=['parent'],
query_params=['filter'],
relative_path='v2beta/{+parent}/runtimes',
request_field='',
request_type_name='CloudfunctionsProjectsLocationsRuntimesListRequest',
response_type_name='ListRuntimesResponse',
supports_download=False,
)
class ProjectsLocationsService(base_api.BaseApiService):
"""Service class for the projects_locations resource."""
_NAME = 'projects_locations'
def __init__(self, client):
super(CloudfunctionsV2beta.ProjectsLocationsService, self).__init__(client)
self._upload_configs = {
}
def List(self, request, global_params=None):
r"""Lists information about the supported locations for this service.
Args:
request: (CloudfunctionsProjectsLocationsListRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(ListLocationsResponse) The response message.
"""
config = self.GetMethodConfig('List')
return self._RunMethod(
config, request, global_params=global_params)
List.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v2beta/projects/{projectsId}/locations',
http_method='GET',
method_id='cloudfunctions.projects.locations.list',
ordered_params=['name'],
path_params=['name'],
query_params=['filter', 'pageSize', 'pageToken'],
relative_path='v2beta/{+name}/locations',
request_field='',
request_type_name='CloudfunctionsProjectsLocationsListRequest',
response_type_name='ListLocationsResponse',
supports_download=False,
)
class ProjectsService(base_api.BaseApiService):
"""Service class for the projects resource."""
_NAME = 'projects'
def __init__(self, client):
super(CloudfunctionsV2beta.ProjectsService, self).__init__(client)
self._upload_configs = {
}
| [
"[email protected]"
] | |
bd0443ac664d583b35f574b914b7d097a427430c | e5897d5b5eb3b018bec8703f01cfc666acea5b38 | /isy994/items/variables/variable_state.py | 9ff4bd1fca3dd2830528fb6ce10c205ddf9ea290 | [
"MIT"
] | permissive | mjcumming/ISY994v5 | 5de41ce7e12be44c35dc0818daf639bb8c0e5487 | 928d8359fd15363e15b8daa402fbb1f5f53f3c45 | refs/heads/master | 2022-05-19T06:10:59.788621 | 2022-05-08T13:16:29 | 2022-05-08T13:16:29 | 187,289,265 | 4 | 10 | MIT | 2021-06-26T13:34:23 | 2019-05-17T22:36:55 | Python | UTF-8 | Python | false | false | 219 | py | #! /usr/bin/env python
from .variable_base import Variable_Base
class Variable_State(Variable_Base):
def __init__(self, container, variable_info):
Variable_Base.__init__(self, container, variable_info)
| [
"[email protected]"
] | |
1e547431f1304fab875e263c577d86e91b92a9ce | 747f759311d404af31c0f80029e88098193f6269 | /addons/base_partner_surname/partner.py | 254932e02e9d5fd896883ede6d5d21855b6e91c1 | [] | no_license | sgeerish/sirr_production | 9b0d0f7804a928c0c582ddb4ccb7fcc084469a18 | 1081f3a5ff8864a31b2dcd89406fac076a908e78 | refs/heads/master | 2020-05-19T07:21:37.047958 | 2013-09-15T13:03:36 | 2013-09-15T13:03:36 | 9,648,444 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 69 | py | /home/openerp/production/extra-addons/base_partner_surname/partner.py | [
"[email protected]"
] | |
70964f54b2c252bf34810cb4b378fc77f351ef7d | ccf94dcb6b1500fcbbd56964ae8c4832a496b8b3 | /python/baiduads-sdk-auto/baiduads/dpacreative/model/bat_set_range_response_wrapper_body.py | bf3a52bf83e136d4770a30049ef79667567a1bfd | [
"Apache-2.0"
] | permissive | baidu/baiduads-sdk | 24c36b5cf3da9362ec5c8ecd417ff280421198ff | 176363de5e8a4e98aaca039e4300703c3964c1c7 | refs/heads/main | 2023-06-08T15:40:24.787863 | 2023-05-20T03:40:51 | 2023-05-20T03:40:51 | 446,718,177 | 16 | 11 | Apache-2.0 | 2023-06-02T05:19:40 | 2022-01-11T07:23:17 | Python | UTF-8 | Python | false | false | 10,962 | py | """
dev2 api schema
'dev2.baidu.com' api schema # noqa: E501
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from baiduads.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
OpenApiModel
)
from baiduads.exceptions import ApiAttributeError
class BatSetRangeResponseWrapperBody(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
}
validations = {
}
@cached_property
def additional_properties_type():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
"""
return (bool, date, datetime, dict, float, int, list, str, none_type,) # noqa: E501
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
return {
'data': ([dict],), # noqa: E501
}
@cached_property
def discriminator():
return None
attribute_map = {
'data': 'data', # noqa: E501
}
read_only_vars = {
}
_composed_schemas = {}
@classmethod
@convert_js_args_to_python_args
def _from_openapi_data(cls, *args, **kwargs): # noqa: E501
"""BatSetRangeResponseWrapperBody - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
data ([dict]): [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
self = super(OpenApiModel, cls).__new__(cls)
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
return self
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, *args, **kwargs): # noqa: E501
"""BatSetRangeResponseWrapperBody - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
data ([dict]): [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
if var_name in self.read_only_vars:
raise ApiAttributeError(f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate "
f"class with read only attributes.")
| [
"[email protected]"
] | |
b59641920ce0787bdda82226455c999d8bfa5e60 | 23b3c698412f71a2878ae586f5599f2b6e38c980 | /source-code/bokeh/ising.py | 1dab727e38f3966bbe674a61422b703eec89d4d9 | [
"CC-BY-4.0"
] | permissive | gjbex/Scientific-Python | 9b7ae7b3398cc9358d1f530ca24243b63f3c01f0 | 02d24e6e22cfbc5b73429a2184ecbdfcd514c8fc | refs/heads/master | 2023-08-17T10:17:39.963630 | 2023-05-12T14:51:32 | 2023-05-12T14:51:32 | 221,184,612 | 13 | 13 | CC-BY-4.0 | 2023-08-03T08:04:57 | 2019-11-12T09:55:27 | Jupyter Notebook | UTF-8 | Python | false | false | 964 | py | #!/usr/bin/env python
from bokeh.layouts import column
from bokeh.models import CustomJS, ColumnDataSource, Slider
from bokeh.plotting import curdoc, figure
import numpy as np
x = np.linspace(-3.0, 3.0, 301)
y = x.copy()
default_beta = 4.0
y_tanh = np.tanh(default_beta*x)
source = ColumnDataSource(data=dict(x=x, y=y_tanh))
def callback(attr, old_value, new_value):
beta = new_value
new_data = {
'x': source.data['x'],
'y': np.tanh(beta*source.data['x']),
}
source.data = new_data
plot = figure(width=300, height=300)
plot.line(x, y, line_width=0.5, line_dash='3 3')
plot.line('x', 'y', source=source)
plot.xaxis.axis_label = '$$x$$'
plot.yaxis.axis_label = r'$$\tanh \beta x$$'
slider = Slider(start=0.2, end=6.0, value=default_beta, step=0.01,
title=r'$$\beta$$')
slider.on_change('value', callback)
layout = column(children=[plot, slider])
curdoc().add_root(layout)
| [
"[email protected]"
] | |
94ad119004a4fd0ddd961a8ed9e3b31bb811fd1a | 1b7f4cd39bf7e4a2cf667ac13244e5138ee86cb2 | /agents/game/human_agent.py | b04040eb7f8fea24819cc7ddc959c01950f3bda1 | [
"MIT"
] | permissive | cjreynol/willsmith | 02f793003a914a21b181839bbd58108046f312d6 | 39d3b8caef8ba5825f3a0272c7fd61a2f78ef2b5 | refs/heads/master | 2020-07-15T13:25:57.613707 | 2018-06-12T00:18:19 | 2018-06-12T00:18:19 | 205,572,039 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,390 | py | from agents.displays.human_display import HumanDisplay
from willsmith.game_agent import GameAgent
class HumanAgent(GameAgent):
"""
Agent that relies on user input to make action choices.
It relies on its action_prompt attribute, set externally by the
simulator, to provide the proper prompts and to construct the action.
"""
GUI_DISPLAY = None #HumanDisplay is not yet ready
INPUT_PROMPT = None
INPUT_PARSER = None
def __init__(self, agent_id, use_gui, action):
super().__init__(agent_id, use_gui)
self.add_input_info(action)
def add_input_info(self, action):
HumanAgent.INPUT_PROMPT = action.INPUT_PROMPT
HumanAgent.INPUT_PARSER = action.parse_action
def search(self, state, allotted_time):
"""
Prompt the player for an action until a legal action is chosen, then
return it.
"""
legal_actions = state.get_legal_actions()
player_action = HumanAgent.INPUT_PARSER(input(HumanAgent.INPUT_PROMPT))
while player_action not in legal_actions:
print("Last move was not legal, please try again.\n")
player_action = HumanAgent.INPUT_PARSER(input(HumanAgent.INPUT_PROMPT))
return player_action
def _take_action(self, action):
pass
def _reset(self):
pass
def __str__(self):
return ""
| [
"[email protected]"
] | |
70f4e03aa8a2930c56a4ec84979dc5bb1e836e28 | 745a605d52556d5195b7cdbf871fc1011b2dc9cd | /backend/mete/models.py | 92b2828ee3753d37d2fa5baa61d5d362342dc181 | [] | no_license | annikahannig/meteme | 96a6b919fbdac20bef7e13e1d101130cd1805b7b | 16ca646904a31833e8d1156be8f554e11ff0d37a | refs/heads/master | 2021-06-25T05:34:23.517379 | 2017-05-09T20:33:54 | 2017-05-09T20:33:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,526 | py | from __future__ import unicode_literals
from collections import OrderedDict
from django.db import models
from django.conf import settings
from djmoney.models.fields import MoneyField
from moneyed import Money
from solo.models import SingletonModel
from store import models as store_models
from unidecode import unidecode
import re
class Account(models.Model):
"""
User account:
We manage user accounts, separate from 'Users', because
they don't have a password, may not have an email,
and have an avatar.
"""
user = models.OneToOneField(settings.AUTH_USER_MODEL,
null=False,
blank=False,
on_delete=models.CASCADE)
avatar = models.ImageField(upload_to='avatars/',
default='/static/store/img/default_avatar.png',
null=True, blank=True)
balance = MoneyField(max_digits=10,
decimal_places=2,
default_currency='EUR',
default=Money(0, 'EUR'))
is_locked = models.BooleanField(default=False)
is_disabled = models.BooleanField(default=False)
created_at = models.DateTimeField(auto_now_add=True, blank=True)
updated_at = models.DateTimeField(auto_now=True, blank=True)
def __unicode__(self):
return self.name
@property
def name(self):
return self.user.username
@property
def canonical_name(self):
"""Return normalized username"""
name = unidecode(self.name) # Transliterate umlauts
name = re.sub(r'\W', '', name).lower()
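        # e.g. u"Jürgen W." -> u"Jurgen W." -> "jurgenw"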
return name
class Barcode(models.Model):
"""
Barcode(s) can be associated with an account
or with a product.
"""
number = models.CharField(unique=True, max_length=42)
product = models.ForeignKey(store_models.Product,
null=True,
blank=True,
on_delete=models.CASCADE)
account = models.ForeignKey(Account,
null=True,
blank=True,
on_delete=models.CASCADE)
class KeyPair(models.Model):
"""
A user may supply a public/private key pair,
so we can encrypt the audit log.
If a user does not have a key pair, no personal
log will be created.
The the keys are created on the client using the NaCL
crypto library.
The private key is encrypted with a key derived from a password / pin,
using the 'Password-Base Key Derivation Function 2' (PBKDF2) with
at least 3 million iterations.
The first 4 bytes of the encrypted private key determin
additional hashing rounds as a measure against rainbow tables.
"""
user = models.OneToOneField(settings.AUTH_USER_MODEL,
null=False,
blank=False,
on_delete=models.CASCADE)
crypto_version = models.PositiveSmallIntegerField(default=1)
private_key = models.CharField(max_length=68,
blank=False,
null=False,
unique=True)
public_key = models.CharField(max_length=64,
blank=False,
null=False,
unique=True)
verify_key = models.CharField(max_length=64,
blank=False,
null=False,
unique=True)
created_at = models.DateTimeField(auto_now_add=True, blank=True)
updated_at = models.DateTimeField(auto_now=True, blank=True)
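

# Illustrative only (not referenced by the models above): the key-derivation
# step the KeyPair docstring describes, sketched with the stdlib. Salt handling
# and output length are assumptions; the 3-million-iteration floor comes from
# the docstring.
def _derive_wrapping_key(password, salt, iterations=3000000):
    """Derive the symmetric key that encrypts the NaCl private key (sketch)."""
    import hashlib
    return hashlib.pbkdf2_hmac('sha256', password.encode('utf-8'), salt,
                               iterations, dklen=32)
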
class TransactionManager(models.Manager):
def get_queryset(self):
"""
Override default queryset to order transactions
by date DESC
"""
qs = super(TransactionManager, self).get_queryset()
qs = qs.order_by('-created_at')
return qs
def donations(self):
transactions = self.get_queryset()
return transactions.filter(product__isnull=False)
def donations_grouped_months(self):
""" Get donations, grouped by month """
donations = self.donations()
groups = OrderedDict()
for transaction in donations:
key = (transaction.created_at.year, transaction.created_at.month)
if groups.get(key) is None:
groups[key] = []
groups[key].append(transaction)
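        # e.g. OrderedDict([((2019, 11), [<Transaction>, ...]), ...]) with the
        # newest month first, since the queryset is ordered by -created_at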
return groups
def grouped(self):
transactions = self.get_queryset()
groups = OrderedDict()
for transaction in transactions:
date = transaction.created_at
date = date.replace(hour=0, minute=0, second=0, microsecond=0)
if groups.get(date) is None:
groups[date] = []
groups[date].append(transaction)
return groups
def grouped_month(self):
transactions = self.get_queryset()
groups = OrderedDict()
for transaction in transactions:
key = (transaction.created_at.year, transaction.created_at.month)
if groups.get(key) is None:
groups[key] = []
groups[key].append(transaction)
return groups
class Transaction(models.Model):
"""
Log Transactions.
Do not store the associated account.
This is just an audit log.
"""
amount = MoneyField(max_digits=10,
decimal_places=2,
default_currency='EUR')
product = models.ForeignKey('store.Product', null=True, blank=True)
product_name = models.CharField(null=True, blank=True, max_length=80)
created_at = models.DateTimeField(auto_now_add=True, blank=True)
objects = TransactionManager()
class UserSetting(models.Model):
"""
Configure per user preferences, like:
Limiting categories. (This is it for now)
"""
user = models.OneToOneField('auth.User',
null=False,
blank=False,
on_delete=models.CASCADE)
categories = models.ManyToManyField('store.Category',
blank=True)
class Settings(SingletonModel):
price_set = models.ForeignKey('store.PriceSet', null=True, blank=False, default=1)
| [
"[email protected]"
] | |
f0701b76e300b53794a20d383a41472054a14abe | c459f4dd7b198ec8d8db8379726a5b2650be6636 | /regis/apps.py | b08ff1b7229ca929d911653fbb1a9cf748bcef33 | [] | no_license | jittat/admapp | 4c712182cd06e82efab6c2513fb865e5d00feae8 | 38bf299015ae423b4551f6b1206742ee176b8b77 | refs/heads/master | 2023-06-10T03:23:41.174264 | 2023-06-09T19:41:03 | 2023-06-09T19:41:03 | 101,953,724 | 10 | 4 | null | 2023-04-21T22:48:55 | 2017-08-31T03:12:04 | Python | UTF-8 | Python | false | false | 85 | py | from django.apps import AppConfig
class RegisConfig(AppConfig):
name = 'regis'
| [
"[email protected]"
] | |
5cf9e4839963c2c5dace99204f707d7e8424f061 | 14c5bd382ac9ffbfa4ae34f244bca6685f3cd18c | /apps/geotracker/models.py | d3eff90a8929fa59880c39ed709ce3692949a42b | [] | no_license | redhog/arwen | e8705e978588163554c83e3278297506c1ffb2ce | 342daa97a72c0776d4dfe27196adfe66d4dff63c | refs/heads/master | 2021-01-17T13:08:09.392613 | 2011-08-26T09:21:40 | 2011-08-26T09:21:40 | 2,084,644 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,235 | py | # -*- coding: utf-8 -*-
import django.contrib.auth.models
from django.utils.translation import ugettext_lazy as _
import django.contrib.gis.db.models
import geotracker.geos
import linkableobject.models
class Vehicle(django.contrib.gis.db.models.Model, linkableobject.models.LinkableModelMixin):
objects = django.contrib.gis.db.models.GeoManager()
name = django.contrib.gis.db.models.CharField(_('name'), max_length=256)
description = django.contrib.gis.db.models.TextField(_('description'))
owner = django.db.models.ForeignKey(django.contrib.auth.models.User, related_name="owned_vehicles")
def __unicode__(self):
return self.name
class TimePoint(django.contrib.gis.db.models.Model, linkableobject.models.LinkableModelMixin):
objects = django.contrib.gis.db.models.GeoManager()
timestamp = django.contrib.gis.db.models.DateTimeField()
point = django.contrib.gis.db.models.PointField(geography=True)
@property
def as_geosfeature(self):
return geotracker.geos.GEOSFeature(self.point, self.id, timestamp = self.timestamp)
@property
def as_geoscollection(self):
return geotracker.geos.GEOSFeatureCollection([self.as_geosfeature])
def __unicode__(self):
return "%s @ %s" % (self.point, self.timestamp)
class Path(django.contrib.gis.db.models.Model, linkableobject.models.LinkableModelMixin):
objects = django.contrib.gis.db.models.GeoManager()
timestamp = django.contrib.gis.db.models.DateTimeField()
name = django.contrib.gis.db.models.CharField(_('name'), max_length=256)
description = django.contrib.gis.db.models.TextField(_('description'))
@property
def as_geosfeature(self):
return geotracker.geos.GEOSFeature(django.contrib.gis.geos.LineString([point.point for point in self.points.order_by('timestamp')]),
self.id,
name = self.name,
description = self.description)
@property
def as_geoscollection(self):
res = geotracker.geos.GEOSFeatureCollection([self.as_geosfeature])
for point in self.points.order_by('timestamp'):
res += point.as_geoscollection
return res
def __unicode__(self):
return self.name
class PathPoint(TimePoint):
path = django.contrib.gis.db.models.ForeignKey(Path, related_name='points')
path.verbose_related_name = _("Points")
@property
def as_geosfeature(self):
return geotracker.geos.GEOSFeature(self.point, self.id, timestamp = self.timestamp, path = self.path.id)
class Journey(django.contrib.gis.db.models.Model, linkableobject.models.LinkableModelMixin):
objects = django.contrib.gis.db.models.GeoManager()
vehicle = django.db.models.ForeignKey(Vehicle, related_name="journeys")
vehicle.verbose_related_name = _("Journeys")
owner = django.db.models.ForeignKey(django.contrib.auth.models.User, related_name="organized_journeys")
owner.verbose_related_name = _("Organized journeys")
name = django.contrib.gis.db.models.CharField(_('name'), max_length=256)
description = django.contrib.gis.db.models.TextField(_('description'))
@property
def as_geosfeature(self):
return geotracker.geos.GEOSFeature(django.contrib.gis.geos.MultiLineString([path.as_geosfeature.geometry for path in self.paths.order_by('timestamp')]),
self.id,
vehicle = self.vehicle.id,
owner = self.owner.id,
name = self.name,
description = self.description)
@property
def as_geoscollection(self):
res = geotracker.geos.GEOSFeatureCollection([self.as_geosfeature])
for path in self.paths.order_by('timestamp'):
res += path.as_geoscollection
return res
def __unicode__(self):
return self.name
class JourneyPath(Path):
journey = django.contrib.gis.db.models.ForeignKey(Journey, related_name='paths', verbose_name=_('Journey'))
journey.verbose_related_name = _("Paths")
| [
"[email protected]"
] | |
e1516bbfce063e8d56341ca439e8cf70dfc77eed | 2b5fd9d436a97726f852a12bab58b8d367f4866a | /api/urls.py | 2a552766f2d17ea023c0ec9ea230e41593ce2a2f | [] | no_license | lxlzyf/roe | 07ff551b142c0411acb7ca6f759ea98b40ad9b72 | 2d7f1b01e2456875d14a75c90d8397965215bcd3 | refs/heads/master | 2020-03-27T06:00:43.587235 | 2018-08-20T10:47:47 | 2018-08-20T10:47:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 322 | py | from django.conf.urls import url
from api.views import assets_api, user_api  # assets_api was used below but never imported
urlpatterns = [
url(r'^group/$', assets_api.group_list),
url(r'^group/(?P<id>[0-9]+)/$',assets_api.group_detail),
url(r'^user/$', user_api.user_list),
url(r'^user/(?P<id>[0-9]+)/$',user_api.user_detail),
] | [
"[email protected]"
] | |
22126b447591b464ad5a6d753bb645c15ea5ed06 | 531f8027890188eb037a9dbe68d63882eb2e0ead | /demos/ebeam/flash/flash_mismatch.py | 77b5f709db45ef41f935bc5ad434b0e1d972c21e | [] | no_license | Cartj/desy | 057947dd5e3e4fce085472dc145461cea68be8e9 | 9a1f12e7cf7040e28614e95dc5c49bc10d36b092 | refs/heads/master | 2020-03-21T06:01:54.315274 | 2016-08-16T13:04:56 | 2016-08-16T13:04:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,823 | py | __author__ = 'Sergey Tomin'
from ocelot import *
from ocelot.gui import *
from pylab import *
exec(open("lattice_FLASH_S2E.py").read())  # .read(): exec() needs source text, not a file object
beam = Beam()
beam.E = 148.3148e-3 #in GeV ?!
beam.beta_x = 14.8821
beam.beta_y = 18.8146
beam.alpha_x = -0.61309
beam.alpha_y = -0.54569
beam.emit_xn = 1.5e-6
beam.emit_yn = 1.5e-6
beam.emit_x = beam.emit_xn / (beam.E / m_e_GeV)
beam.emit_y = beam.emit_yn / (beam.E / m_e_GeV)
beam.tlen=2e-3 # in m
tw0 = Twiss(beam)
lat = MagneticLattice(lattice)
tws_m=twiss(lat, tw0, nPoints=None)
plot_opt_func(lat, tws_m, top_plot = ["Dx", "Dy"], fig_name="optics")
#plt.show()
mx = 1.
my = 1.
Mx_b = []
My_b = []
S = []
for elem, tws in zip(lat.sequence,tws_m[1:]):
dk = 0.
if elem.type == "quadrupole":
dk_k = -0.05
#if elem.id in ["Q8TCOL", "Q2UBC3", "Q6DBC2"]:
# dk_k = np.random.rand()/100.
dk = dk_k*elem.k1
elem.k1 = elem.k1*(1. + dk_k)
mx += 0.5*((dk*elem.l*tws.beta_x*cos(2*tws.mux))**2 + (dk*elem.l*tws.beta_x*sin(2*tws.mux))**2)
my += 0.5*((dk*elem.l*tws.beta_y*cos(2*tws.muy))**2 + (dk*elem.l*tws.beta_y*sin(2*tws.muy))**2)
Mx_b.append(mx)
My_b.append(my)
S.append(tws.s)
lat = MagneticLattice(lattice)
tws_e=twiss(lat, tw0, nPoints=None)
t = tw0
x = linspace(-sqrt(t.beta_x-1e-7), sqrt(t.beta_x-1e-7), num=200)
#print t.beta_x - x*x
x1 = (sqrt(t.beta_x - x*x) - t.alpha_x*x)/t.beta_x
x2 = (-sqrt(t.beta_x - x*x) - t.alpha_x*x)/t.beta_x
a = sqrt(0.5*((t.beta_x + t.gamma_x) + sqrt((t.beta_x + t.gamma_x)**2 - 4.)))
theta = arctan(-2.*t.alpha_x/(t.beta_x - t.gamma_x))/2.
t = linspace(0, 2*pi, num=100)
xe = a*cos(t)*cos(theta) - 1./a*sin(t)*sin(theta)
ye = a*cos(t)*sin(theta) + 1./a*sin(t)*cos(theta)
plt.plot(x, x1, x, x2)
plt.plot(xe, ye)
plt.show()
Mx = []
My = []
Mx2 = []
My2 = []
for tm, te in zip(tws_m, tws_e):
bx_n = te.beta_x/tm.beta_x
by_n = te.beta_y/tm.beta_y
ax_n = -te.alpha_x + tm.alpha_x*bx_n
ay_n = -te.alpha_y + tm.alpha_y*by_n
gx_n = -2.*te.alpha_x*tm.alpha_x + tm.alpha_x**2*bx_n + tm.beta_x*te.gamma_x
gy_n = -2.*te.alpha_y*tm.alpha_y + tm.alpha_y**2*by_n + tm.beta_y*te.gamma_y
mx = 0.5*(bx_n + gx_n) + sqrt((bx_n + gx_n)**2 - 4.)
#print (by_n + gy_n)**2 - 4.
my = 0.5*(by_n + gy_n) + sqrt((by_n + gy_n)**2 - 4.)
Mx.append(sqrt(mx))
My.append(sqrt(my))
Mx2.append(sqrt(0.5*(tm.beta_x*te.gamma_x - 2.*te.alpha_x*tm.alpha_x + te.beta_x*tm.gamma_x)))
My2.append(sqrt(0.5*(tm.beta_y*te.gamma_y - 2.*te.alpha_y*tm.alpha_y + te.beta_y*tm.gamma_y)))
s = [p.s for p in tws_m]
bx_e = [p.beta_x for p in tws_e]
bx_m = [p.beta_x for p in tws_m]
plt.plot(s, bx_m,"r", s, bx_e, "b")
plt.show()
plt.plot(s, Mx, "r", s, My, "b")
#plt.plot(s, Mx2, "r.", s, My2, "b.")
plt.plot(S, Mx_b, "ro-", S, My_b, "bo-")
plt.show()
| [
"[email protected]"
] | |
4778c6986b6120a7ef560780ffc43c77d358ed22 | 4c9580b2e09e2b000e27a1c9021b12cf2747f56a | /chapter13/xiaoyu_mall/xiaoyu_mall/apps/areas/migrations/0001_initial.py | 079ebb7f05049decffb2551a21f8dbc383e69e82 | [] | no_license | jzplyy/xiaoyue_mall | 69072c0657a6878a4cf799b8c8218cc7d88c8d12 | 4f9353d6857d1bd7dc54151ca8b34dcb4671b8dc | refs/heads/master | 2023-06-26T02:48:03.103635 | 2021-07-22T15:51:07 | 2021-07-22T15:51:07 | 388,514,311 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 930 | py | # Generated by Django 2.2.3 on 2019-11-15 06:09
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Area',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=20, verbose_name='名称')),
('parent', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='subs', to='areas.Area', verbose_name='上级行政区划')),
],
options={
'verbose_name': '省市区',
'verbose_name_plural': '省市区',
'db_table': 'tb_areas',
},
),
]
| [
"[email protected]"
] | |
aaa181dee0af914a8a8cbeccec8f6850df142d4a | 93713f46f16f1e29b725f263da164fed24ebf8a8 | /Library/lib/python3.7/site-packages/docutils/writers/html4css1/__init__.py | f87a1055b0c23f9253762d020667a7431458732a | [
"BSD-3-Clause"
] | permissive | holzschu/Carnets | b83d15136d25db640cea023abb5c280b26a9620e | 1ad7ec05fb1e3676ac879585296c513c3ee50ef9 | refs/heads/master | 2023-02-20T12:05:14.980685 | 2023-02-13T15:59:23 | 2023-02-13T15:59:23 | 167,671,526 | 541 | 36 | BSD-3-Clause | 2022-11-29T03:08:22 | 2019-01-26T09:26:46 | Python | UTF-8 | Python | false | false | 33,869 | py | # $Id: __init__.py 8035 2017-02-13 22:01:47Z milde $
# Author: David Goodger
# Maintainer: [email protected]
# Copyright: This module has been placed in the public domain.
"""
Simple HyperText Markup Language document tree Writer.
The output conforms to the XHTML version 1.0 Transitional DTD
(*almost* strict). The output contains a minimum of formatting
information. The cascading style sheet "html4css1.css" is required
for proper viewing with a modern graphical browser.
"""
__docformat__ = 'reStructuredText'
import os.path
import docutils
from docutils import frontend, nodes, writers, io
from docutils.transforms import writer_aux
from docutils.writers import _html_base
class Writer(writers._html_base.Writer):
supported = ('html', 'html4', 'html4css1', 'xhtml', 'xhtml10')
"""Formats this writer supports."""
default_stylesheets = ['html4css1.css']
default_stylesheet_dirs = ['.',
os.path.abspath(os.path.dirname(__file__)),
# for math.css
os.path.abspath(os.path.join(
os.path.dirname(os.path.dirname(__file__)), 'html5_polyglot'))
]
default_template = 'template.txt'
default_template_path = os.path.join(
os.path.dirname(os.path.abspath(__file__)), default_template)
settings_spec = (
'HTML-Specific Options',
None,
(('Specify the template file (UTF-8 encoded). Default is "%s".'
% default_template_path,
['--template'],
{'default': default_template_path, 'metavar': '<file>'}),
('Comma separated list of stylesheet URLs. '
'Overrides previous --stylesheet and --stylesheet-path settings.',
['--stylesheet'],
{'metavar': '<URL[,URL,...]>', 'overrides': 'stylesheet_path',
'validator': frontend.validate_comma_separated_list}),
('Comma separated list of stylesheet paths. '
'Relative paths are expanded if a matching file is found in '
'the --stylesheet-dirs. With --link-stylesheet, '
'the path is rewritten relative to the output HTML file. '
'Default: "%s"' % ','.join(default_stylesheets),
['--stylesheet-path'],
{'metavar': '<file[,file,...]>', 'overrides': 'stylesheet',
'validator': frontend.validate_comma_separated_list,
'default': default_stylesheets}),
('Embed the stylesheet(s) in the output HTML file. The stylesheet '
'files must be accessible during processing. This is the default.',
['--embed-stylesheet'],
{'default': 1, 'action': 'store_true',
'validator': frontend.validate_boolean}),
('Link to the stylesheet(s) in the output HTML file. '
'Default: embed stylesheets.',
['--link-stylesheet'],
{'dest': 'embed_stylesheet', 'action': 'store_false'}),
('Comma-separated list of directories where stylesheets are found. '
'Used by --stylesheet-path when expanding relative path arguments. '
'Default: "%s"' % default_stylesheet_dirs,
['--stylesheet-dirs'],
{'metavar': '<dir[,dir,...]>',
'validator': frontend.validate_comma_separated_list,
'default': default_stylesheet_dirs}),
('Specify the initial header level. Default is 1 for "<h1>". '
'Does not affect document title & subtitle (see --no-doc-title).',
['--initial-header-level'],
{'choices': '1 2 3 4 5 6'.split(), 'default': '1',
'metavar': '<level>'}),
('Specify the maximum width (in characters) for one-column field '
'names. Longer field names will span an entire row of the table '
'used to render the field list. Default is 14 characters. '
'Use 0 for "no limit".',
['--field-name-limit'],
{'default': 14, 'metavar': '<level>',
'validator': frontend.validate_nonnegative_int}),
('Specify the maximum width (in characters) for options in option '
'lists. Longer options will span an entire row of the table used '
'to render the option list. Default is 14 characters. '
'Use 0 for "no limit".',
['--option-limit'],
{'default': 14, 'metavar': '<level>',
'validator': frontend.validate_nonnegative_int}),
('Format for footnote references: one of "superscript" or '
'"brackets". Default is "brackets".',
['--footnote-references'],
{'choices': ['superscript', 'brackets'], 'default': 'brackets',
'metavar': '<format>',
'overrides': 'trim_footnote_reference_space'}),
('Format for block quote attributions: one of "dash" (em-dash '
'prefix), "parentheses"/"parens", or "none". Default is "dash".',
['--attribution'],
{'choices': ['dash', 'parentheses', 'parens', 'none'],
'default': 'dash', 'metavar': '<format>'}),
('Remove extra vertical whitespace between items of "simple" bullet '
'lists and enumerated lists. Default: enabled.',
['--compact-lists'],
{'default': 1, 'action': 'store_true',
'validator': frontend.validate_boolean}),
('Disable compact simple bullet and enumerated lists.',
['--no-compact-lists'],
{'dest': 'compact_lists', 'action': 'store_false'}),
('Remove extra vertical whitespace between items of simple field '
'lists. Default: enabled.',
['--compact-field-lists'],
{'default': 1, 'action': 'store_true',
'validator': frontend.validate_boolean}),
('Disable compact simple field lists.',
['--no-compact-field-lists'],
{'dest': 'compact_field_lists', 'action': 'store_false'}),
('Added to standard table classes. '
'Defined styles: "borderless". Default: ""',
['--table-style'],
{'default': ''}),
('Math output format, one of "MathML", "HTML", "MathJax" '
'or "LaTeX". Default: "HTML math.css"',
['--math-output'],
{'default': 'HTML math.css'}),
('Omit the XML declaration. Use with caution.',
['--no-xml-declaration'],
{'dest': 'xml_declaration', 'default': 1, 'action': 'store_false',
'validator': frontend.validate_boolean}),
('Obfuscate email addresses to confuse harvesters while still '
'keeping email links usable with standards-compliant browsers.',
['--cloak-email-addresses'],
{'action': 'store_true', 'validator': frontend.validate_boolean}),))
config_section = 'html4css1 writer'
def __init__(self):
self.parts = {}
self.translator_class = HTMLTranslator
class HTMLTranslator(writers._html_base.HTMLTranslator):
"""
The html4css1 writer has been optimized to produce visually compact
lists (less vertical whitespace). HTML's mixed content models
allow list items to contain "<li><p>body elements</p></li>" or
"<li>just text</li>" or even "<li>text<p>and body
elements</p>combined</li>", each with different effects. It would
be best to stick with strict body elements in list items, but they
affect vertical spacing in older browsers (although they really
shouldn't).
The html5_polyglot writer solves this using CSS2.
Here is an outline of the optimization:
- Check for and omit <p> tags in "simple" lists: list items
contain either a single paragraph, a nested simple list, or a
paragraph followed by a nested simple list. This means that
this list can be compact:
- Item 1.
- Item 2.
But this list cannot be compact:
- Item 1.
This second paragraph forces space between list items.
- Item 2.
- In non-list contexts, omit <p> tags on a paragraph if that
paragraph is the only child of its parent (footnotes & citations
are allowed a label first).
- Regardless of the above, in definitions, table cells, field bodies,
option descriptions, and list items, mark the first child with
'class="first"' and the last child with 'class="last"'. The stylesheet
sets the margins (top & bottom respectively) to 0 for these elements.
The ``no_compact_lists`` setting (``--no-compact-lists`` command-line
option) disables list whitespace optimization.
"""
# The following definitions are required for display in browsers limited
# to CSS1 or backwards compatible behaviour of the writer:
doctype = (
'<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"'
' "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">\n')
content_type = ('<meta http-equiv="Content-Type"'
' content="text/html; charset=%s" />\n')
content_type_mathml = ('<meta http-equiv="Content-Type"'
' content="application/xhtml+xml; charset=%s" />\n')
# encode also non-breaking space
special_characters = dict(_html_base.HTMLTranslator.special_characters)
    special_characters[0xa0] = '&nbsp;'
# use character reference for dash (not valid in HTML5)
    attribution_formats = {'dash': ('&mdash;', ''),
'parentheses': ('(', ')'),
'parens': ('(', ')'),
'none': ('', '')}
# ersatz for first/last pseudo-classes missing in CSS1
def set_first_last(self, node):
self.set_class_on_child(node, 'first', 0)
self.set_class_on_child(node, 'last', -1)
# add newline after opening tag
def visit_address(self, node):
self.visit_docinfo_item(node, 'address', meta=False)
self.body.append(self.starttag(node, 'pre', CLASS='address'))
# ersatz for first/last pseudo-classes
def visit_admonition(self, node):
node['classes'].insert(0, 'admonition')
self.body.append(self.starttag(node, 'div'))
self.set_first_last(node)
# author, authors: use <br> instead of paragraphs
def visit_author(self, node):
if isinstance(node.parent, nodes.authors):
if self.author_in_authors:
self.body.append('\n<br />')
else:
self.visit_docinfo_item(node, 'author')
def depart_author(self, node):
if isinstance(node.parent, nodes.authors):
self.author_in_authors = True
else:
self.depart_docinfo_item()
def visit_authors(self, node):
self.visit_docinfo_item(node, 'authors')
self.author_in_authors = False # initialize
def depart_authors(self, node):
self.depart_docinfo_item()
# use "width" argument insted of "style: 'width'":
def visit_colspec(self, node):
self.colspecs.append(node)
# "stubs" list is an attribute of the tgroup element:
node.parent.stubs.append(node.attributes.get('stub'))
#
def depart_colspec(self, node):
# write out <colgroup> when all colspecs are processed
if isinstance(node.next_node(descend=False, siblings=True),
nodes.colspec):
return
if 'colwidths-auto' in node.parent.parent['classes'] or (
'colwidths-auto' in self.settings.table_style and
('colwidths-given' not in node.parent.parent['classes'])):
return
total_width = sum(node['colwidth'] for node in self.colspecs)
self.body.append(self.starttag(node, 'colgroup'))
for node in self.colspecs:
colwidth = int(node['colwidth'] * 100.0 / total_width + 0.5)
self.body.append(self.emptytag(node, 'col',
width='%i%%' % colwidth))
self.body.append('</colgroup>\n')
# Compact lists:
# exclude definition lists and field lists (non-compact by default)
def is_compactable(self, node):
return ('compact' in node['classes']
or (self.settings.compact_lists
and 'open' not in node['classes']
and (self.compact_simple
or self.topic_classes == ['contents']
# TODO: self.in_contents
or self.check_simple_list(node))))
# citations: Use table for bibliographic references.
def visit_citation(self, node):
self.body.append(self.starttag(node, 'table',
CLASS='docutils citation',
frame="void", rules="none"))
self.body.append('<colgroup><col class="label" /><col /></colgroup>\n'
'<tbody valign="top">\n'
'<tr>')
self.footnote_backrefs(node)
def depart_citation(self, node):
self.body.append('</td></tr>\n'
'</tbody>\n</table>\n')
# insert classifier-delimiter (not required with CSS2)
def visit_classifier(self, node):
self.body.append(' <span class="classifier-delimiter">:</span> ')
self.body.append(self.starttag(node, 'span', '', CLASS='classifier'))
# ersatz for first/last pseudo-classes
def visit_definition(self, node):
self.body.append('</dt>\n')
self.body.append(self.starttag(node, 'dd', ''))
self.set_first_last(node)
# don't add "simple" class value
def visit_definition_list(self, node):
self.body.append(self.starttag(node, 'dl', CLASS='docutils'))
# use a table for description lists
def visit_description(self, node):
self.body.append(self.starttag(node, 'td', ''))
self.set_first_last(node)
def depart_description(self, node):
self.body.append('</td>')
# use table for docinfo
def visit_docinfo(self, node):
self.context.append(len(self.body))
self.body.append(self.starttag(node, 'table',
CLASS='docinfo',
frame="void", rules="none"))
self.body.append('<col class="docinfo-name" />\n'
'<col class="docinfo-content" />\n'
'<tbody valign="top">\n')
self.in_docinfo = True
def depart_docinfo(self, node):
self.body.append('</tbody>\n</table>\n')
self.in_docinfo = False
start = self.context.pop()
self.docinfo = self.body[start:]
self.body = []
def visit_docinfo_item(self, node, name, meta=True):
if meta:
meta_tag = '<meta name="%s" content="%s" />\n' \
% (name, self.attval(node.astext()))
self.add_meta(meta_tag)
self.body.append(self.starttag(node, 'tr', ''))
self.body.append('<th class="docinfo-name">%s:</th>\n<td>'
% self.language.labels[name])
if len(node):
if isinstance(node[0], nodes.Element):
node[0]['classes'].append('first')
if isinstance(node[-1], nodes.Element):
node[-1]['classes'].append('last')
def depart_docinfo_item(self):
self.body.append('</td></tr>\n')
# add newline after opening tag
def visit_doctest_block(self, node):
self.body.append(self.starttag(node, 'pre', CLASS='doctest-block'))
# insert an NBSP into empty cells, ersatz for first/last
def visit_entry(self, node):
writers._html_base.HTMLTranslator.visit_entry(self, node)
if len(node) == 0: # empty cell
            self.body.append('&nbsp;')
self.set_first_last(node)
# ersatz for first/last pseudo-classes
def visit_enumerated_list(self, node):
"""
The 'start' attribute does not conform to HTML 4.01's strict.dtd, but
cannot be emulated in CSS1 (HTML 5 reincludes it).
"""
atts = {}
if 'start' in node:
atts['start'] = node['start']
if 'enumtype' in node:
atts['class'] = node['enumtype']
# @@@ To do: prefix, suffix. How? Change prefix/suffix to a
# single "format" attribute? Use CSS2?
old_compact_simple = self.compact_simple
self.context.append((self.compact_simple, self.compact_p))
self.compact_p = None
self.compact_simple = self.is_compactable(node)
if self.compact_simple and not old_compact_simple:
atts['class'] = (atts.get('class', '') + ' simple').strip()
self.body.append(self.starttag(node, 'ol', **atts))
def depart_enumerated_list(self, node):
self.compact_simple, self.compact_p = self.context.pop()
self.body.append('</ol>\n')
# use table for field-list:
def visit_field(self, node):
self.body.append(self.starttag(node, 'tr', '', CLASS='field'))
def depart_field(self, node):
self.body.append('</tr>\n')
def visit_field_body(self, node):
self.body.append(self.starttag(node, 'td', '', CLASS='field-body'))
self.set_class_on_child(node, 'first', 0)
field = node.parent
if (self.compact_field_list or
isinstance(field.parent, nodes.docinfo) or
field.parent.index(field) == len(field.parent) - 1):
# If we are in a compact list, the docinfo, or if this is
# the last field of the field list, do not add vertical
# space after last element.
self.set_class_on_child(node, 'last', -1)
def depart_field_body(self, node):
self.body.append('</td>\n')
def visit_field_list(self, node):
self.context.append((self.compact_field_list, self.compact_p))
self.compact_p = None
if 'compact' in node['classes']:
self.compact_field_list = True
elif (self.settings.compact_field_lists
and 'open' not in node['classes']):
self.compact_field_list = True
if self.compact_field_list:
for field in node:
field_body = field[-1]
assert isinstance(field_body, nodes.field_body)
children = [n for n in field_body
if not isinstance(n, nodes.Invisible)]
if not (len(children) == 0 or
len(children) == 1 and
isinstance(children[0],
(nodes.paragraph, nodes.line_block))):
self.compact_field_list = False
break
self.body.append(self.starttag(node, 'table', frame='void',
rules='none',
CLASS='docutils field-list'))
self.body.append('<col class="field-name" />\n'
'<col class="field-body" />\n'
'<tbody valign="top">\n')
def depart_field_list(self, node):
self.body.append('</tbody>\n</table>\n')
self.compact_field_list, self.compact_p = self.context.pop()
def visit_field_name(self, node):
atts = {}
if self.in_docinfo:
atts['class'] = 'docinfo-name'
else:
atts['class'] = 'field-name'
if ( self.settings.field_name_limit
and len(node.astext()) > self.settings.field_name_limit):
atts['colspan'] = 2
self.context.append('</tr>\n'
+ self.starttag(node.parent, 'tr', '',
CLASS='field')
                                + '<td>&nbsp;</td>')
else:
self.context.append('')
self.body.append(self.starttag(node, 'th', '', **atts))
def depart_field_name(self, node):
self.body.append(':</th>')
self.body.append(self.context.pop())
# use table for footnote text
def visit_footnote(self, node):
self.body.append(self.starttag(node, 'table',
CLASS='docutils footnote',
frame="void", rules="none"))
self.body.append('<colgroup><col class="label" /><col /></colgroup>\n'
'<tbody valign="top">\n'
'<tr>')
self.footnote_backrefs(node)
def footnote_backrefs(self, node):
backlinks = []
backrefs = node['backrefs']
if self.settings.footnote_backlinks and backrefs:
if len(backrefs) == 1:
self.context.append('')
self.context.append('</a>')
self.context.append('<a class="fn-backref" href="#%s">'
% backrefs[0])
else:
# Python 2.4 fails with enumerate(backrefs, 1)
for (i, backref) in enumerate(backrefs):
backlinks.append('<a class="fn-backref" href="#%s">%s</a>'
% (backref, i+1))
self.context.append('<em>(%s)</em> ' % ', '.join(backlinks))
self.context += ['', '']
else:
self.context.append('')
self.context += ['', '']
# If the node does not only consist of a label.
if len(node) > 1:
# If there are preceding backlinks, we do not set class
# 'first', because we need to retain the top-margin.
if not backlinks:
node[1]['classes'].append('first')
node[-1]['classes'].append('last')
def depart_footnote(self, node):
self.body.append('</td></tr>\n'
'</tbody>\n</table>\n')
# insert markers in text as pseudo-classes are not supported in CSS1:
def visit_footnote_reference(self, node):
href = '#' + node['refid']
format = self.settings.footnote_references
if format == 'brackets':
suffix = '['
self.context.append(']')
else:
assert format == 'superscript'
suffix = '<sup>'
self.context.append('</sup>')
self.body.append(self.starttag(node, 'a', suffix,
CLASS='footnote-reference', href=href))
def depart_footnote_reference(self, node):
self.body.append(self.context.pop() + '</a>')
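    # Markup sketch: with the default "brackets" format a reference renders as
    #     <a class="footnote-reference" href="#fn1">[1]</a>
    # (href is illustrative); "superscript" swaps the brackets for <sup>...</sup>.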
# just pass on generated text
def visit_generated(self, node):
pass
# Image types to place in an <object> element
# SVG not supported by IE up to version 8
# (html4css1 strives for IE6 compatibility)
object_image_types = {'.svg': 'image/svg+xml',
'.swf': 'application/x-shockwave-flash'}
# use table for footnote text,
# context added in footnote_backrefs.
def visit_label(self, node):
self.body.append(self.starttag(node, 'td', '%s[' % self.context.pop(),
CLASS='label'))
def depart_label(self, node):
self.body.append(']%s</td><td>%s' % (self.context.pop(), self.context.pop()))
# ersatz for first/last pseudo-classes
def visit_list_item(self, node):
self.body.append(self.starttag(node, 'li', ''))
if len(node):
node[0]['classes'].append('first')
# use <tt> (not supported by HTML5),
# cater for limited styling options in CSS1 using hard-coded NBSPs
def visit_literal(self, node):
# special case: "code" role
classes = node.get('classes', [])
if 'code' in classes:
# filter 'code' from class arguments
node['classes'] = [cls for cls in classes if cls != 'code']
self.body.append(self.starttag(node, 'code', ''))
return
self.body.append(
self.starttag(node, 'tt', '', CLASS='docutils literal'))
text = node.astext()
for token in self.words_and_spaces.findall(text):
if token.strip():
# Protect text like "--an-option" and the regular expression
# ``[+]?(\d+(\.\d*)?|\.\d+)`` from bad line wrapping
if self.in_word_wrap_point.search(token):
self.body.append('<span class="pre">%s</span>'
% self.encode(token))
else:
self.body.append(self.encode(token))
elif token in ('\n', ' '):
# Allow breaks at whitespace:
self.body.append(token)
else:
# Protect runs of multiple spaces; the last space can wrap:
                self.body.append('&nbsp;' * (len(token) - 1) + ' ')
self.body.append('</tt>')
# Content already processed:
raise nodes.SkipNode
# add newline after opening tag, don't use <code> for code
def visit_literal_block(self, node):
self.body.append(self.starttag(node, 'pre', CLASS='literal-block'))
# add newline
def depart_literal_block(self, node):
self.body.append('\n</pre>\n')
# use table for option list
def visit_option_group(self, node):
atts = {}
if ( self.settings.option_limit
and len(node.astext()) > self.settings.option_limit):
atts['colspan'] = 2
            self.context.append('</tr>\n<tr><td>&nbsp;</td>')
else:
self.context.append('')
self.body.append(
self.starttag(node, 'td', CLASS='option-group', **atts))
self.body.append('<kbd>')
self.context.append(0) # count number of options
def depart_option_group(self, node):
self.context.pop()
self.body.append('</kbd></td>\n')
self.body.append(self.context.pop())
def visit_option_list(self, node):
self.body.append(
self.starttag(node, 'table', CLASS='docutils option-list',
frame="void", rules="none"))
self.body.append('<col class="option" />\n'
'<col class="description" />\n'
'<tbody valign="top">\n')
def depart_option_list(self, node):
self.body.append('</tbody>\n</table>\n')
def visit_option_list_item(self, node):
self.body.append(self.starttag(node, 'tr', ''))
def depart_option_list_item(self, node):
self.body.append('</tr>\n')
# Omit <p> tags to produce visually compact lists (less vertical
# whitespace) as CSS styling requires CSS2.
def should_be_compact_paragraph(self, node):
"""
Determine if the <p> tags around paragraph ``node`` can be omitted.
"""
if (isinstance(node.parent, nodes.document) or
isinstance(node.parent, nodes.compound)):
# Never compact paragraphs in document or compound.
return False
for key, value in node.attlist():
if (node.is_not_default(key) and
not (key == 'classes' and value in
([], ['first'], ['last'], ['first', 'last']))):
# Attribute which needs to survive.
return False
first = isinstance(node.parent[0], nodes.label) # skip label
for child in node.parent.children[first:]:
# only first paragraph can be compact
if isinstance(child, nodes.Invisible):
continue
if child is node:
break
return False
parent_length = len([n for n in node.parent if not isinstance(
n, (nodes.Invisible, nodes.label))])
if ( self.compact_simple
or self.compact_field_list
or self.compact_p and parent_length == 1):
return True
return False
def visit_paragraph(self, node):
if self.should_be_compact_paragraph(node):
self.context.append('')
else:
self.body.append(self.starttag(node, 'p', ''))
self.context.append('</p>\n')
def depart_paragraph(self, node):
self.body.append(self.context.pop())
# ersatz for first/last pseudo-classes
def visit_sidebar(self, node):
self.body.append(
self.starttag(node, 'div', CLASS='sidebar'))
self.set_first_last(node)
self.in_sidebar = True
# <sub> not allowed in <pre>
def visit_subscript(self, node):
if isinstance(node.parent, nodes.literal_block):
self.body.append(self.starttag(node, 'span', '',
CLASS='subscript'))
else:
self.body.append(self.starttag(node, 'sub', ''))
def depart_subscript(self, node):
if isinstance(node.parent, nodes.literal_block):
self.body.append('</span>')
else:
self.body.append('</sub>')
# Use <h*> for subtitles (deprecated in HTML 5)
def visit_subtitle(self, node):
if isinstance(node.parent, nodes.sidebar):
self.body.append(self.starttag(node, 'p', '',
CLASS='sidebar-subtitle'))
self.context.append('</p>\n')
elif isinstance(node.parent, nodes.document):
self.body.append(self.starttag(node, 'h2', '', CLASS='subtitle'))
self.context.append('</h2>\n')
self.in_document_title = len(self.body)
elif isinstance(node.parent, nodes.section):
tag = 'h%s' % (self.section_level + self.initial_header_level - 1)
self.body.append(
self.starttag(node, tag, '', CLASS='section-subtitle') +
self.starttag({}, 'span', '', CLASS='section-subtitle'))
self.context.append('</span></%s>\n' % tag)
def depart_subtitle(self, node):
self.body.append(self.context.pop())
if self.in_document_title:
self.subtitle = self.body[self.in_document_title:-1]
self.in_document_title = 0
self.body_pre_docinfo.extend(self.body)
self.html_subtitle.extend(self.body)
del self.body[:]
# <sup> not allowed in <pre> in HTML 4
def visit_superscript(self, node):
if isinstance(node.parent, nodes.literal_block):
self.body.append(self.starttag(node, 'span', '',
CLASS='superscript'))
else:
self.body.append(self.starttag(node, 'sup', ''))
def depart_superscript(self, node):
if isinstance(node.parent, nodes.literal_block):
self.body.append('</span>')
else:
self.body.append('</sup>')
# <tt> element deprecated in HTML 5
def visit_system_message(self, node):
self.body.append(self.starttag(node, 'div', CLASS='system-message'))
self.body.append('<p class="system-message-title">')
backref_text = ''
if len(node['backrefs']):
backrefs = node['backrefs']
if len(backrefs) == 1:
backref_text = ('; <em><a href="#%s">backlink</a></em>'
% backrefs[0])
else:
i = 1
backlinks = []
for backref in backrefs:
backlinks.append('<a href="#%s">%s</a>' % (backref, i))
i += 1
backref_text = ('; <em>backlinks: %s</em>'
% ', '.join(backlinks))
if node.hasattr('line'):
line = ', line %s' % node['line']
else:
line = ''
self.body.append('System Message: %s/%s '
'(<tt class="docutils">%s</tt>%s)%s</p>\n'
% (node['type'], node['level'],
self.encode(node['source']), line, backref_text))
# "hard coded" border setting
def visit_table(self, node):
self.context.append(self.compact_p)
self.compact_p = True
classes = ['docutils', self.settings.table_style]
if 'align' in node:
classes.append('align-%s' % node['align'])
self.body.append(
self.starttag(node, 'table', CLASS=' '.join(classes), border="1"))
def depart_table(self, node):
self.compact_p = self.context.pop()
self.body.append('</table>\n')
# hard-coded vertical alignment
def visit_tbody(self, node):
self.body.append(self.starttag(node, 'tbody', valign='top'))
#
def depart_tbody(self, node):
self.body.append('</tbody>\n')
# hard-coded vertical alignment
def visit_thead(self, node):
self.body.append(self.starttag(node, 'thead', valign='bottom'))
#
def depart_thead(self, node):
self.body.append('</thead>\n')
class SimpleListChecker(writers._html_base.SimpleListChecker):
"""
Raise `nodes.NodeFound` if non-simple list item is encountered.
Here "simple" means a list item containing nothing other than a single
paragraph, a simple list, or a paragraph followed by a simple list.
"""
def visit_list_item(self, node):
children = []
for child in node.children:
if not isinstance(child, nodes.Invisible):
children.append(child)
if (children and isinstance(children[0], nodes.paragraph)
and (isinstance(children[-1], nodes.bullet_list)
or isinstance(children[-1], nodes.enumerated_list))):
children.pop()
if len(children) <= 1:
return
else:
raise nodes.NodeFound
# def visit_bullet_list(self, node):
# pass
# def visit_enumerated_list(self, node):
# pass
# def visit_paragraph(self, node):
# raise nodes.SkipNode
def visit_definition_list(self, node):
raise nodes.NodeFound
def visit_docinfo(self, node):
raise nodes.NodeFound
| [
"[email protected]"
] | |
c5e60a89ed2a73c9c155f1c67d66ad55d13bc4ba | cd486d096d2c92751557f4a97a4ba81a9e6efebd | /17/addons/plugin.video.ukturk/resources/lib/scraper2.py | 0c1a6e03d1453afd6847bd928d43d611c2b92671 | [] | no_license | bopopescu/firestick-loader-kodi-data | 2f8cb72b9da67854b64aa76f720bdad6d4112926 | e4d7931d8f62c94f586786cd8580108b68d3aa40 | refs/heads/master | 2022-04-28T11:14:10.452251 | 2020-05-01T03:12:13 | 2020-05-01T03:12:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,184 | py | # coding: UTF-8
import sys
l111ll1llUK_Turk_No1 = sys.version_info [0] == 2
l11l1l11lUK_Turk_No1 = 2048
l111llll1UK_Turk_No1 = 7
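# The helper below reverses the plugin's home-grown string obfuscation: the
# last character of each encoded literal carries a per-string offset, every
# remaining character is shifted down by 2048 plus (index + offset) % 7, and
# the decoded text is handed to eval(). (The slice-and-rejoin of the remaining
# characters is a no-op left over from the obfuscator.)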
def l11l1lUK_Turk_No1 (l1llll1lUK_Turk_No1):
global l1l1ll1llUK_Turk_No1
l11lllll1UK_Turk_No1 = ord (l1llll1lUK_Turk_No1 [-1])
l11l111llUK_Turk_No1 = l1llll1lUK_Turk_No1 [:-1]
l1lll1lllUK_Turk_No1 = l11lllll1UK_Turk_No1 % len (l11l111llUK_Turk_No1)
l1l11llllUK_Turk_No1 = l11l111llUK_Turk_No1 [:l1lll1lllUK_Turk_No1] + l11l111llUK_Turk_No1 [l1lll1lllUK_Turk_No1:]
if l111ll1llUK_Turk_No1:
l1ll1llUK_Turk_No1 = unicode () .join ([unichr (ord (char) - l11l1l11lUK_Turk_No1 - (l11lllUK_Turk_No1 + l11lllll1UK_Turk_No1) % l111llll1UK_Turk_No1) for l11lllUK_Turk_No1, char in enumerate (l1l11llllUK_Turk_No1)])
else:
l1ll1llUK_Turk_No1 = str () .join ([chr (ord (char) - l11l1l11lUK_Turk_No1 - (l11lllUK_Turk_No1 + l11lllll1UK_Turk_No1) % l111llll1UK_Turk_No1) for l11lllUK_Turk_No1, char in enumerate (l1l11llllUK_Turk_No1)])
return eval (l1ll1llUK_Turk_No1)
import urllib,urllib2,re,os
def l11lll11l1UK_Turk_No1():
string=l11l1lUK_Turk_No1 (u"ࠨࠩැ")
link=l1llll111UK_Turk_No1(l11l1lUK_Turk_No1 (u"ࠤࡸࡹࡶ࠺࠰࠱ࡦࡶࡩࡦࡳࡧࡨ࠲ࡸࡩࡧࡱࡲࡸࡧࡧࡻ࠭ࡴࡶࡵࡩࡦࡳࠢෑ"))
events=re.compile(l11l1lUK_Turk_No1 (u"ࠪࡀࡹࡪ࠾࠽ࡵࡳࡥࡳࠦࡣࡣࡶࡷࡂࠨࡳࡱࡱࡵࡸ࠲ࡣࡰࡰࠫ࠲࠰ࡅࠩ࠽࠱ࡷࡶࡃ࠭ි"),re.DOTALL).findall(link)
for event in events:
l11lll111lUK_Turk_No1=re.compile(l11l1lUK_Turk_No1 (u"ࠫࡁࡺࡤࠪ࠱ࡄ࠼ࡣࡴࠫ࠲࠰ࡅࠩ࠽࠱ࡷࡨࡃ࠭ී")).findall(event)
for day,date in l11lll111lUK_Turk_No1:
day=l11l1lUK_Turk_No1 (u"ࠬࡡࡃࡐࡎࡒࡖࠥࡦࡠࠫු")+day+l11l1lUK_Turk_No1 (u"࡛࠭࠰ࡅࡒࡐࡔࡘࠨ")
date=date.replace(l11l1lUK_Turk_No1 (u"ࠧࠩූ"),l11l1lUK_Turk_No1 (u"ࠨࠩ"))
time=re.compile(l11l1lUK_Turk_No1 (u"ࠩࡸࡩࠦࡣࡣࡶࡷࡂࠨࡢࡶࡦࡹࡦࠤࠣࡷࡹࡿࡦࠥࡧࡴࡲࡳ࠼ࠦ࠹࠹࠻࠴࠶࠶࠾ࡪࡴࡴࡴࡹࡨࡴ࠻ࡤࡲࡰࡩࡁࡦࡰࡰࡷ࠱ࡸࡺࡦ࠼ࠣ࠽ࡵࡾࠢࠪ࠱ࡄ࠼࠰ࡶࡧࡂࠬෘ")).findall(event)[0]
time=l11l1lUK_Turk_No1 (u"ࠪࡈࡕࡌࡐࡔࠣࡦࡱࡻࡥ࡞ࠪࠪෙ")+time+l11l1lUK_Turk_No1 (u"ࠫࡡࡄࡑࡏࡓࡗࡣࠧේ")
l11lll1l11UK_Turk_No1=re.compile(l11l1lUK_Turk_No1 (u"ࠬࡂࡡࠡࡵࡷࡽࡱ࠽ࠣࡶࡨࡼࡹ࠳ࡤࡦࡥࡲࡶࡦࡺࡩࡰࡰ࠽ࡲࡴࡴࡥࠡࠣࡱࡵࡵࡲࡵࡣࡱࡸࡀࡩࡱࡵ࠾ࠨ࠻࠴࠶࠶࠸࠸ࡀࠨࠠࡩࡴࡨࡪࡂࠨࠨ࠭ࡂ࠭ࠧࠦࡴࡢࡴࡪࡩࡹࡃࠢࡠࡤࡥࡳࡱࠢࠪ࠱ࡄ࠼࠰ࡣࡁࡀ࠴ࡺࡤࠩෛ")).findall(event)
for url,l11lll11llUK_Turk_No1 in l11lll1l11UK_Turk_No1:
url=url
l11lll11llUK_Turk_No1=l11lll11llUK_Turk_No1
string=string+l11l1lUK_Turk_No1 (u"࠭࠾ࡸࡪࡳ࠾ࡰࡸࡺࡦࡀࠨࡷࡁ࠵ࡴࡪࡶࡩࡃࡢ࠽ࡵࡳࡳࡷࡺࡳࡥࡧࡹࡱࡄࠥࡴ࠾࠲ࡷࡵࡵࡲࡵࡵࡧࡩࡻ࡞ࡱࠫො")%(day+l11l1lUK_Turk_No1 (u"ࠧࠡࠩෝ")+time+l11l1lUK_Turk_No1 (u"ࠨࠢ࠰ࠤࠬෞ")+l11lll11llUK_Turk_No1,url)
string=string+l11l1lUK_Turk_No1 (u"ࠩࡸࡻࡣࡰࡤࡱࡄࡉࡣࡪࡩࡍࡲࡦ࠾࠲ࡸࡻࡣࡰࡤࡱࡄ࠾ࡩࡥࡳࡧࡲࡵࡀࡩࡥࡳࡧࡲࡵ࠾࠲ࡪࡦࡴࡡࡳࡶࡁࡠࡳࡂࡪࡶࡨࡱࡃࡢࠨෟ")
return string
def l1llll111UK_Turk_No1(url):
req = urllib2.Request(url)
req.add_header(l11l1lUK_Turk_No1 (u"࡙ࠪࡸࡲࡃࡪࡩࡳࡺࠧ"), l11l1lUK_Turk_No1 (u"ࠫࡒࡵࡺࡪࡥ࠴࠻࠱࡛ࠢࠫࡴࡤࡰࡹࡶࠤࡓ࡚ࠠ࠲࠲࠱࠴ࠦࡁࡱࡲࡩࡢࡌࡷ࠳࠺࠹࠷࠵࠹ࠤ࠭ࡑࡈࡕࡏࡏ࠰ࠥࡲࡩࡧࠣࡋࡪࡩࡰࠫࠣࡇࡸࡧ࠲࠹࠹࠴࠰࠴࠻࠸࠵࠴࠷࠲ࠢࡖࡥࡧࡲࡪ࠱࠸࠷࠼࠴࠳࠷ࠩ"))
response = urllib2.urlopen(req)
link=response.read()
return link | [
"[email protected]"
] | |
94ed5e380f49bf3d497d587c95ec1d3ec6e65bad | dcbedd4c06aa0cf78cf1d881a61f2a0cdb06005a | /(Keras) IMDB Dataset.py | 756f84210ce7f7a14cdf371a8ffa4145def4e726 | [] | no_license | KevinHooah/recurrent-dropout-experiments | 064243f403687a7e063a6464ce015d282a8a0dfb | 96b2aa2478fb46a252251c0b49354a2de40c7684 | refs/heads/master | 2020-08-29T23:43:01.440740 | 2019-08-07T03:43:23 | 2019-08-07T03:43:23 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,816 | py |
# coding: utf-8
# # (Keras) IMDB Dataset
# In[1]:
import numpy as np
from tensorflow.contrib.keras.python.keras.optimizers import SGD, RMSprop, Adagrad
from tensorflow.contrib.keras.python.keras.models import Sequential
from tensorflow.contrib.keras.python.keras.layers.core import Dense, Dropout
from tensorflow.contrib.keras.python.keras.layers.embeddings import Embedding
from tensorflow.contrib.keras.python.keras.layers.recurrent import LSTM, GRU, SimpleRNN
from tensorflow.contrib.keras.python.keras.regularizers import l2
from tensorflow.contrib.keras.python.keras.optimizers import Adam
from tensorflow.contrib.keras.python.keras.preprocessing import sequence
from tensorflow.contrib.keras.python.keras.datasets import imdb
from sklearn.metrics import mean_squared_error
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
from yaringal_callbacks import ModelTest
from yaringal_dataset import loader
get_ipython().magic('matplotlib inline')
plt.style.use('fivethirtyeight')
plt.rcParams["figure.figsize"] = (8, 5)
# Global params:
NB_WORDS = 20000
SKIP_TOP = 0
TEST_SPLIT = 0.2
INIT_SEED = 2017
GLOBAL_SEED = 2018
MAXLEN = 80
BATCH_SIZE = 128
TEST_BATCH_SIZE = 512
WEIGHT_DECAY = 1e-4
# In[2]:
np.random.seed(100)
# In[3]:
(X_train, Y_train), (X_test, Y_test) = imdb.load_data(num_words=NB_WORDS)
print(len(X_train), 'train sequences')
print(len(X_test), 'test sequences')
print('Pad sequences (samples x time)')
X_train = sequence.pad_sequences(X_train, maxlen=MAXLEN)
X_test = sequence.pad_sequences(X_test, maxlen=MAXLEN)
print('x_train shape:', X_train.shape)
print('x_test shape:', X_test.shape)
# In[4]:
def get_model(idrop=0.2, edrop=0.1, odrop=0.25, rdrop=0.2, weight_decay=WEIGHT_DECAY):
model = Sequential()
model.add(Embedding(NB_WORDS, 128, embeddings_regularizer=l2(weight_decay),
input_length=MAXLEN)) # , batch_input_shape=(batch_size, maxlen)))
if edrop:
model.add(Dropout(edrop))
model.add(LSTM(128, kernel_regularizer=l2(weight_decay), recurrent_regularizer=l2(weight_decay),
bias_regularizer=l2(weight_decay), dropout=idrop, recurrent_dropout=rdrop))
if odrop:
model.add(Dropout(odrop))
model.add(Dense(1, kernel_regularizer=l2(weight_decay),
bias_regularizer=l2(weight_decay), activation='sigmoid'))
optimizer = Adam(1e-3)
model.compile(loss='binary_crossentropy', metrics=["binary_accuracy"], optimizer=optimizer)
return model
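# How the knobs above map onto the schemes compared below (as wired here):
# edrop/odrop are plain Dropout layers after the Embedding / before the Dense
# head, while idrop/rdrop feed the LSTM's dropout/recurrent_dropout, which
# reuse one mask across timesteps (the variational scheme).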
# ## Normal Variational LSTM (w/o Embedding Dropout)
# All models in this notebook do not have embedding dropout as Keras does not have such layer.
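# Illustrative aside (not part of the original experiments): the mask pattern
# the runs below compare, sketched in plain NumPy. Naive dropout samples a
# fresh mask per timestep; variational dropout reuses one mask across the
# whole sequence; embedding dropout (unavailable in Keras here) would zero
# entire word types instead.
_rng = np.random.RandomState(0)
_T, _D, _p = 3, 5, 0.5
_naive = _rng.binomial(1, 1 - _p, size=(_T, _D)) / (1 - _p)   # new mask per step
_variational = np.tile(_rng.binomial(1, 1 - _p, size=(1, _D)),
                       (_T, 1)) / (1 - _p)                    # one mask, all steps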
# In[5]:
print('Build model...')
model = get_model(idrop=0.25, edrop=0, odrop=0.25, rdrop=0.25, weight_decay=1e-4)
# In[6]:
modeltest_1 = ModelTest(X_test, Yt=Y_test,
test_every_X_epochs=1, verbose=0,
loss='binary', batch_size=TEST_BATCH_SIZE)
# In[7]:
history_1 = model.fit(
X_train, Y_train,
verbose=2,
shuffle=True,
# validation_data=[X_test, Y_test],
batch_size=BATCH_SIZE, epochs=20, callbacks=[modeltest_1])
# In[11]:
best_epoch = np.argmin([x[1] for x in modeltest_1.history[:18]]) + 1
print("Best Loss: {:.4f} Acc: {:.2f}% Best Epoch: {}".format(
modeltest_1.history[best_epoch-1][1],
modeltest_1.history[best_epoch-1][3] * 100,
best_epoch
))
# In[12]:
plt.title("Log Loss Comparison")
plt.plot(np.arange(len(modeltest_1.history)), [x[0] for x in modeltest_1.history], label="std")
plt.plot(np.arange(len(modeltest_1.history)), [x[1] for x in modeltest_1.history], "g-", label="mc")
plt.legend(loc='best')
# In[13]:
plt.title("Accuracy Comparison")
plt.plot(np.arange(0, len(modeltest_1.history)), [x[2] for x in modeltest_1.history], label="std")
plt.plot(np.arange(0, len(modeltest_1.history)), [x[3] for x in modeltest_1.history], "g-", label="mc")
plt.legend(loc='best')
# ## Standard LSTM
# I choose to keep a very low weight decay because assigning zero seems to cause some problems.
# In[14]:
print('Build model...')
model = get_model(edrop=0, rdrop=0, odrop=0, idrop=0, weight_decay=1e-10)
# In[15]:
modeltest_2 = ModelTest(X_test, Yt=Y_test,
test_every_X_epochs=1, verbose=0, T=1,
loss='binary', batch_size=TEST_BATCH_SIZE)
# In[17]:
history_2 = model.fit(
X_train, Y_train,
verbose=2,
shuffle=True,
# validation_data=[X_test, Y_test],
batch_size=BATCH_SIZE, epochs=20, callbacks=[modeltest_2])
# In[25]:
best_epoch = np.argmin([x[1] for x in modeltest_2.history]) + 1
print("Best Loss: {:.4f} Acc: {:.2f}% Best Epoch: {}".format(
modeltest_2.history[best_epoch-1][1],
modeltest_2.history[best_epoch-1][3] * 100,
best_epoch
))
# ## LSTM with Standard Dropout (different mask at differnt time steps)
# In[20]:
print('Build model...')
model = get_model(edrop=0.25, rdrop=0, odrop=0.25, idrop=0, weight_decay=1e-4)
# In[21]:
modeltest_3 = ModelTest(X_test, Yt=Y_test,
test_every_X_epochs=1, verbose=0, T=10,
loss='binary', batch_size=TEST_BATCH_SIZE)
# In[22]:
history_3 = model.fit(
X_train, Y_train,
verbose=2,
shuffle=True,
# validation_data=[X_test, Y_test],
batch_size=BATCH_SIZE, epochs=20, callbacks=[modeltest_3])
# In[24]:
best_epoch = np.argmin([x[1] for x in modeltest_3.history[:19]]) + 1
print("Best Loss: {:.4f} Acc: {:.2f}% Best Epoch: {}".format(
modeltest_3.history[best_epoch-1][1],
modeltest_3.history[best_epoch-1][3] * 100,
best_epoch
))
# ## Visualizations
# In[40]:
bins = np.arange(-0.1, 0.035, 0.01)
# In[53]:
len(history_2.history["binary_accuracy"])
# In[54]:
plt.figure(figsize=(12, 4))
plt.subplot(1, 2, 1)
plt.title("Accuracy Comparison - Training Set")
plt.plot(np.arange(len(history_2.history["binary_accuracy"])),
np.array(history_1.history["binary_accuracy"][:20]) * 100, label="variational")
plt.plot(np.arange(len(history_2.history["binary_accuracy"])),
np.array(history_2.history["binary_accuracy"]) * 100, "g-", label="no dropout")
plt.plot(np.arange(len(history_3.history["binary_accuracy"])),
np.array(history_3.history["binary_accuracy"]) * 100, "y-", label="naive dropout")
plt.legend(loc='best')
plt.xlabel("epochs")
plt.ylabel("Accuracy")
plt.subplot(1, 2, 2)
plt.title("(MC - Approx) Histogram")
plt.hist([x[1] - x[0] for x in modeltest_1.history[:17]], bins=bins, alpha=0.5, label="varational")
plt.hist([x[1] - x[0] for x in modeltest_3.history[:17]], bins=bins, alpha=0.5, label="navie dropout")
plt.legend(loc='best')
plt.xlabel("Difference in Loss")
plt.ylabel("Count")
plt.xticks(fontsize=8, rotation=0)
# In[60]:
plt.figure(figsize=(12, 4))
plt.subplot(1, 2, 1)
plt.title("Log Loss Comparison - Validation Set")
plt.plot(np.arange(len(modeltest_2.history)), [x[1] for x in modeltest_1.history[:20]], "b-", label="variational(mc)")
plt.plot(np.arange(len(modeltest_2.history)), [x[1] for x in modeltest_2.history], "g-", label="no dropout")
plt.plot(np.arange(len(modeltest_3.history)), [x[1] for x in modeltest_3.history], "y-", label="naive dropout(mc)")
plt.legend(loc='best')
plt.xlabel("epochs")
plt.ylabel("Log Loss")
plt.subplot(1, 2, 2)
plt.title("Accuracy Comparison - Validation Set")
plt.plot(np.arange(len(modeltest_2.history)), [x[3] * 100 for x in modeltest_1.history[:20]], "b-", label="variational(mc)")
plt.plot(np.arange(len(modeltest_2.history)), [x[3] * 100 for x in modeltest_2.history], "g-", label="no dropout")
plt.plot(np.arange(len(modeltest_3.history)), [x[3] * 100 for x in modeltest_3.history], "y-", label="naive dropout(mc)")
plt.legend(loc='best')
plt.xlabel("epochs")
plt.ylabel("Accuracy (%)")
# In[ ]:
| [
"[email protected]"
] | |
de9cdc221b466b438e56e604d354af8db1542009 | 3109aaf72df47f11742aca1c5921f71e03eb9917 | /controls/views.py | 17269ee089a01d4a2c5d8d45186ee3903ba26d07 | [
"MIT"
] | permissive | kofi-teddy/accounts | a225f5639ef8993934fe69ec638d2af19d854c2d | 74633ce4038806222048d85ef9dfe97a957a6a71 | refs/heads/master | 2023-02-19T15:10:20.621628 | 2021-01-23T10:30:27 | 2021-01-23T10:30:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,867 | py | import re
from functools import reduce
from itertools import chain, groupby
from accountancy.mixins import (ResponsivePaginationMixin,
SingleObjectAuditDetailViewMixin)
from django.conf import settings
from django.contrib.auth import update_session_auth_hash
from django.contrib.auth.mixins import (LoginRequiredMixin,
PermissionRequiredMixin)
from django.contrib.auth.models import Group, User
from django.db import transaction
from django.db.models import prefetch_related_objects
from django.http import HttpResponseRedirect
from django.shortcuts import render
from django.urls import reverse_lazy
from django.views.generic import (CreateView, DetailView, ListView,
TemplateView, UpdateView)
from nominals.models import NominalTransaction
from simple_history.utils import (bulk_create_with_history,
bulk_update_with_history)
from users.mixins import LockDuringEditMixin
from users.models import UserSession
from controls.forms import (UI_PERMISSIONS, AdjustFinancialYearFormset,
FinancialYearForm,
FinancialYearInlineFormSetCreate, GroupForm,
ModuleSettingsForm, PeriodForm, UserForm)
from controls.helpers import PermissionUI
from controls.models import FinancialYear, ModuleSettings, Period
from controls.widgets import CheckboxSelectMultipleWithDataAttr
class ControlsView(LoginRequiredMixin, TemplateView):
template_name = "controls/controls.html"
class GroupsList(LoginRequiredMixin, ResponsivePaginationMixin, ListView):
paginate_by = 25
model = Group
template_name = "controls/group_list.html"
class IndividualMixin:
def get_context_data(self, **kwargs):
context_data = super().get_context_data(**kwargs)
context_data["edit"] = self.edit
return context_data
class ReadPermissionsMixin:
def get_context_data(self, **kwargs):
context_data = super().get_context_data(**kwargs)
perms = self.get_perms()
perm_ui = PermissionUI(perms)
for perm in UI_PERMISSIONS()():
perm_ui.add_to_group(perm)
perm_table_rows = perm_ui.create_table_rows()
context_data["perm_table_rows"] = perm_table_rows
return context_data
class GroupDetail(
LoginRequiredMixin,
PermissionRequiredMixin,
SingleObjectAuditDetailViewMixin,
ReadPermissionsMixin,
IndividualMixin,
DetailView):
model = Group
template_name = "controls/group_detail.html"
edit = False
permission_required = "auth.view_group"
def get_perms(self):
return self.object.permissions.all()
class GroupUpdate(
LoginRequiredMixin,
PermissionRequiredMixin,
LockDuringEditMixin,
SingleObjectAuditDetailViewMixin,
IndividualMixin,
UpdateView):
model = Group
template_name = "controls/group_edit.html"
success_url = reverse_lazy("controls:groups")
form_class = GroupForm
edit = True
permission_required = "auth.change_group"
class GroupCreate(LoginRequiredMixin, PermissionRequiredMixin, CreateView):
model = Group
template_name = "controls/group_edit.html"
success_url = reverse_lazy("controls:groups")
form_class = GroupForm
permission_required = "auth.add_group"
class UsersList(LoginRequiredMixin, ListView):
paginate_by = 25
model = User
template_name = "controls/users_list.html"
"""
The permissions tab in the UI for the user detail and user edit shows BOTH
the permissions of the groups the user belongs to and the permissions for that particular user.
In edit mode the user only has the option to change the latter.
"""
user_fields_to_show_in_audit = [
'is_superuser',
'username',
'first_name',
'last_name',
'email',
'is_active',
]
class UserDetail(
LoginRequiredMixin,
PermissionRequiredMixin,
SingleObjectAuditDetailViewMixin,
ReadPermissionsMixin,
DetailView):
model = User
template_name = "controls/user_detail.html"
edit = False
permission_required = "auth.view_user"
ui_audit_fields = user_fields_to_show_in_audit
def get_perms(self):
user = self.object
user_perms = user.user_permissions.all()
prefetch_related_objects([user], "groups__permissions__content_type")
group_perms = [group.permissions.all() for group in user.groups.all()]
group_perms = list(chain(*group_perms))
if user_perms and group_perms:
return list(set(chain(user_perms, group_perms)))
if user_perms:
return user_perms
if group_perms:
return group_perms
class UserEdit(
LoginRequiredMixin,
PermissionRequiredMixin,
LockDuringEditMixin,
SingleObjectAuditDetailViewMixin,
IndividualMixin,
UpdateView):
model = User
form_class = UserForm
template_name = "controls/user_edit.html"
success_url = reverse_lazy("controls:users")
edit = True
permission_required = "auth.change_user"
ui_audit_fields = user_fields_to_show_in_audit
# because 5 db hits are needed for POST
@transaction.atomic
def dispatch(self, request, *args, **kwargs):
return super().dispatch(request, *args, **kwargs)
def get_form(self):
form = self.form_class(**self.get_form_kwargs())
user = self.object
prefetch_related_objects([user], "groups__permissions__content_type")
group_perms = [group.permissions.all()
for group in user.groups.all()] # does hit db again
group_perms = list(chain(*group_perms)) # does not hit db again
group_perms = {perm.pk: perm for perm in group_perms}
self.group_perms = group_perms
form.fields["user_permissions"].widget.group_perms = group_perms
return form
def form_valid(self, form):
groups = form.cleaned_data.get("groups")
user_permissions = form.cleaned_data.get("user_permissions")
        # because the group permissions are included in the form, i.e. checkboxes are ticked for
        # permissions which belong only to groups and not to users, we need to discount all such permissions
user_permissions = [
perm for perm in user_permissions if perm.pk not in self.group_perms]
form.instance.user_permissions.clear() # hit db
form.instance.user_permissions.add(*user_permissions) # hit db
form.instance.groups.clear() # hit db
form.instance.groups.add(*groups) # hit db
response = super().form_valid(form)
# this deletes the current user session
update_session_auth_hash(self.request, self.object)
UserSession.objects.create(
user=self.object, session_id=self.request.session.session_key)
return response
class UserCreate(LoginRequiredMixin, PermissionRequiredMixin, CreateView):
model = User
form_class = UserForm
template_name = "controls/user_edit.html"
success_url = reverse_lazy("controls:users")
permission_required = "auth.add_user"
def get_form(self):
self.form_class.declared_fields["user_permissions"].widget = CheckboxSelectMultipleWithDataAttr(
attrs={
"data-option-attrs": [
"codename",
"content_type__app_label",
],
}
)
form = super().get_form()
return form
class FinancialYearList(ListView):
model = FinancialYear
template_name = "controls/fy_list.html"
def convert_month_years_to_full_dates(post_data_copy):
for k, v in post_data_copy.items():
if re.search(r"month_start", k):
if v:
v = "01-" + v
if re.search(r"01-\d{2}-\d{4}", v):
post_data_copy[k] = v
return post_data_copy
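
# For illustration (hypothetical field name): a posted value such as
# {"period-0-month_start": "05-2021"} becomes "01-05-2021" here, i.e. day 01
# is prepended so the value parses as a full dd-mm-yyyy date downstream.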
class FinancialYearCreate(LoginRequiredMixin, PermissionRequiredMixin, CreateView):
model = FinancialYear
template_name = 'controls/fy_create.html'
form_class = FinancialYearForm
success_url = reverse_lazy("controls:index")
permission_required = "controls.add_financialyear"
def get_context_data(self, **kwargs):
context_data = super().get_context_data(**kwargs)
if self.request.POST:
d = convert_month_years_to_full_dates(self.request.POST.copy())
context_data["periods"] = FinancialYearInlineFormSetCreate(
d, prefix="period")
else:
context_data["periods"] = FinancialYearInlineFormSetCreate(
prefix="period")
return context_data
def form_valid(self, form):
context_data = self.get_context_data()
periods = context_data["periods"]
if periods.is_valid():
fy = form.save()
self.object = fy
periods.instance = fy
periods.save(commit=False)
period_instances = [p.instance for p in periods]
period_instances.sort(key=lambda p: p.month_start)
i = 1
for period in period_instances:
period.fy_and_period = f"{fy.financial_year}{str(i).rjust(2, '0')}"
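                # e.g. a 2021 financial year yields fy_and_period values "202101",
                # "202102", ... (assuming financial_year holds the four-digit year)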
period.period = str(i).rjust(2, '0')
i = i + 1
bulk_create_with_history(
[*period_instances],
Period
)
first_period_of_fy = fy.first_period()
mod_settings = ModuleSettings.objects.first()
# when a FY is created for the first time we need to set the default
# posting periods for each posting module in the software
for setting, period in mod_settings.module_periods().items():
if not period:
setattr(mod_settings, setting, first_period_of_fy)
mod_settings.save()
return HttpResponseRedirect(self.get_success_url())
return self.render_to_response(context_data)
class FinancialYearDetail(LoginRequiredMixin, PermissionRequiredMixin, DetailView):
model = FinancialYear
template_name = "controls/fy_detail.html"
context_object_name = "financial_year"
permission_required = "controls.view_financialyear"
def get_context_data(self, **kwargs):
context_data = super().get_context_data(**kwargs)
periods = self.object.periods.all()
context_data["periods"] = periods
return context_data
class AdjustFinancialYear(LoginRequiredMixin, PermissionRequiredMixin, UpdateView):
model = Period
template_name = "controls/fy_adjust.html"
form_class = AdjustFinancialYearFormset
success_url = reverse_lazy("controls:fy_list")
prefix = "period"
permission_required = "controls.change_fy"
def get_object(self):
# form is in fact a formset
# so every period object can be edited
return None
def get_success_url(self):
return self.success_url
def get_form_kwargs(self):
kwargs = super().get_form_kwargs()
kwargs.pop("instance")
kwargs["queryset"] = Period.objects.all()
return kwargs
def form_invalid(self, formset):
if any([form.non_field_errors() for form in formset]):
formset.has_non_field_errors = True
if formset.non_form_errors():
formset.has_non_field_errors = True
return super().form_invalid(formset)
def form_valid(self, formset):
formset.save(commit=False)
        fy_has_changed = {}  # use dict to avoid recording multiple occurrences of the same
        # FY being affected
for form in formset:
if 'fy' in form.changed_data:
fy_id = form.initial.get("fy")
fy_queryset = form.fields["fy"]._queryset
fy = next(fy for fy in fy_queryset if fy.pk == fy_id)
fy_has_changed[fy_id] = fy
        # we need to roll back now to the earliest of the financial years which have changed
        # do this before we make changes to the period objects and FY objects
fys = [fy for fy in fy_has_changed.values()]
if fys:
earliest_fy_affected = min(fys, key=lambda fy: fy.financial_year)
if earliest_fy_affected:
# because user may not in fact change anything
# if the next year after the earliest affected does not exist no exception is thrown
# the db query just won't delete anything
NominalTransaction.objects.rollback_fy(
earliest_fy_affected.financial_year + 1)
# now all the bfs have been deleted we can change the period objects
instances = [form.instance for form in formset]
fy_period_counts = {}
for fy_id, periods in groupby(instances, key=lambda p: p.fy_id):
fy_period_counts[fy_id] = len(list(periods))
fys = FinancialYear.objects.all()
for fy in fys:
fy.number_of_periods = fy_period_counts[fy.pk]
# no point auditing this
FinancialYear.objects.bulk_update(fys, ["number_of_periods"])
bulk_update_with_history(
instances, Period, ["period", "fy_and_period", "fy"])
return HttpResponseRedirect(self.get_success_url())
class ModuleSettingsUpdate(
LoginRequiredMixin,
PermissionRequiredMixin,
SingleObjectAuditDetailViewMixin,
UpdateView):
model = ModuleSettings
form_class = ModuleSettingsForm
template_name = "controls/module_settings.html"
success_url = reverse_lazy("controls:index")
permission_required = "controls.change_modulesettings"
def get_object(self):
return ModuleSettings.objects.first()
| [
"[email protected]"
] | |
4aafe1f881c5b33b219068a5220f67354a33717f | c72252f96a1021ba3f9b812020b74bda258bf465 | /S12学习/day3/code/configfile.py | 8a00a0bc88d4cfb073f70be09115f0b43d8c233f | [] | no_license | yzwy1988/cloud | 0251af05b8cc2a8fffdc6f739a01ba9383353dc5 | 6e87f26497072f41b20c1b0696e5605a52987c50 | refs/heads/master | 2021-01-17T22:19:52.327370 | 2016-02-22T10:34:59 | 2016-02-22T10:34:59 | 52,455,959 | 2 | 0 | null | 2016-02-24T16:14:50 | 2016-02-24T16:14:49 | null | UTF-8 | Python | false | false | 1,267 | py | # /usr/bin/env python
# -*- coding:utf-8 -*-
# startswith: checks whether a line begins with a given field
import json
def check(backend):
check_list = []
flag = False
with open('back', 'r') as f:
for line in f:
if line.startswith('backend'):
                if backend == line.strip().split()[1]: # strip removes the newline, split breaks on whitespace
flag = True
continue
if flag and line.startswith('backend'):
break
if flag and line.strip():
check_list.append(line)
return check_list
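
# For reference, the 'back' file parsed above is assumed to follow haproxy
# syntax; a hypothetical sample section:
# backend www.oldboy.org
#         server 100.1.7.9 100.1.7.9 weight 20 maxconn 3000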
def add(inp_dic):
    # assumed completion of the malformed format string ('weight % maxconn %')
    add_mess = 'server %(name)s %(addr)s weight %(weight)s maxconn %(maxconn)s' % inp_dic
    return add_mess
def menu():
print('''
****************
    1 View data
    2 Add data
    3 Delete data
****************
''')
def main():
menu()
    action = input('Select an operation number: ')
if action == '1':
        backend = input('''Enter the field to operate on in the following format:
www.oldboy.org
''')
check(backend)
if action == '2':
        inp_data = input('''
        Enter the field to operate on in the following format:
        server 100.1.7.9 100.1.7.9 weight 20 maxconn 3000
        ''')
        # assumed completion: json.loads() had no argument; the typed line is
        # whitespace-separated, so parse it into the fields add() expects
        parts = inp_data.split()
        inp_dic = {'name': parts[1], 'addr': parts[2], 'weight': parts[4], 'maxconn': parts[6]}
        add(inp_dic)
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
f9edc8d9a223c008a70ef3224c3054621286d518 | 12258001571bd504223fbf4587870960fa93a46d | /client/config.py | a629d5d3999e56e775ec3430d476a68ae01ea7a4 | [] | no_license | Nik0las1984/mud-obj | 0bd71e71855a9b0f0d3244dec2c877bd212cdbd2 | 5d74280724ff6c6ac1b2d3a7c86b382e512ecf4d | refs/heads/master | 2023-01-07T04:12:33.472377 | 2019-10-11T09:10:14 | 2019-10-11T09:10:14 | 69,223,190 | 2 | 0 | null | 2022-12-26T20:15:20 | 2016-09-26T07:11:49 | Python | UTF-8 | Python | false | false | 190 | py | # coding=utf-8
auto_login = False
try:
from local_config import *
except ImportError, e:
print 'Unable to load local_config.py:', e
if 'plugins' not in locals():
plugins = []
| [
"[email protected]"
] | |
6d7330abeb85dd4954ae55bd45295a5be17a49bd | fffb732290af97687ea3221ce4a6ce4d95640aff | /courses/w10_opencv/source/OpenCV_in_Ubuntu/Python/mycam_02.py | a69e21c219e5ed6a45cf86fad76f32c973c641fb | [] | no_license | NamWoo/self_driving_car | 851de73ae909639e03756eea4d49ab663447fc19 | cd5c1142c9e543e607ca9dc258f689de6879d207 | refs/heads/master | 2021-07-24T19:51:54.459485 | 2021-07-06T13:58:19 | 2021-07-06T13:58:19 | 186,267,543 | 9 | 7 | null | null | null | null | UTF-8 | Python | false | false | 493 | py | import numpy as np
import cv2
def receive():
cap = cv2.VideoCapture('udpsrc port=5200 caps=application/x-rtp,media=(string)video,clock-rate=(int)90000,encoding-name=(string)H264,payload=(int)96!rtph264depay!decodebin!videoconvert!appsink',cv2.CAP_GSTREAMER)
while True:
ret,frame = cap.read()
if not ret:
print('empty frame')
continue
cv2.imshow('receive', frame)
if cv2.waitKey(1)&0xFF == ord('q'):
break
    cap.release()
    cv2.destroyAllWindows()
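
# A matching sender is sketched below for reference only; the camera index,
# host, resolution, fps and encoder settings are assumptions, not part of
# the original file.
def send():
    cap = cv2.VideoCapture(0)
    out = cv2.VideoWriter('appsrc ! videoconvert ! x264enc tune=zerolatency speed-preset=superfast'
                          ' ! rtph264pay config-interval=1 pt=96 ! udpsink host=127.0.0.1 port=5200',
                          cv2.CAP_GSTREAMER, 0, 20.0, (640, 480), True)
    while True:
        ret, frame = cap.read()
        if not ret:
            break
        out.write(cv2.resize(frame, (640, 480)))  # writer expects the declared frame size
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    cap.release()
    out.release()
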
receive() | [
"[email protected]"
] | |
da183faec87314655b87ce430d6c703df9991366 | 4ef688b93866285bcc27e36add76dc8d4a968387 | /moto/ds/responses.py | 46d204c1e27ec3b9a35fcf38df9cfb7e7319d764 | [
"Apache-2.0"
] | permissive | localstack/moto | cec77352df216cac99d5e0a82d7ada933950a0e6 | b0b2947e98e05d913d7ee2a0379c1bec73f7d0ff | refs/heads/localstack | 2023-09-01T05:18:16.680470 | 2023-07-10T09:00:26 | 2023-08-07T14:10:06 | 118,838,444 | 22 | 42 | Apache-2.0 | 2023-09-07T02:07:17 | 2018-01-25T00:10:03 | Python | UTF-8 | Python | false | false | 6,529 | py | """Handles Directory Service requests, invokes methods, returns responses."""
import json
from moto.core.exceptions import InvalidToken
from moto.core.responses import BaseResponse
from moto.ds.exceptions import InvalidNextTokenException
from moto.ds.models import ds_backends, DirectoryServiceBackend
class DirectoryServiceResponse(BaseResponse):
"""Handler for DirectoryService requests and responses."""
def __init__(self) -> None:
super().__init__(service_name="ds")
@property
def ds_backend(self) -> DirectoryServiceBackend:
"""Return backend instance specific for this region."""
return ds_backends[self.current_account][self.region]
def connect_directory(self) -> str:
"""Create an AD Connector to connect to a self-managed directory."""
name = self._get_param("Name")
short_name = self._get_param("ShortName")
password = self._get_param("Password")
description = self._get_param("Description")
size = self._get_param("Size")
connect_settings = self._get_param("ConnectSettings")
tags = self._get_param("Tags", [])
directory_id = self.ds_backend.connect_directory(
region=self.region,
name=name,
short_name=short_name,
password=password,
description=description,
size=size,
connect_settings=connect_settings,
tags=tags,
)
return json.dumps({"DirectoryId": directory_id})
def create_directory(self) -> str:
"""Create a Simple AD directory."""
name = self._get_param("Name")
short_name = self._get_param("ShortName")
password = self._get_param("Password")
description = self._get_param("Description")
size = self._get_param("Size")
vpc_settings = self._get_param("VpcSettings")
tags = self._get_param("Tags", [])
directory_id = self.ds_backend.create_directory(
region=self.region,
name=name,
short_name=short_name,
password=password,
description=description,
size=size,
vpc_settings=vpc_settings,
tags=tags,
)
return json.dumps({"DirectoryId": directory_id})
def create_alias(self) -> str:
"""Create an alias and assign the alias to the directory."""
directory_id = self._get_param("DirectoryId")
alias = self._get_param("Alias")
response = self.ds_backend.create_alias(directory_id, alias)
return json.dumps(response)
def create_microsoft_ad(self) -> str:
"""Create a Microsoft AD directory."""
name = self._get_param("Name")
short_name = self._get_param("ShortName")
password = self._get_param("Password")
description = self._get_param("Description")
vpc_settings = self._get_param("VpcSettings")
edition = self._get_param("Edition")
tags = self._get_param("Tags", [])
directory_id = self.ds_backend.create_microsoft_ad(
region=self.region,
name=name,
short_name=short_name,
password=password,
description=description,
vpc_settings=vpc_settings,
edition=edition,
tags=tags,
)
return json.dumps({"DirectoryId": directory_id})
def delete_directory(self) -> str:
"""Delete a Directory Service directory."""
directory_id_arg = self._get_param("DirectoryId")
directory_id = self.ds_backend.delete_directory(directory_id_arg)
return json.dumps({"DirectoryId": directory_id})
def describe_directories(self) -> str:
"""Return directory info for the given IDs or all IDs."""
directory_ids = self._get_param("DirectoryIds")
next_token = self._get_param("NextToken")
limit = self._get_int_param("Limit")
try:
(directories, next_token) = self.ds_backend.describe_directories(
directory_ids, next_token=next_token, limit=limit
)
except InvalidToken as exc:
raise InvalidNextTokenException() from exc
response = {"DirectoryDescriptions": [x.to_dict() for x in directories]}
if next_token:
response["NextToken"] = next_token
return json.dumps(response)
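
    # Example request body handled above (hypothetical values):
    #   {"DirectoryIds": ["d-1234567890"], "Limit": 1, "NextToken": "<opaque>"}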
def disable_sso(self) -> str:
"""Disable single-sign on for a directory."""
directory_id = self._get_param("DirectoryId")
username = self._get_param("UserName")
password = self._get_param("Password")
self.ds_backend.disable_sso(directory_id, username, password)
return ""
def enable_sso(self) -> str:
"""Enable single-sign on for a directory."""
directory_id = self._get_param("DirectoryId")
username = self._get_param("UserName")
password = self._get_param("Password")
self.ds_backend.enable_sso(directory_id, username, password)
return ""
def get_directory_limits(self) -> str:
"""Return directory limit information for the current region."""
limits = self.ds_backend.get_directory_limits()
return json.dumps({"DirectoryLimits": limits})
def add_tags_to_resource(self) -> str:
"""Add or overwrite on or more tags for specified directory."""
resource_id = self._get_param("ResourceId")
tags = self._get_param("Tags")
self.ds_backend.add_tags_to_resource(resource_id=resource_id, tags=tags)
return ""
def remove_tags_from_resource(self) -> str:
"""Removes tags from a directory."""
resource_id = self._get_param("ResourceId")
tag_keys = self._get_param("TagKeys")
self.ds_backend.remove_tags_from_resource(
resource_id=resource_id, tag_keys=tag_keys
)
return ""
def list_tags_for_resource(self) -> str:
"""Lists all tags on a directory."""
resource_id = self._get_param("ResourceId")
next_token = self._get_param("NextToken")
limit = self._get_param("Limit")
try:
tags, next_token = self.ds_backend.list_tags_for_resource(
resource_id=resource_id, next_token=next_token, limit=limit
)
except InvalidToken as exc:
raise InvalidNextTokenException() from exc
response = {"Tags": tags}
if next_token:
response["NextToken"] = next_token
return json.dumps(response)
| [
"[email protected]"
] | |
49919addd199e8a7aff5d7ceb03465d0ee8fa6c8 | 3da6b8a0c049a403374e787149d9523012a1f0fc | /网易云课堂/Python办公自动化实战/01_开启自动化人生/batch_docs.py | d407f2929fd181400dee176ff02cc8571a3889b9 | [] | no_license | AndersonHJB/PyCharm_Coder | d65250d943e84b523f022f65ef74b13e7c5bc348 | 32f2866f68cc3a391795247d6aba69a7156e6196 | refs/heads/master | 2022-07-25T11:43:58.057376 | 2021-08-03T02:50:01 | 2021-08-03T02:50:01 | 348,922,058 | 3 | 3 | null | 2021-09-05T02:20:10 | 2021-03-18T02:57:16 | Python | UTF-8 | Python | false | false | 790 | py | # -*- coding: utf-8 -*-
# @Time : 2021/5/6 8:22 PM
# @Author : AI悦创
# @FileName: batch_docs.py
# @Software: PyCharm
# @Blog :http://www.aiyc.top
# @WeChat Official Account : AI悦创
from docx import Document # create .docx documents
from docx.oxml.ns import qn # set East Asian (Chinese) fonts
from docx.enum.text import WD_PARAGRAPH_ALIGNMENT # paragraph alignment
from docx.shared import Pt, RGBColor, Mm, Cm # point sizes, colours, millimetre/centimetre units
import random
import qrcode
from openpyxl import load_workbook
import xlrd
def qr_code():
    # generate a 4-digit sign-in code
signin_code = random.randint(1000, 9999)
img = qrcode.make('%s' % signin_code)
filename = '%s.png' % signin_code
img.save('qr/%s' % filename)
return filename
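
# Minimal sketch (assumption) of how the helpers above could be combined to
# stamp one QR code per student into a .docx; the column layout of
# students.xlsx and the output naming are hypothetical.
def build_docs(rows):
    for row in rows:
        doc = Document()
        para = doc.add_paragraph()
        para.alignment = WD_PARAGRAPH_ALIGNMENT.CENTER
        run = para.add_run(str(row[0]))  # assumes the student name sits in column 0
        run.font.size = Pt(16)
        doc.add_picture('qr/%s' % qr_code(), width=Cm(4))
        doc.save('%s.docx' % row[0])
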
def excel_read():
file = xlrd.open_workbook('students.xlsx')
    sheet = file.sheet_by_name(file.sheet_names()[0])
    # the source file is truncated at this point; returning the rows is an
    # assumed completion so the helper is usable
    return [sheet.row_values(i) for i in range(sheet.nrows)] | [
"[email protected]"
] | |
142d52ca9c1eefcf1920bcf440428ffc4f039da6 | e9c9e38ed91969df78bbd7f9ca2a0fdb264d8ddb | /lib/python3.8/site-packages/ansible_collections/fortinet/fortimanager/plugins/modules/fmgr_fsp_vlan_dynamicmapping_interface_ipv6_ip6prefixlist.py | 92edc1ea33c0ac79f071983a1fb2e9e4be4ab7a5 | [] | no_license | Arceusir/PRELIM_SKILLS_EXAM | 882fcf2868926f0bbfe1fb18d50e5fe165936c02 | b685c5b28d058f59de2875c7579739c545df2e0c | refs/heads/master | 2023-08-15T07:30:42.303283 | 2021-10-09T01:27:19 | 2021-10-09T01:27:19 | 415,167,192 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,077 | py | #!/usr/bin/python
from __future__ import absolute_import, division, print_function
# Copyright 2019-2021 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
__metaclass__ = type
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: fmgr_fsp_vlan_dynamicmapping_interface_ipv6_ip6prefixlist
short_description: Advertised prefix list.
description:
- This module is able to configure a FortiManager device.
- Examples include all parameters and values which need to be adjusted to data sources before usage.
version_added: "2.10"
author:
- Link Zheng (@chillancezen)
- Jie Xue (@JieX19)
- Frank Shen (@fshen01)
- Hongbin Lu (@fgtdev-hblu)
notes:
    - Running in workspace locking mode is supported in this FortiManager module; the top
      level parameters workspace_locking_adom and workspace_locking_timeout help do the work.
- To create or update an object, use state present directive.
- To delete an object, use state absent directive.
    - Normally, running one module can fail when a non-zero rc is returned. You can also override
the conditions to fail or succeed with parameters rc_failed and rc_succeeded
options:
enable_log:
description: Enable/Disable logging for task
required: false
type: bool
default: false
proposed_method:
description: The overridden method for the underlying Json RPC request
required: false
type: str
choices:
- update
- set
- add
bypass_validation:
        description: only set to True when the module schema differs from the FortiManager API structure; the module continues to execute without validating parameters
required: false
type: bool
default: false
workspace_locking_adom:
description: the adom to lock for FortiManager running in workspace mode, the value can be global and others including root
required: false
type: str
workspace_locking_timeout:
description: the maximum time in seconds to wait for other user to release the workspace lock
required: false
type: int
default: 300
state:
description: the directive to create, update or delete an object
type: str
required: true
choices:
- present
- absent
rc_succeeded:
        description: the rc codes list with which the conditions to succeed will be overridden
type: list
required: false
rc_failed:
        description: the rc codes list with which the conditions to fail will be overridden
type: list
required: false
adom:
description: the parameter (adom) in requested url
type: str
required: true
vlan:
description: the parameter (vlan) in requested url
type: str
required: true
dynamic_mapping:
description: the parameter (dynamic_mapping) in requested url
type: str
required: true
fsp_vlan_dynamicmapping_interface_ipv6_ip6prefixlist:
description: the top level parameters set
required: false
type: dict
suboptions:
autonomous-flag:
type: str
description: no description
choices:
- 'disable'
- 'enable'
dnssl:
description: no description
type: str
onlink-flag:
type: str
description: no description
choices:
- 'disable'
- 'enable'
preferred-life-time:
type: int
description: no description
prefix:
type: str
description: no description
rdnss:
description: no description
type: str
valid-life-time:
type: int
description: no description
'''
EXAMPLES = '''
- hosts: fortimanager-inventory
collections:
- fortinet.fortimanager
connection: httpapi
vars:
ansible_httpapi_use_ssl: True
ansible_httpapi_validate_certs: False
ansible_httpapi_port: 443
tasks:
- name: Advertised prefix list.
fmgr_fsp_vlan_dynamicmapping_interface_ipv6_ip6prefixlist:
bypass_validation: False
workspace_locking_adom: <value in [global, custom adom including root]>
workspace_locking_timeout: 300
rc_succeeded: [0, -2, -3, ...]
rc_failed: [-2, -3, ...]
adom: <your own value>
vlan: <your own value>
dynamic_mapping: <your own value>
state: <value in [present, absent]>
fsp_vlan_dynamicmapping_interface_ipv6_ip6prefixlist:
autonomous-flag: <value in [disable, enable]>
dnssl: <value of string>
onlink-flag: <value in [disable, enable]>
preferred-life-time: <value of integer>
prefix: <value of string>
rdnss: <value of string>
valid-life-time: <value of integer>
'''
RETURN = '''
request_url:
description: The full url requested
returned: always
type: str
sample: /sys/login/user
response_code:
description: The status of api request
returned: always
type: int
sample: 0
response_message:
description: The descriptive message of the api response
type: str
returned: always
sample: OK.
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.connection import Connection
from ansible_collections.fortinet.fortimanager.plugins.module_utils.napi import NAPIManager
from ansible_collections.fortinet.fortimanager.plugins.module_utils.napi import check_galaxy_version
from ansible_collections.fortinet.fortimanager.plugins.module_utils.napi import check_parameter_bypass
def main():
jrpc_urls = [
'/pm/config/global/obj/fsp/vlan/{vlan}/dynamic_mapping/{dynamic_mapping}/interface/ipv6/ip6-prefix-list',
'/pm/config/adom/{adom}/obj/fsp/vlan/{vlan}/dynamic_mapping/{dynamic_mapping}/interface/ipv6/ip6-prefix-list'
]
perobject_jrpc_urls = [
'/pm/config/global/obj/fsp/vlan/{vlan}/dynamic_mapping/{dynamic_mapping}/interface/ipv6/ip6-prefix-list/{ip6-prefix-list}',
'/pm/config/adom/{adom}/obj/fsp/vlan/{vlan}/dynamic_mapping/{dynamic_mapping}/interface/ipv6/ip6-prefix-list/{ip6-prefix-list}'
]
url_params = ['adom', 'vlan', 'dynamic_mapping']
module_primary_key = None
module_arg_spec = {
'enable_log': {
'type': 'bool',
'required': False,
'default': False
},
'proposed_method': {
'type': 'str',
'required': False,
'choices': [
'set',
'update',
'add'
]
},
'bypass_validation': {
'type': 'bool',
'required': False,
'default': False
},
'workspace_locking_adom': {
'type': 'str',
'required': False
},
'workspace_locking_timeout': {
'type': 'int',
'required': False,
'default': 300
},
'rc_succeeded': {
'required': False,
'type': 'list'
},
'rc_failed': {
'required': False,
'type': 'list'
},
'state': {
'type': 'str',
'required': True,
'choices': [
'present',
'absent'
]
},
'adom': {
'required': True,
'type': 'str'
},
'vlan': {
'required': True,
'type': 'str'
},
'dynamic_mapping': {
'required': True,
'type': 'str'
},
'fsp_vlan_dynamicmapping_interface_ipv6_ip6prefixlist': {
'required': False,
'type': 'dict',
'revision': {
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': True,
'6.4.5': True,
'7.0.0': True
},
'options': {
'autonomous-flag': {
'required': False,
'revision': {
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': True,
'6.4.5': True,
'7.0.0': True
},
'choices': [
'disable',
'enable'
],
'type': 'str'
},
'dnssl': {
'required': False,
'revision': {
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': True,
'6.4.5': True,
'7.0.0': True
},
'type': 'str'
},
'onlink-flag': {
'required': False,
'revision': {
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': True,
'6.4.5': True,
'7.0.0': True
},
'choices': [
'disable',
'enable'
],
'type': 'str'
},
'preferred-life-time': {
'required': False,
'revision': {
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': True,
'6.4.5': True,
'7.0.0': True
},
'type': 'int'
},
'prefix': {
'required': False,
'revision': {
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': True,
'6.4.5': True,
'7.0.0': True
},
'type': 'str'
},
'rdnss': {
'required': False,
'revision': {
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': True,
'6.4.5': True,
'7.0.0': True
},
'type': 'str'
},
'valid-life-time': {
'required': False,
'revision': {
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': True,
'6.4.5': True,
'7.0.0': True
},
'type': 'int'
}
}
}
}
params_validation_blob = []
check_galaxy_version(module_arg_spec)
module = AnsibleModule(argument_spec=check_parameter_bypass(module_arg_spec, 'fsp_vlan_dynamicmapping_interface_ipv6_ip6prefixlist'),
supports_check_mode=False)
fmgr = None
if module._socket_path:
connection = Connection(module._socket_path)
connection.set_option('enable_log', module.params['enable_log'] if 'enable_log' in module.params else False)
fmgr = NAPIManager(jrpc_urls, perobject_jrpc_urls, module_primary_key, url_params, module, connection, top_level_schema_name='data')
fmgr.validate_parameters(params_validation_blob)
fmgr.process_curd(argument_specs=module_arg_spec)
else:
module.fail_json(msg='MUST RUN IN HTTPAPI MODE')
module.exit_json(meta=module.params)
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
1babf3615721b1fdb611c2f462dddbe3f692de44 | 24fe1f54fee3a3df952ca26cce839cc18124357a | /servicegraph/lib/python2.7/site-packages/acimodel-4.0_3d-py2.7.egg/cobra/modelimpl/proc/procmemhist1d.py | be140757889fe189e82b006962eee9f8a0791f1e | [] | no_license | aperiyed/servicegraph-cloudcenter | 4b8dc9e776f6814cf07fe966fbd4a3481d0f45ff | 9eb7975f2f6835e1c0528563a771526896306392 | refs/heads/master | 2023-05-10T17:27:18.022381 | 2020-01-20T09:18:28 | 2020-01-20T09:18:28 | 235,065,676 | 0 | 0 | null | 2023-05-01T21:19:14 | 2020-01-20T09:36:37 | Python | UTF-8 | Python | false | false | 16,922 | py | # coding=UTF-8
# **********************************************************************
# Copyright (c) 2013-2019 Cisco Systems, Inc. All rights reserved
# written by zen warriors, do not modify!
# **********************************************************************
from cobra.mit.meta import ClassMeta
from cobra.mit.meta import StatsClassMeta
from cobra.mit.meta import CounterMeta
from cobra.mit.meta import PropMeta
from cobra.mit.meta import Category
from cobra.mit.meta import SourceRelationMeta
from cobra.mit.meta import NamedSourceRelationMeta
from cobra.mit.meta import TargetRelationMeta
from cobra.mit.meta import DeploymentPathMeta, DeploymentCategory
from cobra.model.category import MoCategory, PropCategory, CounterCategory
from cobra.mit.mo import Mo
# ##################################################
class ProcMemHist1d(Mo):
"""
A class that represents historical statistics for Process memory in a 1 day sampling interval. This class updates every hour.
"""
meta = StatsClassMeta("cobra.model.proc.ProcMemHist1d", "Process memory")
counter = CounterMeta("used", CounterCategory.GAUGE, "kB", "Used memory")
counter._propRefs[PropCategory.IMPLICIT_MIN] = "usedMin"
counter._propRefs[PropCategory.IMPLICIT_MAX] = "usedMax"
counter._propRefs[PropCategory.IMPLICIT_AVG] = "usedAvg"
counter._propRefs[PropCategory.IMPLICIT_SUSPECT] = "usedSpct"
counter._propRefs[PropCategory.IMPLICIT_THRESHOLDED] = "usedThr"
counter._propRefs[PropCategory.IMPLICIT_TREND] = "usedTr"
meta._counters.append(counter)
counter = CounterMeta("alloced", CounterCategory.GAUGE, "kB", "Allocated memory")
counter._propRefs[PropCategory.IMPLICIT_MIN] = "allocedMin"
counter._propRefs[PropCategory.IMPLICIT_MAX] = "allocedMax"
counter._propRefs[PropCategory.IMPLICIT_AVG] = "allocedAvg"
counter._propRefs[PropCategory.IMPLICIT_SUSPECT] = "allocedSpct"
counter._propRefs[PropCategory.IMPLICIT_THRESHOLDED] = "allocedThr"
counter._propRefs[PropCategory.IMPLICIT_TREND] = "allocedTr"
meta._counters.append(counter)
meta.moClassName = "procProcMemHist1d"
meta.rnFormat = "HDprocProcMem1d-%(index)s"
meta.category = MoCategory.STATS_HISTORY
meta.label = "historical Process memory stats in 1 day"
meta.writeAccessMask = 0x800000000000001
meta.readAccessMask = 0x800000000000001
meta.isDomainable = False
meta.isReadOnly = True
meta.isConfigurable = False
meta.isDeletable = False
meta.isContextRoot = True
meta.parentClasses.add("cobra.model.proc.Proc")
meta.superClasses.add("cobra.model.stats.Item")
meta.superClasses.add("cobra.model.stats.Hist")
meta.superClasses.add("cobra.model.proc.ProcMemHist")
meta.rnPrefixes = [
('HDprocProcMem1d-', True),
]
prop = PropMeta("str", "allocedAvg", "allocedAvg", 10623, PropCategory.IMPLICIT_AVG)
prop.label = "Allocated memory average value"
prop.isOper = True
prop.isStats = True
meta.props.add("allocedAvg", prop)
prop = PropMeta("str", "allocedMax", "allocedMax", 10622, PropCategory.IMPLICIT_MAX)
prop.label = "Allocated memory maximum value"
prop.isOper = True
prop.isStats = True
meta.props.add("allocedMax", prop)
prop = PropMeta("str", "allocedMin", "allocedMin", 10621, PropCategory.IMPLICIT_MIN)
prop.label = "Allocated memory minimum value"
prop.isOper = True
prop.isStats = True
meta.props.add("allocedMin", prop)
prop = PropMeta("str", "allocedSpct", "allocedSpct", 10624, PropCategory.IMPLICIT_SUSPECT)
prop.label = "Allocated memory suspect count"
prop.isOper = True
prop.isStats = True
meta.props.add("allocedSpct", prop)
prop = PropMeta("str", "allocedThr", "allocedThr", 10625, PropCategory.IMPLICIT_THRESHOLDED)
prop.label = "Allocated memory thresholded flags"
prop.isOper = True
prop.isStats = True
prop.defaultValue = 0
prop.defaultValueStr = "unspecified"
prop._addConstant("avgCrit", "avg-severity-critical", 2199023255552)
prop._addConstant("avgHigh", "avg-crossed-high-threshold", 68719476736)
prop._addConstant("avgLow", "avg-crossed-low-threshold", 137438953472)
prop._addConstant("avgMajor", "avg-severity-major", 1099511627776)
prop._addConstant("avgMinor", "avg-severity-minor", 549755813888)
prop._addConstant("avgRecovering", "avg-recovering", 34359738368)
prop._addConstant("avgWarn", "avg-severity-warning", 274877906944)
prop._addConstant("cumulativeCrit", "cumulative-severity-critical", 8192)
prop._addConstant("cumulativeHigh", "cumulative-crossed-high-threshold", 256)
prop._addConstant("cumulativeLow", "cumulative-crossed-low-threshold", 512)
prop._addConstant("cumulativeMajor", "cumulative-severity-major", 4096)
prop._addConstant("cumulativeMinor", "cumulative-severity-minor", 2048)
prop._addConstant("cumulativeRecovering", "cumulative-recovering", 128)
prop._addConstant("cumulativeWarn", "cumulative-severity-warning", 1024)
prop._addConstant("lastReadingCrit", "lastreading-severity-critical", 64)
prop._addConstant("lastReadingHigh", "lastreading-crossed-high-threshold", 2)
prop._addConstant("lastReadingLow", "lastreading-crossed-low-threshold", 4)
prop._addConstant("lastReadingMajor", "lastreading-severity-major", 32)
prop._addConstant("lastReadingMinor", "lastreading-severity-minor", 16)
prop._addConstant("lastReadingRecovering", "lastreading-recovering", 1)
prop._addConstant("lastReadingWarn", "lastreading-severity-warning", 8)
prop._addConstant("maxCrit", "max-severity-critical", 17179869184)
prop._addConstant("maxHigh", "max-crossed-high-threshold", 536870912)
prop._addConstant("maxLow", "max-crossed-low-threshold", 1073741824)
prop._addConstant("maxMajor", "max-severity-major", 8589934592)
prop._addConstant("maxMinor", "max-severity-minor", 4294967296)
prop._addConstant("maxRecovering", "max-recovering", 268435456)
prop._addConstant("maxWarn", "max-severity-warning", 2147483648)
prop._addConstant("minCrit", "min-severity-critical", 134217728)
prop._addConstant("minHigh", "min-crossed-high-threshold", 4194304)
prop._addConstant("minLow", "min-crossed-low-threshold", 8388608)
prop._addConstant("minMajor", "min-severity-major", 67108864)
prop._addConstant("minMinor", "min-severity-minor", 33554432)
prop._addConstant("minRecovering", "min-recovering", 2097152)
prop._addConstant("minWarn", "min-severity-warning", 16777216)
prop._addConstant("periodicCrit", "periodic-severity-critical", 1048576)
prop._addConstant("periodicHigh", "periodic-crossed-high-threshold", 32768)
prop._addConstant("periodicLow", "periodic-crossed-low-threshold", 65536)
prop._addConstant("periodicMajor", "periodic-severity-major", 524288)
prop._addConstant("periodicMinor", "periodic-severity-minor", 262144)
prop._addConstant("periodicRecovering", "periodic-recovering", 16384)
prop._addConstant("periodicWarn", "periodic-severity-warning", 131072)
prop._addConstant("rateCrit", "rate-severity-critical", 36028797018963968)
prop._addConstant("rateHigh", "rate-crossed-high-threshold", 1125899906842624)
prop._addConstant("rateLow", "rate-crossed-low-threshold", 2251799813685248)
prop._addConstant("rateMajor", "rate-severity-major", 18014398509481984)
prop._addConstant("rateMinor", "rate-severity-minor", 9007199254740992)
prop._addConstant("rateRecovering", "rate-recovering", 562949953421312)
prop._addConstant("rateWarn", "rate-severity-warning", 4503599627370496)
prop._addConstant("trendCrit", "trend-severity-critical", 281474976710656)
prop._addConstant("trendHigh", "trend-crossed-high-threshold", 8796093022208)
prop._addConstant("trendLow", "trend-crossed-low-threshold", 17592186044416)
prop._addConstant("trendMajor", "trend-severity-major", 140737488355328)
prop._addConstant("trendMinor", "trend-severity-minor", 70368744177664)
prop._addConstant("trendRecovering", "trend-recovering", 4398046511104)
prop._addConstant("trendWarn", "trend-severity-warning", 35184372088832)
prop._addConstant("unspecified", None, 0)
meta.props.add("allocedThr", prop)
prop = PropMeta("str", "allocedTr", "allocedTr", 10626, PropCategory.IMPLICIT_TREND)
prop.label = "Allocated memory trend"
prop.isOper = True
prop.isStats = True
meta.props.add("allocedTr", prop)
prop = PropMeta("str", "childAction", "childAction", 4, PropCategory.CHILD_ACTION)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("deleteAll", "deleteall", 16384)
prop._addConstant("deleteNonPresent", "deletenonpresent", 8192)
prop._addConstant("ignore", "ignore", 4096)
meta.props.add("childAction", prop)
prop = PropMeta("str", "cnt", "cnt", 16212, PropCategory.REGULAR)
prop.label = "Number of Collections During this Interval"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("cnt", prop)
prop = PropMeta("str", "dn", "dn", 1, PropCategory.DN)
prop.label = "None"
prop.isDn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("dn", prop)
prop = PropMeta("str", "index", "index", 7047, PropCategory.REGULAR)
prop.label = "History Index"
prop.isConfig = True
prop.isAdmin = True
prop.isCreateOnly = True
prop.isNaming = True
meta.props.add("index", prop)
prop = PropMeta("str", "lastCollOffset", "lastCollOffset", 111, PropCategory.REGULAR)
prop.label = "Collection Length"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("lastCollOffset", prop)
prop = PropMeta("str", "modTs", "modTs", 7, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 0
prop.defaultValueStr = "never"
prop._addConstant("never", "never", 0)
meta.props.add("modTs", prop)
prop = PropMeta("str", "repIntvEnd", "repIntvEnd", 110, PropCategory.REGULAR)
prop.label = "Reporting End Time"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("repIntvEnd", prop)
prop = PropMeta("str", "repIntvStart", "repIntvStart", 109, PropCategory.REGULAR)
prop.label = "Reporting Start Time"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("repIntvStart", prop)
prop = PropMeta("str", "rn", "rn", 2, PropCategory.RN)
prop.label = "None"
prop.isRn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("rn", prop)
prop = PropMeta("str", "status", "status", 3, PropCategory.STATUS)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("created", "created", 2)
prop._addConstant("deleted", "deleted", 8)
prop._addConstant("modified", "modified", 4)
meta.props.add("status", prop)
prop = PropMeta("str", "usedAvg", "usedAvg", 10644, PropCategory.IMPLICIT_AVG)
prop.label = "Used memory average value"
prop.isOper = True
prop.isStats = True
meta.props.add("usedAvg", prop)
prop = PropMeta("str", "usedMax", "usedMax", 10643, PropCategory.IMPLICIT_MAX)
prop.label = "Used memory maximum value"
prop.isOper = True
prop.isStats = True
meta.props.add("usedMax", prop)
prop = PropMeta("str", "usedMin", "usedMin", 10642, PropCategory.IMPLICIT_MIN)
prop.label = "Used memory minimum value"
prop.isOper = True
prop.isStats = True
meta.props.add("usedMin", prop)
prop = PropMeta("str", "usedSpct", "usedSpct", 10645, PropCategory.IMPLICIT_SUSPECT)
prop.label = "Used memory suspect count"
prop.isOper = True
prop.isStats = True
meta.props.add("usedSpct", prop)
prop = PropMeta("str", "usedThr", "usedThr", 10646, PropCategory.IMPLICIT_THRESHOLDED)
prop.label = "Used memory thresholded flags"
prop.isOper = True
prop.isStats = True
prop.defaultValue = 0
prop.defaultValueStr = "unspecified"
prop._addConstant("avgCrit", "avg-severity-critical", 2199023255552)
prop._addConstant("avgHigh", "avg-crossed-high-threshold", 68719476736)
prop._addConstant("avgLow", "avg-crossed-low-threshold", 137438953472)
prop._addConstant("avgMajor", "avg-severity-major", 1099511627776)
prop._addConstant("avgMinor", "avg-severity-minor", 549755813888)
prop._addConstant("avgRecovering", "avg-recovering", 34359738368)
prop._addConstant("avgWarn", "avg-severity-warning", 274877906944)
prop._addConstant("cumulativeCrit", "cumulative-severity-critical", 8192)
prop._addConstant("cumulativeHigh", "cumulative-crossed-high-threshold", 256)
prop._addConstant("cumulativeLow", "cumulative-crossed-low-threshold", 512)
prop._addConstant("cumulativeMajor", "cumulative-severity-major", 4096)
prop._addConstant("cumulativeMinor", "cumulative-severity-minor", 2048)
prop._addConstant("cumulativeRecovering", "cumulative-recovering", 128)
prop._addConstant("cumulativeWarn", "cumulative-severity-warning", 1024)
prop._addConstant("lastReadingCrit", "lastreading-severity-critical", 64)
prop._addConstant("lastReadingHigh", "lastreading-crossed-high-threshold", 2)
prop._addConstant("lastReadingLow", "lastreading-crossed-low-threshold", 4)
prop._addConstant("lastReadingMajor", "lastreading-severity-major", 32)
prop._addConstant("lastReadingMinor", "lastreading-severity-minor", 16)
prop._addConstant("lastReadingRecovering", "lastreading-recovering", 1)
prop._addConstant("lastReadingWarn", "lastreading-severity-warning", 8)
prop._addConstant("maxCrit", "max-severity-critical", 17179869184)
prop._addConstant("maxHigh", "max-crossed-high-threshold", 536870912)
prop._addConstant("maxLow", "max-crossed-low-threshold", 1073741824)
prop._addConstant("maxMajor", "max-severity-major", 8589934592)
prop._addConstant("maxMinor", "max-severity-minor", 4294967296)
prop._addConstant("maxRecovering", "max-recovering", 268435456)
prop._addConstant("maxWarn", "max-severity-warning", 2147483648)
prop._addConstant("minCrit", "min-severity-critical", 134217728)
prop._addConstant("minHigh", "min-crossed-high-threshold", 4194304)
prop._addConstant("minLow", "min-crossed-low-threshold", 8388608)
prop._addConstant("minMajor", "min-severity-major", 67108864)
prop._addConstant("minMinor", "min-severity-minor", 33554432)
prop._addConstant("minRecovering", "min-recovering", 2097152)
prop._addConstant("minWarn", "min-severity-warning", 16777216)
prop._addConstant("periodicCrit", "periodic-severity-critical", 1048576)
prop._addConstant("periodicHigh", "periodic-crossed-high-threshold", 32768)
prop._addConstant("periodicLow", "periodic-crossed-low-threshold", 65536)
prop._addConstant("periodicMajor", "periodic-severity-major", 524288)
prop._addConstant("periodicMinor", "periodic-severity-minor", 262144)
prop._addConstant("periodicRecovering", "periodic-recovering", 16384)
prop._addConstant("periodicWarn", "periodic-severity-warning", 131072)
prop._addConstant("rateCrit", "rate-severity-critical", 36028797018963968)
prop._addConstant("rateHigh", "rate-crossed-high-threshold", 1125899906842624)
prop._addConstant("rateLow", "rate-crossed-low-threshold", 2251799813685248)
prop._addConstant("rateMajor", "rate-severity-major", 18014398509481984)
prop._addConstant("rateMinor", "rate-severity-minor", 9007199254740992)
prop._addConstant("rateRecovering", "rate-recovering", 562949953421312)
prop._addConstant("rateWarn", "rate-severity-warning", 4503599627370496)
prop._addConstant("trendCrit", "trend-severity-critical", 281474976710656)
prop._addConstant("trendHigh", "trend-crossed-high-threshold", 8796093022208)
prop._addConstant("trendLow", "trend-crossed-low-threshold", 17592186044416)
prop._addConstant("trendMajor", "trend-severity-major", 140737488355328)
prop._addConstant("trendMinor", "trend-severity-minor", 70368744177664)
prop._addConstant("trendRecovering", "trend-recovering", 4398046511104)
prop._addConstant("trendWarn", "trend-severity-warning", 35184372088832)
prop._addConstant("unspecified", None, 0)
meta.props.add("usedThr", prop)
prop = PropMeta("str", "usedTr", "usedTr", 10647, PropCategory.IMPLICIT_TREND)
prop.label = "Used memory trend"
prop.isOper = True
prop.isStats = True
meta.props.add("usedTr", prop)
meta.namingProps.append(getattr(meta.props, "index"))
def __init__(self, parentMoOrDn, index, markDirty=True, **creationProps):
namingVals = [index]
Mo.__init__(self, parentMoOrDn, markDirty, *namingVals, **creationProps)
# End of package file
# ##################################################
| [
"[email protected]"
] | |
4426687fcdb98f8446d4f07841bc72249015469b | 5173c3e3956387a3f2ae8fcf4aed7c7a600dac78 | /Programmers/Programmers_입국심사.py | 0b401b3a4fa57dd39d85c7899098df041a3e441f | [] | no_license | ma0723/Min_Algorithm | df75f53f6e89b7817d4b52d686effb8236a4ddac | b02d1043008cb32e22daa9d4207b9a45f111d66f | refs/heads/master | 2023-07-25T11:00:15.397093 | 2021-08-30T02:08:05 | 2021-08-30T02:08:05 | 375,613,927 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 892 | py | def solution(n, times):
    # n: the number of people waiting for immigration inspection
    # times: array of how long each examiner takes to inspect one person
    answer = 0
    left = 1
    # minimum possible total time
    right = n * max(times)
    # maximum possible total time
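    # worked example (the well-known sample case): n=6, times=[7, 10] gives 28,
    # since 28//7 + 28//10 = 4 + 2 = 6 people can be inspected in 28 minutes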
while left <= right:
mid = (left + right) // 2
people = 0
for time in times:
people += mid // time
            # number of people each desk can process within the chosen time
            if people >= n:
                # once at least n people can be processed
                answer = mid
                right = mid - 1
                # search for a value smaller than mid
                break
                # prevents a timeout
                # ends the for loop
        if people < n:
            # if, after checking every desk, fewer than n people are processed
            left = mid + 1
            # search for a value larger than mid
    return answer | [
"[email protected]"
] | |
fc4ee268dd12250989e2ef7da583d9b11063e8d7 | 9d0195aa83cc594a8c61f334b90375961e62d4fe | /JTTest/SL7/CMSSW_10_2_15/src/dataRunA/nano3249.py | 502f170e217ed9f5f27e6f70e1ca274e34def381 | [] | no_license | rsk146/CMS | 4e49592fc64f6438051544c5de18598db36ed985 | 5f8dab8c59ae556598b9747b52b88205fffc4dbe | refs/heads/master | 2022-12-01T03:57:12.126113 | 2020-08-04T03:29:27 | 2020-08-04T03:29:27 | 284,863,383 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,293 | py | # Auto generated configuration file
# using:
# Revision: 1.19
# Source: /local/reps/CMSSW/CMSSW/Configuration/Applications/python/ConfigBuilder.py,v
# with command line options: nanoAOD_jetToolbox_cff -s NANO --data --eventcontent NANOAOD --datatier NANOAOD --no_exec --conditions 102X_dataRun2_Sep2018Rereco_v1 --era Run2_2018,run2_nanoAOD_102Xv1 --customise_commands=process.add_(cms.Service('InitRootHandlers', EnableIMT = cms.untracked.bool(False))) --customise JMEAnalysis/JetToolbox/nanoAOD_jetToolbox_cff.nanoJTB_customizeMC --filein /users/h2/rsk146/JTTest/SL7/CMSSW_10_6_12/src/ttbarCutTest/dataReprocessing/0004A5E9-9F18-6B42-B31D-4206406CE423.root --fileout file:jetToolbox_nano_datatest.root
import FWCore.ParameterSet.Config as cms
from Configuration.StandardSequences.Eras import eras
process = cms.Process('NANO',eras.Run2_2018,eras.run2_nanoAOD_102Xv1)
# import of standard configurations
process.load('Configuration.StandardSequences.Services_cff')
process.load('SimGeneral.HepPDTESSource.pythiapdt_cfi')
process.load('FWCore.MessageService.MessageLogger_cfi')
process.load('Configuration.EventContent.EventContent_cff')
process.load('Configuration.StandardSequences.GeometryRecoDB_cff')
process.load('Configuration.StandardSequences.MagneticField_AutoFromDBCurrent_cff')
process.load('PhysicsTools.NanoAOD.nano_cff')
process.load('Configuration.StandardSequences.EndOfProcess_cff')
process.load('Configuration.StandardSequences.FrontierConditions_GlobalTag_cff')
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(-1)
)
# Input source
process.source = cms.Source("PoolSource",
fileNames = cms.untracked.vstring('file:root://cms-xrd-global.cern.ch//store/data/Run2018A/EGamma/MINIAOD/17Sep2018-v2/120000/991CE2DB-8189-7D41-A40D-75A46C5E3FAE.root'),
secondaryFileNames = cms.untracked.vstring()
)
process.options = cms.untracked.PSet(
)
# Production Info
process.configurationMetadata = cms.untracked.PSet(
annotation = cms.untracked.string('nanoAOD_jetToolbox_cff nevts:1'),
name = cms.untracked.string('Applications'),
version = cms.untracked.string('$Revision: 1.19 $')
)
# Output definition
process.NANOAODoutput = cms.OutputModule("NanoAODOutputModule",
compressionAlgorithm = cms.untracked.string('LZMA'),
compressionLevel = cms.untracked.int32(9),
dataset = cms.untracked.PSet(
dataTier = cms.untracked.string('NANOAOD'),
filterName = cms.untracked.string('')
),
fileName = cms.untracked.string('file:jetToolbox_nano_datatest3249.root'),
outputCommands = process.NANOAODEventContent.outputCommands
)
# Additional output definition
# Other statements
from Configuration.AlCa.GlobalTag import GlobalTag
process.GlobalTag = GlobalTag(process.GlobalTag, '102X_dataRun2_Sep2018Rereco_v1', '')
# Path and EndPath definitions
process.nanoAOD_step = cms.Path(process.nanoSequence)
process.endjob_step = cms.EndPath(process.endOfProcess)
process.NANOAODoutput_step = cms.EndPath(process.NANOAODoutput)
# Schedule definition
process.schedule = cms.Schedule(process.nanoAOD_step,process.endjob_step,process.NANOAODoutput_step)
from PhysicsTools.PatAlgos.tools.helpers import associatePatAlgosToolsTask
associatePatAlgosToolsTask(process)
# customisation of the process.
# Automatic addition of the customisation function from PhysicsTools.NanoAOD.nano_cff
from PhysicsTools.NanoAOD.nano_cff import nanoAOD_customizeData
#call to customisation function nanoAOD_customizeData imported from PhysicsTools.NanoAOD.nano_cff
process = nanoAOD_customizeData(process)
# Automatic addition of the customisation function from JMEAnalysis.JetToolbox.nanoAOD_jetToolbox_cff
from JMEAnalysis.JetToolbox.nanoAOD_jetToolbox_cff import nanoJTB_customizeMC
#call to customisation function nanoJTB_customizeMC imported from JMEAnalysis.JetToolbox.nanoAOD_jetToolbox_cff
process = nanoJTB_customizeMC(process)
# End of customisation functions
# Customisation from command line
process.add_(cms.Service('InitRootHandlers', EnableIMT = cms.untracked.bool(False)))
# Add early deletion of temporary data products to reduce peak memory need
from Configuration.StandardSequences.earlyDeleteSettings_cff import customiseEarlyDelete
process = customiseEarlyDelete(process)
# End adding early deletion | [
"[email protected]"
] | |
862acd6512fcd275ad31407a805f4042dc0f7f1a | 711756b796d68035dc6a39060515200d1d37a274 | /output_cog/optimized_3055.py | d3ddd7faf2f926afa7c10c6d1e6b93350df9ca44 | [] | no_license | batxes/exocyst_scripts | 8b109c279c93dd68c1d55ed64ad3cca93e3c95ca | a6c487d5053b9b67db22c59865e4ef2417e53030 | refs/heads/master | 2020-06-16T20:16:24.840725 | 2016-11-30T16:23:16 | 2016-11-30T16:23:16 | 75,075,164 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,838 | py | import _surface
import chimera
try:
import chimera.runCommand
except:
pass
from VolumePath import markerset as ms
try:
from VolumePath import Marker_Set, Link
new_marker_set=Marker_Set
except:
from VolumePath import volume_path_dialog
d= volume_path_dialog(True)
new_marker_set= d.new_marker_set
marker_sets={}
surf_sets={}
if "Cog2_GFPN" not in marker_sets:
s=new_marker_set('Cog2_GFPN')
marker_sets["Cog2_GFPN"]=s
s= marker_sets["Cog2_GFPN"]
mark=s.place_marker((603.214, 485.129, 451.385), (0.89, 0.1, 0.1), 18.4716)
if "Cog2_0" not in marker_sets:
s=new_marker_set('Cog2_0')
marker_sets["Cog2_0"]=s
s= marker_sets["Cog2_0"]
mark=s.place_marker((541.749, 495.92, 477.899), (0.89, 0.1, 0.1), 17.1475)
if "Cog2_1" not in marker_sets:
s=new_marker_set('Cog2_1')
marker_sets["Cog2_1"]=s
s= marker_sets["Cog2_1"]
mark=s.place_marker((467.324, 495.674, 512.203), (0.89, 0.1, 0.1), 17.1475)
if "Cog2_GFPC" not in marker_sets:
s=new_marker_set('Cog2_GFPC')
marker_sets["Cog2_GFPC"]=s
s= marker_sets["Cog2_GFPC"]
mark=s.place_marker((556.434, 391.186, 540.366), (0.89, 0.1, 0.1), 18.4716)
if "Cog2_Anch" not in marker_sets:
s=new_marker_set('Cog2_Anch')
marker_sets["Cog2_Anch"]=s
s= marker_sets["Cog2_Anch"]
mark=s.place_marker((283.035, 529.66, 565.734), (0.89, 0.1, 0.1), 18.4716)
if "Cog3_GFPN" not in marker_sets:
s=new_marker_set('Cog3_GFPN')
marker_sets["Cog3_GFPN"]=s
s= marker_sets["Cog3_GFPN"]
mark=s.place_marker((557.795, 485.317, 460.79), (1, 1, 0), 18.4716)
if "Cog3_0" not in marker_sets:
s=new_marker_set('Cog3_0')
marker_sets["Cog3_0"]=s
s= marker_sets["Cog3_0"]
mark=s.place_marker((558.563, 484.876, 459.829), (1, 1, 0.2), 17.1475)
if "Cog3_1" not in marker_sets:
s=new_marker_set('Cog3_1')
marker_sets["Cog3_1"]=s
s= marker_sets["Cog3_1"]
mark=s.place_marker((533.437, 474.115, 453.383), (1, 1, 0.2), 17.1475)
if "Cog3_2" not in marker_sets:
s=new_marker_set('Cog3_2')
marker_sets["Cog3_2"]=s
s= marker_sets["Cog3_2"]
mark=s.place_marker((515.033, 462.122, 435.931), (1, 1, 0.2), 17.1475)
if "Cog3_3" not in marker_sets:
s=new_marker_set('Cog3_3')
marker_sets["Cog3_3"]=s
s= marker_sets["Cog3_3"]
mark=s.place_marker((532.546, 467.24, 414.512), (1, 1, 0.2), 17.1475)
if "Cog3_4" not in marker_sets:
s=new_marker_set('Cog3_4')
marker_sets["Cog3_4"]=s
s= marker_sets["Cog3_4"]
mark=s.place_marker((551.905, 448.762, 405.545), (1, 1, 0.2), 17.1475)
if "Cog3_5" not in marker_sets:
s=new_marker_set('Cog3_5')
marker_sets["Cog3_5"]=s
s= marker_sets["Cog3_5"]
mark=s.place_marker((542.507, 428.932, 387.838), (1, 1, 0.2), 17.1475)
if "Cog3_GFPC" not in marker_sets:
s=new_marker_set('Cog3_GFPC')
marker_sets["Cog3_GFPC"]=s
s= marker_sets["Cog3_GFPC"]
mark=s.place_marker((581.824, 500.246, 457.243), (1, 1, 0.4), 18.4716)
if "Cog3_Anch" not in marker_sets:
s=new_marker_set('Cog3_Anch')
marker_sets["Cog3_Anch"]=s
s= marker_sets["Cog3_Anch"]
mark=s.place_marker((497.997, 364.361, 315.965), (1, 1, 0.4), 18.4716)
if "Cog4_GFPN" not in marker_sets:
s=new_marker_set('Cog4_GFPN')
marker_sets["Cog4_GFPN"]=s
s= marker_sets["Cog4_GFPN"]
mark=s.place_marker((339.062, 447.64, 409.007), (0, 0, 0.8), 18.4716)
if "Cog4_0" not in marker_sets:
s=new_marker_set('Cog4_0')
marker_sets["Cog4_0"]=s
s= marker_sets["Cog4_0"]
mark=s.place_marker((339.062, 447.64, 409.007), (0, 0, 0.8), 17.1475)
if "Cog4_1" not in marker_sets:
s=new_marker_set('Cog4_1')
marker_sets["Cog4_1"]=s
s= marker_sets["Cog4_1"]
mark=s.place_marker((364.487, 458.99, 413.049), (0, 0, 0.8), 17.1475)
if "Cog4_2" not in marker_sets:
s=new_marker_set('Cog4_2')
marker_sets["Cog4_2"]=s
s= marker_sets["Cog4_2"]
mark=s.place_marker((377.071, 484.078, 416.14), (0, 0, 0.8), 17.1475)
if "Cog4_3" not in marker_sets:
s=new_marker_set('Cog4_3')
marker_sets["Cog4_3"]=s
s= marker_sets["Cog4_3"]
mark=s.place_marker((404.662, 489.561, 420.931), (0, 0, 0.8), 17.1475)
if "Cog4_4" not in marker_sets:
s=new_marker_set('Cog4_4')
marker_sets["Cog4_4"]=s
s= marker_sets["Cog4_4"]
mark=s.place_marker((432.096, 490.206, 429.903), (0, 0, 0.8), 17.1475)
if "Cog4_5" not in marker_sets:
s=new_marker_set('Cog4_5')
marker_sets["Cog4_5"]=s
s= marker_sets["Cog4_5"]
mark=s.place_marker((459.506, 494.339, 438.802), (0, 0, 0.8), 17.1475)
if "Cog4_6" not in marker_sets:
s=new_marker_set('Cog4_6')
marker_sets["Cog4_6"]=s
s= marker_sets["Cog4_6"]
mark=s.place_marker((487.333, 498.6, 446.999), (0, 0, 0.8), 17.1475)
if "Cog4_GFPC" not in marker_sets:
s=new_marker_set('Cog4_GFPC')
marker_sets["Cog4_GFPC"]=s
s= marker_sets["Cog4_GFPC"]
mark=s.place_marker((350.813, 308.266, 339.256), (0, 0, 0.8), 18.4716)
if "Cog4_Anch" not in marker_sets:
s=new_marker_set('Cog4_Anch')
marker_sets["Cog4_Anch"]=s
s= marker_sets["Cog4_Anch"]
mark=s.place_marker((643.588, 676.363, 555.92), (0, 0, 0.8), 18.4716)
if "Cog5_GFPN" not in marker_sets:
s=new_marker_set('Cog5_GFPN')
marker_sets["Cog5_GFPN"]=s
s= marker_sets["Cog5_GFPN"]
mark=s.place_marker((470.839, 520.017, 471.876), (0.3, 0.3, 0.3), 18.4716)
if "Cog5_0" not in marker_sets:
s=new_marker_set('Cog5_0')
marker_sets["Cog5_0"]=s
s= marker_sets["Cog5_0"]
mark=s.place_marker((470.839, 520.017, 471.876), (0.3, 0.3, 0.3), 17.1475)
if "Cog5_1" not in marker_sets:
s=new_marker_set('Cog5_1')
marker_sets["Cog5_1"]=s
s= marker_sets["Cog5_1"]
mark=s.place_marker((483.756, 496.579, 482.551), (0.3, 0.3, 0.3), 17.1475)
if "Cog5_2" not in marker_sets:
s=new_marker_set('Cog5_2')
marker_sets["Cog5_2"]=s
s= marker_sets["Cog5_2"]
mark=s.place_marker((486.953, 472.926, 498.824), (0.3, 0.3, 0.3), 17.1475)
if "Cog5_3" not in marker_sets:
s=new_marker_set('Cog5_3')
marker_sets["Cog5_3"]=s
s= marker_sets["Cog5_3"]
mark=s.place_marker((484.173, 470.236, 527.431), (0.3, 0.3, 0.3), 17.1475)
if "Cog5_GFPC" not in marker_sets:
s=new_marker_set('Cog5_GFPC')
marker_sets["Cog5_GFPC"]=s
s= marker_sets["Cog5_GFPC"]
mark=s.place_marker((605.482, 456.096, 502.002), (0.3, 0.3, 0.3), 18.4716)
if "Cog5_Anch" not in marker_sets:
s=new_marker_set('Cog5_Anch')
marker_sets["Cog5_Anch"]=s
s= marker_sets["Cog5_Anch"]
mark=s.place_marker((363.791, 482.892, 558.77), (0.3, 0.3, 0.3), 18.4716)
if "Cog6_GFPN" not in marker_sets:
s=new_marker_set('Cog6_GFPN')
marker_sets["Cog6_GFPN"]=s
s= marker_sets["Cog6_GFPN"]
mark=s.place_marker((557, 476.25, 491.55), (0.21, 0.49, 0.72), 18.4716)
if "Cog6_0" not in marker_sets:
s=new_marker_set('Cog6_0')
marker_sets["Cog6_0"]=s
s= marker_sets["Cog6_0"]
mark=s.place_marker((557.088, 476.203, 491.618), (0.21, 0.49, 0.72), 17.1475)
if "Cog6_1" not in marker_sets:
s=new_marker_set('Cog6_1')
marker_sets["Cog6_1"]=s
s= marker_sets["Cog6_1"]
mark=s.place_marker((544.415, 454.029, 481.026), (0.21, 0.49, 0.72), 17.1475)
if "Cog6_2" not in marker_sets:
s=new_marker_set('Cog6_2')
marker_sets["Cog6_2"]=s
s= marker_sets["Cog6_2"]
mark=s.place_marker((548.922, 442.682, 455.892), (0.21, 0.49, 0.72), 17.1475)
if "Cog6_3" not in marker_sets:
s=new_marker_set('Cog6_3')
marker_sets["Cog6_3"]=s
s= marker_sets["Cog6_3"]
mark=s.place_marker((542.255, 425.539, 434.671), (0.21, 0.49, 0.72), 17.1475)
if "Cog6_4" not in marker_sets:
s=new_marker_set('Cog6_4')
marker_sets["Cog6_4"]=s
s= marker_sets["Cog6_4"]
mark=s.place_marker((523.034, 433.671, 415.706), (0.21, 0.49, 0.72), 17.1475)
if "Cog6_5" not in marker_sets:
s=new_marker_set('Cog6_5')
marker_sets["Cog6_5"]=s
s= marker_sets["Cog6_5"]
mark=s.place_marker((514.935, 447.987, 392.582), (0.21, 0.49, 0.72), 17.1475)
if "Cog6_6" not in marker_sets:
s=new_marker_set('Cog6_6')
marker_sets["Cog6_6"]=s
s= marker_sets["Cog6_6"]
mark=s.place_marker((536.035, 459.928, 377.787), (0.21, 0.49, 0.72), 17.1475)
if "Cog6_GFPC" not in marker_sets:
s=new_marker_set('Cog6_GFPC')
marker_sets["Cog6_GFPC"]=s
s= marker_sets["Cog6_GFPC"]
mark=s.place_marker((525.707, 535.74, 416.368), (0.21, 0.49, 0.72), 18.4716)
if "Cog6_Anch" not in marker_sets:
s=new_marker_set('Cog6_Anch')
marker_sets["Cog6_Anch"]=s
s= marker_sets["Cog6_Anch"]
mark=s.place_marker((549.35, 381.476, 341.443), (0.21, 0.49, 0.72), 18.4716)
if "Cog7_GFPN" not in marker_sets:
s=new_marker_set('Cog7_GFPN')
marker_sets["Cog7_GFPN"]=s
s= marker_sets["Cog7_GFPN"]
mark=s.place_marker((524.859, 551.968, 459.592), (0.7, 0.7, 0.7), 18.4716)
if "Cog7_0" not in marker_sets:
s=new_marker_set('Cog7_0')
marker_sets["Cog7_0"]=s
s= marker_sets["Cog7_0"]
mark=s.place_marker((523.107, 530.815, 476.035), (0.7, 0.7, 0.7), 17.1475)
if "Cog7_1" not in marker_sets:
s=new_marker_set('Cog7_1')
marker_sets["Cog7_1"]=s
s= marker_sets["Cog7_1"]
mark=s.place_marker((516.553, 486.022, 511.863), (0.7, 0.7, 0.7), 17.1475)
if "Cog7_2" not in marker_sets:
s=new_marker_set('Cog7_2')
marker_sets["Cog7_2"]=s
s= marker_sets["Cog7_2"]
mark=s.place_marker((509.81, 442.117, 548.141), (0.7, 0.7, 0.7), 17.1475)
if "Cog7_GFPC" not in marker_sets:
s=new_marker_set('Cog7_GFPC')
marker_sets["Cog7_GFPC"]=s
s= marker_sets["Cog7_GFPC"]
mark=s.place_marker((587.843, 457.044, 563.839), (0.7, 0.7, 0.7), 18.4716)
if "Cog7_Anch" not in marker_sets:
s=new_marker_set('Cog7_Anch')
marker_sets["Cog7_Anch"]=s
s= marker_sets["Cog7_Anch"]
mark=s.place_marker((442.375, 371.69, 584.536), (0.7, 0.7, 0.7), 18.4716)
if "Cog8_0" not in marker_sets:
s=new_marker_set('Cog8_0')
marker_sets["Cog8_0"]=s
s= marker_sets["Cog8_0"]
mark=s.place_marker((574.845, 451.027, 504.557), (1, 0.5, 0), 17.1475)
if "Cog8_1" not in marker_sets:
s=new_marker_set('Cog8_1')
marker_sets["Cog8_1"]=s
s= marker_sets["Cog8_1"]
mark=s.place_marker((561.907, 464.822, 525.322), (1, 0.5, 0), 17.1475)
if "Cog8_2" not in marker_sets:
s=new_marker_set('Cog8_2')
marker_sets["Cog8_2"]=s
s= marker_sets["Cog8_2"]
mark=s.place_marker((540.488, 479.621, 535.588), (1, 0.5, 0), 17.1475)
if "Cog8_3" not in marker_sets:
s=new_marker_set('Cog8_3')
marker_sets["Cog8_3"]=s
s= marker_sets["Cog8_3"]
mark=s.place_marker((519.876, 495.995, 545.797), (1, 0.5, 0), 17.1475)
if "Cog8_4" not in marker_sets:
s=new_marker_set('Cog8_4')
marker_sets["Cog8_4"]=s
s= marker_sets["Cog8_4"]
mark=s.place_marker((496.536, 511.699, 548.556), (1, 0.5, 0), 17.1475)
if "Cog8_5" not in marker_sets:
s=new_marker_set('Cog8_5')
marker_sets["Cog8_5"]=s
s= marker_sets["Cog8_5"]
mark=s.place_marker((473.732, 528.445, 546.627), (1, 0.5, 0), 17.1475)
if "Cog8_GFPC" not in marker_sets:
s=new_marker_set('Cog8_GFPC')
marker_sets["Cog8_GFPC"]=s
s= marker_sets["Cog8_GFPC"]
mark=s.place_marker((533.626, 515.875, 493.685), (1, 0.6, 0.1), 18.4716)
if "Cog8_Anch" not in marker_sets:
s=new_marker_set('Cog8_Anch')
marker_sets["Cog8_Anch"]=s
s= marker_sets["Cog8_Anch"]
mark=s.place_marker((412.856, 541.337, 599.637), (1, 0.6, 0.1), 18.4716)
for k in surf_sets.keys():
chimera.openModels.add([surf_sets[k]])
| [
"[email protected]"
] | |
702760dacc77f2e65aaed171a0998dfd7602a7b9 | 3cf21d46cc8213614f5edfe4ebb09df112e5bf44 | /tools/asset_aggregator/name_check.py | 788a6e9eadc9bae3dc73c59a19e06448f19fd6e7 | [
"BSD-3-Clause"
] | permissive | toro09/rotki | abbf06c63bf0191b8a381bad05534bf8541cf212 | 014e7e11521b81c89b5cd2b4082d197da26684ee | refs/heads/master | 2022-12-01T19:09:08.409018 | 2020-08-11T19:34:54 | 2020-08-11T19:34:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,883 | py | import sys
from typing import Any, Dict
from asset_aggregator.utils import choose_multiple
# For assets we support but no API has names for. We manually input the names then.
MANUALLY_CHECKED_NAMES = {
'ADADOWN': 'Binance leveraged token ADADOWN',
'ADAUP': 'Binance leveraged token ADAUP',
'BTCDOWN': 'Binance leveraged token BTCDOWN',
'BTCUP': 'Binance leveraged token BTCUP',
'ETHDOWN': 'Binance leveraged token ETHDOWN',
'ETHUP': 'Binance leveraged token ETHUP',
'LINKDOWN': 'Binance leveraged token LINKDOWN',
'LINKUP': 'Binance leveraged token LINKUP',
'AMIS': 'Amis',
'AVA-2': 'Avalon',
'BIDR': 'Binance IDR Stable Coin',
'BITCAR': 'BitCar',
'BMT': 'BMChain',
'BOU': 'Boulle',
'BTCE': 'EthereumBitcoin',
'BTE': 'BTEcoin',
'BTH': 'Bytether',
'BTR-2': 'Bither',
'CET-2': 'DICE Money',
'CFTY': 'Crafty',
'CNTM': 'Connectome',
'CTSI': 'Cartesi',
'CO2': 'Climatecoin',
'CRGO': 'CargoCoin',
'DEPO': 'Depository Network',
'DIP': 'Etherisc',
'DPP': 'Digital Assets Power Play',
'EMT': 'EasyMine',
'ENTRP': 'Hut34 Entropy Token',
'ETHB': 'EtherBTC',
'FIH': 'FidelityHouse',
'FLX': 'BitFlux',
'FORK-2': 'Gastro Advisor Token',
'HBD': 'Hive dollar',
'HIVE': 'Hive',
'HKG': 'Hacker Gold',
'ITM': 'Intimate',
'JOY': 'JOYSO',
'KUE': 'Kuende',
'LGR': 'Logarithm',
'LOON': 'Loon Network',
'ME': 'All.me',
'MILC': 'Micro Licensing Coin',
'MNT': 'Media Network Token',
'MRP': 'Money Rebel',
'MRV': 'Macroverse',
'OAK': 'Acorn Collective',
'OCC-2': 'Original Crypto Coin',
'REA': 'Realisto',
'REDC': 'Red Cab',
'RIPT': 'RiptideCoin',
'RNDR': 'Render Token',
'SKR': 'Skrilla Token',
'SKYM': 'Skymap',
'SPICE': 'Spice VC Token',
'SSH': 'StreamSpace',
'STP': 'StashPay',
'TAN': 'Taklimakan',
'TBT': 'T-Bot',
'TRXBEAR': ' 3X Short TRX Token',
'TRXBULL': ' 3X Long TRX Token',
'URB': 'Urbit Data',
'USDJ': 'USDJ',
'UTI': 'Unicorn Technology International',
'VENUS': 'VenusEnergy',
'WMK': 'WeMark',
'WLK': 'Wolk',
'ZIX': 'Zeex Token',
}
def name_check(
asset_symbol: str,
our_asset: Dict[str, Any],
our_data: Dict[str, Any],
paprika_data: Dict[str, Any],
cmc_data: Dict[str, Any],
) -> Dict[str, Any]:
"""Process the name from coin paprika and coinmarketcap
Then compare to our data and provide choices to clean up the data.
"""
our_name = our_asset.get('name', None)
if our_name:
# If we already got a name from manual input then keep it
return our_data
if asset_symbol in MANUALLY_CHECKED_NAMES:
our_data[asset_symbol]['name'] = MANUALLY_CHECKED_NAMES[asset_symbol]
return our_data
paprika_name = None
if paprika_data:
paprika_name = paprika_data['name']
cmc_name = None
if cmc_data:
cmc_name = cmc_data['name']
if not paprika_name and not cmc_name and asset_symbol:
print(f'No name in any external api for asset {asset_symbol}')
sys.exit(1)
if paprika_name == cmc_name:
# If both external APIs agree just use their name
our_data[asset_symbol]['name'] = paprika_name
return our_data
msg = (
f'For asset {asset_symbol} the possible names are: \n'
f'(1) Coinpaprika: {paprika_name}\n'
f'(2) Coinmarketcap: {cmc_name}\n'
f'Choose a number (1)-(2) to choose which name to use: '
)
choice = choose_multiple(msg, (1, 2))
if choice == 1:
name = paprika_name
elif choice == 2:
if not cmc_name:
print("Chose coinmarketcap's name but it's empty. Bailing ...")
sys.exit(1)
name = cmc_name
our_data[asset_symbol]['name'] = name
return our_data
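# Minimal usage sketch (hypothetical data; when both external APIs agree the
# interactive prompt is skipped and their name is taken verbatim):
#   our_data = {'BTC': {}}
#   our_data = name_check('BTC', our_data['BTC'], our_data,
#                         {'name': 'Bitcoin'}, {'name': 'Bitcoin'})
#   our_data['BTC']['name']  # -> 'Bitcoin'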
| [
"[email protected]"
] | |
0b7d6236b66a636e6778572bde8454fb0fa408ca | 242086b8c6a39cbc7af3bd7f2fd9b78a66567024 | /python/PP4E-Examples-1.4/Examples/PP4E/Dstruct/Classics/permcomb.py | f6e244e89732645613fef830391f1e2f9dd60db6 | [] | no_license | chuzui/algorithm | 7537d0aa051ac4cbe9f6a7ca9a3037204803a650 | c3006b24c4896c1242d3ceab43ace995c94f10c8 | refs/heads/master | 2021-01-10T13:05:30.902020 | 2015-09-27T14:39:02 | 2015-09-27T14:39:02 | 8,404,397 | 4 | 4 | null | null | null | null | UTF-8 | Python | false | false | 1,516 | py | "permutation-type operations for sequences"
def permute(list):
if not list: # shuffle any sequence
return [list] # empty sequence
else:
res = []
for i in range(len(list)):
rest = list[:i] + list[i+1:] # delete current node
for x in permute(rest): # permute the others
res.append(list[i:i+1] + x) # add node at front
return res
def subset(list, size):
if size == 0 or not list: # order matters here
return [list[:0]] # an empty sequence
else:
result = []
for i in range(len(list)):
pick = list[i:i+1] # sequence slice
rest = list[:i] + list[i+1:] # keep [:i] part
for x in subset(rest, size-1):
result.append(pick + x)
return result
def combo(list, size):
if size == 0 or not list: # order doesn't matter
return [list[:0]] # xyz == yzx
else:
result = []
for i in range(0, (len(list) - size) + 1): # iff enough left
pick = list[i:i+1]
rest = list[i+1:] # drop [:i] part
for x in combo(rest, size - 1):
result.append(pick + x)
return result
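# Quick sanity check of the three functions (results computed by hand; strings
# work too because everything is done with slicing and concatenation):
#   permute('abc') -> ['abc', 'acb', 'bac', 'bca', 'cab', 'cba']
#   subset('abc', 2) -> ['ab', 'ac', 'ba', 'bc', 'ca', 'cb']   # order matters
#   combo('abc', 2)  -> ['ab', 'ac', 'bc']                     # order ignored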
| [
"zui"
] | zui |
39695f540bade7e05ff8fa960c71d068109b1dda | 2dd0bf6e8542b560c2e3567f8793b561cb0678b0 | /code/src/main/python/misconceptions/syntactics/grammar/R.py | 4c300cb4f55146b42613854ab34bcb255fe58cbf | [
"Unlicense"
] | permissive | Eduardo95/COSAL | 021f01cfa86e656c3fe320159c8d25ca5b6f311d | 4eb95d286288aa25a1a90db40cb1998dad048e1b | refs/heads/master | 2023-06-17T08:19:37.925879 | 2021-07-12T16:24:06 | 2021-07-12T16:24:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,354 | py | import sys
import os
sys.path.append(os.path.abspath("."))
sys.dont_write_bytecode = True
__author__ = "COSAL"
from lark import Lark
R_GRAMMAR = """
start: value (ASSIGNMENT_OPERATOR value)?
// expr: value ( indexer | value | attribute)+
binary_expr: value BINARY_OPERATOR value
unary_expr: UNARY_OPERATOR value
indexer: value "[" value "]"
attribute: value "$" value
value: unary_expr
| binary_expr
| array
| list
| matrix
| data_frame
| tuple
| slice_range
| QUOTED_STRING
| NUMBER
| BOOL
| NULL
| NAME
| if_else
| func_call
| attribute
| indexer
| values
values: value? ("," value?)+
array: ("c" "(" [value ("," value)*] ")") | ("[" value? ("," value?)* "]")
list: "list" "(" [value ("," value)*] ")"
matrix: "matrix" "(" args ")"
data_frame: "data.frame" "(" args ")"
tuple: "(" [value ("," value)*] ")"
QUOTED_STRING : DOUBLE_QUOTED_STRING | SINGLE_QUOTED_STRING | TILDE_QUOTED_STRING
DOUBLE_QUOTED_STRING : /"[^"]*"/
SINGLE_QUOTED_STRING : /'[^']*'/
TILDE_QUOTED_STRING : /`[^']*`/
NAME: ("_"|LETTER) ("_"|LETTER|DIGIT|".")*
BOOL: "TRUE" | "FALSE"
if_else: "ifelse" "(" value "," value "," value")"
slice_range: value? ":" value?
NULL: "NULL" | "NaN"
ASSIGNMENT_OPERATOR: "="
| "<-"
BINARY_OPERATOR: "+"
| "-"
| "**"
| "*"
| "/"
| "^"
| "%%"
| "%/%"
| ">="
| ">"
| "<="
| "<"
| "=="
| "!="
| "|"
| "&"
UNARY_OPERATOR: "!"
| "-"
func_name: NAME | TILDE_QUOTED_STRING
func_args: value ("," value)*
func_kwarg: NAME "=" value
func_kwargs: func_kwarg ("," func_kwarg)*
args: (func_args | func_kwargs | (func_args "," func_kwargs))
//indexer_args: (value | values | func_name)
func_call: func_name "(" args? ")"
// %import common.CNAME -> NAME
%import common.SIGNED_NUMBER -> NUMBER
%import common.LETTER -> LETTER
%import common.DIGIT -> DIGIT
%import common.WORD
%import common.WS
%import common.NEWLINE -> NEWLINE
%ignore WS
"""
def r_parser():
return Lark(R_GRAMMAR)
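# Minimal usage sketch (hypothetical snippet; any expression the grammar
# covers works the same way as the calls in _test() below):
#   tree = r_parser().parse('x = mean(df$Fare)')
#   print(tree.pretty())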
def _test():
parser = r_parser()
# print(parser.parse("df.iloc[1:2, df[[2]]]"))
# print(parser.parse("df.set_value(dfaxis=8.05)"))
# print(parser.parse('table(df$Parch, df$Survived)'))
print(parser.parse('mean(df$Fare)'))
def verify():
from utils import cache
misconceptions_path = "/Users/panzer/Raise/ProgramRepair/CodeSeer/code/src/main/python/misconceptions.xlsx"
wb = cache.read_excel(misconceptions_path, read_only=True)
# sheet = wb.get_sheet_by_name('HighSim-HighSyn')
sheet = wb.get_sheet_by_name('LowSim-LowSyn')
parser = r_parser()
seen = set()
for i, row in enumerate(sheet.iter_rows()):
if i == 0:
continue
snippet = row[0].value
if i >= 1 and snippet not in seen:
print(i, snippet)
seen.add(snippet)
parser.parse(snippet)
elif i % 100 == 0:
print("Dont worry I'm running", i)
if __name__ == "__main__":
# verify()
_test() | [
"[email protected]"
] | |
2718c3441138bf66c7e26a309ed95597a6632432 | 19375a18719e44eee7c596e72ef8915d3fcbff92 | /day07_spider/06_qq.py | e29058c5d79b1d50650db9c34bab04e364ccb3bd | [] | no_license | J-shan0903/AID1912 | 6c617fa26751c31ff05a63050a320122e3ca044e | 0797f3d8ef0e96b8eb6908dffbec8193c9614973 | refs/heads/master | 2021-03-23T12:21:32.480026 | 2020-05-23T08:36:21 | 2020-05-23T08:36:21 | 247,452,309 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 303 | py | from selenium import webdriver
driver = webdriver.Chrome()
driver.get(url='https://mail.qq.com/')
# The login form lives inside an iframe, so switch into it first.
driver.switch_to.frame('login_frame')
driver.find_element_by_id('u').send_keys('[email protected]')
driver.find_element_by_id('p').send_keys('353597jss')
# find_elements_* returns a list, which has no click(); use the singular form.
driver.find_element_by_class_name('btn').click()
"[email protected]"
] | |
111e4e3b3e118be47d757ed5f872a5057ef0e42c | 4a307849ed4dded5ce84b0ceb6d2cf56c2e64b89 | /common/servicechain/firewall/verify.py | b43cb7d94e9c935986e1e607a161918f1394dedd | [
"LicenseRef-scancode-warranty-disclaimer",
"Apache-2.0"
] | permissive | lmadhusudhanan/contrail-test | a6316b41dcb836315d25503f1dee511943d7f976 | bd39ff19da06a20bd79af8c25e3cde07375577cf | refs/heads/master | 2022-05-04T20:01:58.960911 | 2018-06-27T17:56:47 | 2018-06-27T17:56:47 | 138,913,918 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 39,312 | py | import os
import re
from common.servicechain.verify import VerifySvcChain
from tcutils.util import get_random_cidr
from tcutils.util import get_random_name
from tcutils.util import retry
SVC_TYPE_PROPS = {
'firewall': {'in-network-nat': 'tiny_nat_fw',
'in-network': 'tiny_in_net',
'transparent': 'tiny_trans_fw',
},
'analyzer': {'transparent': 'analyzer',
'in-network' : 'analyzer',
}
}
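# The table above is keyed by service type, then by service mode, and maps to
# the nickname of the tiny service image used for the service VM, e.g.:
#   SVC_TYPE_PROPS['firewall']['in-network']  # -> 'tiny_in_net'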
class VerifySvcFirewall(VerifySvcChain):
def verify_svc_span(self, in_net=False):
vn1_name = get_random_name("left_vn")
vn1_subnets = ['31.1.1.0/24']
vm1_name = get_random_name('left_vm')
vn2_name = get_random_name("right_vn")
vn2_subnets = ['41.2.2.0/24']
vm2_name = get_random_name('right_vm')
if in_net:
vn1_name = get_random_name("in_left_vn")
vn1_subnets = ['32.1.1.0/24']
vm1_name = get_random_name('in_left_vm')
vn2_name = get_random_name("in_right_vn")
vn2_subnets = ['42.2.2.0/24']
vm2_name = get_random_name('in_right_vm')
vn1_fixture = self.config_vn(vn1_name, vn1_subnets)
vn2_fixture = self.config_vn(vn2_name, vn2_subnets)
vm1_fixture = self.config_vm(vm1_name, vn_fix=vn1_fixture)
vm2_fixture = self.config_vm(vm2_name, vn_fix=vn2_fixture)
assert vm1_fixture.verify_on_setup()
assert vm2_fixture.verify_on_setup()
vm1_fixture.wait_till_vm_is_up()
vm2_fixture.wait_till_vm_is_up()
max_inst = 3
st_name = get_random_name("tcp_svc_template")
si_prefix = "tcp_bridge_"
policy_name = get_random_name("allow_tcp")
if in_net:
st_name = get_random_name("in_tcp_svc_template")
si_prefix = "in_tcp_bridge_"
policy_name = get_random_name("in_allow_tcp")
tcp_st_fixture, tcp_si_fixture = self.config_st_si(
st_name, si_prefix, max_inst=max_inst,
left_vn_fixture=vn1_fixture, right_vn_fixture=vn2_fixture)
else:
tcp_st_fixture, tcp_si_fixture = self.config_st_si(
st_name, si_prefix, max_inst=max_inst)
action_list = [tcp_si_fixture.fq_name_str]
# action_list = self.chain_si(si_count, si_prefix)
# Update rule with specific port/protocol
rule = [{'direction': '<>',
'protocol': 'tcp',
'source_network': vn1_name,
'src_ports': [8000, 8000],
'dest_network': vn2_name,
'dst_ports': [9000, 9000],
'simple_action': None,
'action_list': {'apply_service': action_list}
}]
# Create new policy with rule to allow traffci from new VN's
tcp_policy_fixture = self.config_policy(policy_name, rule)
self.verify_si(tcp_si_fixture)
st_name = get_random_name("udp_svc_template")
si_prefix = "udp_bridge_"
policy_name = get_random_name("allow_udp")
if in_net:
st_name = get_random_name("in_udp_svc_template")
si_prefix = "in_udp_bridge_"
policy_name = get_random_name("in_allow_udp")
udp_st_fixture, udp_si_fixture = self.config_st_si(
st_name, si_prefix, max_inst=max_inst,
left_vn_fixture=vn1_fixture, right_vn_fixture=vn2_fixture)
else:
udp_st_fixture, udp_si_fixture = self.config_st_si(
st_name, si_prefix, max_inst=max_inst)
action_list = [udp_si_fixture.fq_name_str]
# action_list = self.chain_si(si_count, si_prefix)
# Update rule with specific port/protocol
rule = [{'direction': '<>',
'protocol': 'udp',
'source_network': vn1_name,
'src_ports': [8001, 8001],
'dest_network': vn2_name,
'dst_ports': [9001, 9001],
'simple_action': None,
'action_list': {'apply_service': action_list}
}]
# Create new policy with rule to allow traffci from new VN's
udp_policy_fixture = self.config_policy(policy_name, rule)
vn1_udp_policy_fix = self.attach_policy_to_vn(
[tcp_policy_fixture, udp_policy_fixture], vn1_fixture)
vn2_udp_policy_fix = self.attach_policy_to_vn(
[tcp_policy_fixture, udp_policy_fixture], vn2_fixture)
result, msg = self.validate_vn(vn1_name)
assert result, msg
result, msg = self.validate_vn(vn2_name)
assert result, msg
        assert self.verify_si(udp_si_fixture)
# Install traffic package in VM
vm1_fixture.install_pkg("Traffic")
vm2_fixture.install_pkg("Traffic")
sport = 8001
dport = 9001
sent, recv = self.verify_traffic(vm1_fixture, vm2_fixture,
'udp', sport=sport, dport=dport)
errmsg = "UDP traffic with src port %s and dst port %s failed" % (
sport, dport)
assert sent and recv == sent, errmsg
sport = 8000
dport = 9000
sent, recv = self.verify_traffic(vm1_fixture, vm2_fixture,
'tcp', sport=sport, dport=dport)
errmsg = "TCP traffic with src port %s and dst port %s failed" % (
sport, dport)
assert sent and recv == sent, errmsg
        self.delete_si_st(tcp_si_fixture, tcp_st_fixture)
sport = 8001
dport = 9001
sent, recv = self.verify_traffic(vm1_fixture, vm2_fixture,
'udp', sport=sport, dport=dport)
errmsg = "UDP traffic with src port %s and dst port %s failed" % (
sport, dport)
assert sent and recv == sent, errmsg
sport = 8000
dport = 9000
sent, recv = self.verify_traffic(vm1_fixture, vm2_fixture,
'tcp', sport=sport, dport=dport)
errmsg = "TCP traffic with src port %s and dst port %s passed; Expected to fail" % (
sport, dport)
assert sent and recv == 0, errmsg
st_name = get_random_name("tcp_svc_template")
si_prefix = "tcp_bridge_"
policy_name = get_random_name("allow_tcp")
if in_net:
st_name = get_random_name("in_tcp_svc_template")
si_prefix = "in_tcp_bridge_"
policy_name = get_random_name("in_allow_tcp")
tcp_st_fixture, tcp_si_fixture = self.config_st_si(
st_name, si_prefix, max_inst=max_inst,
left_vn_fixture=vn1_fixture, right_vn_fixture=vn2_fixture)
else:
tcp_st_fixture, tcp_si_fixture = self.config_st_si(
st_name, si_prefix, max_inst=max_inst)
action_list = [tcp_si_fixture.fq_name_str]
# action_list = self.chain_si(si_count, si_prefix)
result, msg = self.validate_vn(vn1_name)
assert result, msg
result, msg = self.validate_vn(vn2_name)
assert result, msg
        self.verify_si(tcp_si_fixture)
sport = 8001
dport = 9001
sent, recv = self.verify_traffic(vm1_fixture, vm2_fixture,
'udp', sport=sport, dport=dport)
errmsg = "UDP traffic with src port %s and dst port %s failed" % (
sport, dport)
assert sent and recv == sent, errmsg
sport = 8000
dport = 9000
sent, recv = self.verify_traffic(vm1_fixture, vm2_fixture,
'tcp', sport=sport, dport=dport)
errmsg = "TCP traffic with src port %s and dst port %s failed" % (
sport, dport)
assert sent and recv == sent, errmsg
def verify_multi_inline_svc(self, *args, **kwargs):
ret_dict = self.config_multi_inline_svc(*args, **kwargs)
proto = kwargs.get('proto', 'any')
left_vn_fq_name = ret_dict.get('left_vn_fixture').vn_fq_name
right_vn_fq_name = ret_dict.get('right_vn_fixture').vn_fq_name
left_vm_fixture = ret_dict.get('left_vm_fixture')
right_vm_fixture = ret_dict.get('right_vm_fixture')
st_fixtures = ret_dict.get('st_fixtures')
si_fixtures = ret_dict.get('si_fixtures')
for i in range(len(st_fixtures)):
assert st_fixtures[i].verify_on_setup(), 'ST Verification failed'
assert si_fixtures[i].verify_on_setup(), 'SI Verification failed'
result, msg = self.validate_vn(left_vn_fq_name)
assert result, msg
result, msg = self.validate_vn(right_vn_fq_name, right_vn=True)
assert result, msg
result, msg = self.validate_svc_action(
left_vn_fq_name, si_fixtures[0], right_vm_fixture, src='left')
assert result, msg
if proto not in ['any', 'icmp']:
self.logger.info('Will skip Ping test')
else:
# Ping from left VM to right VM
errmsg = "Ping to Right VM %s from Left VM failed" % right_vm_fixture.vm_ip
assert left_vm_fixture.ping_with_certainty(
right_vm_fixture.vm_ip, count='3'), errmsg
return ret_dict
# end verify_multi_inline_svc
def verify_policy_delete_add(self, si_test_dict):
left_vn_policy_fix = si_test_dict['left_vn_policy_fix']
right_vn_policy_fix = si_test_dict['right_vn_policy_fix']
policy_fixture = si_test_dict['policy_fixture']
left_vm_fixture = si_test_dict['left_vm_fixture']
right_vm_fixture = si_test_dict['right_vm_fixture']
si_fixture = si_test_dict['si_fixture']
left_vn_fixture = si_test_dict['left_vn_fixture']
right_vn_fixture = si_test_dict['right_vn_fixture']
# Delete policy
self.detach_policy(left_vn_policy_fix)
self.detach_policy(right_vn_policy_fix)
self.unconfig_policy(policy_fixture)
# Ping from left VM to right VM; expected to fail
errmsg = "Ping to right VM ip %s from left VM passed; expected to fail" % right_vm_fixture.vm_ip
assert left_vm_fixture.ping_with_certainty(
right_vm_fixture.vm_ip, expectation=False), errmsg
# Create policy again
policy_fixture = self.config_policy(policy_fixture.policy_name,
policy_fixture.rules_list)
left_vn_policy_fix = self.attach_policy_to_vn(
policy_fixture, left_vn_fixture)
right_vn_policy_fix = self.attach_policy_to_vn(
policy_fixture, right_vn_fixture)
assert self.verify_si(si_fixture)
# Wait for the existing flow entry to age
self.sleep(40)
# Ping from left VM to right VM
errmsg = "Ping to right VM ip %s from left VM failed" % right_vm_fixture.vm_ip
assert left_vm_fixture.ping_with_certainty(
right_vm_fixture.vm_ip), errmsg
return True
# end verify_policy_delete_add
def verify_protocol_port_change(self, si_test_dict, mode='transparent'):
left_vn_policy_fix = si_test_dict['left_vn_policy_fix']
right_vn_policy_fix = si_test_dict['right_vn_policy_fix']
policy_fixture = si_test_dict['policy_fixture']
left_vm_fixture = si_test_dict['left_vm_fixture']
right_vm_fixture = si_test_dict['right_vm_fixture']
left_vn_fixture = si_test_dict['left_vn_fixture']
right_vn_fixture = si_test_dict['right_vn_fixture']
si_fixture = si_test_dict['si_fixture']
# Install traffic package in VM
left_vm_fixture.install_pkg("Traffic")
right_vm_fixture.install_pkg("Traffic")
sport = 8000
dport = 9000
sent, recv = self.verify_traffic(left_vm_fixture, right_vm_fixture,
'udp', sport=sport, dport=dport)
errmsg = "UDP traffic with src port %s and dst port %s failed" % (
sport, dport)
assert sent and recv == sent, errmsg
sport = 8000
dport = 9001
sent, recv = self.verify_traffic(left_vm_fixture, right_vm_fixture,
'tcp', sport=sport, dport=dport)
errmsg = "TCP traffic with src port %s and dst port %s failed" % (
sport, dport)
assert sent and recv == sent, errmsg
# Delete policy
self.detach_policy(left_vn_policy_fix)
self.detach_policy(right_vn_policy_fix)
self.unconfig_policy(policy_fixture)
# Update rule with specific port/protocol
#action_list = {'apply_service': self.action_list}
action_list = policy_fixture.rules_list[0]['action_list']
new_rule = {'direction': '<>',
'protocol': 'tcp',
'source_network': si_test_dict['left_vn_fixture'].vn_fq_name,
'src_ports': [8000, 8000],
'dest_network': si_test_dict['right_vn_fixture'].vn_fq_name,
'dst_ports': [9001, 9001],
'simple_action': None,
'action_list': action_list
}
rules = [new_rule]
# Create new policy with rule to allow traffci from new VN's
policy_fixture = self.config_policy(policy_fixture.policy_name, rules)
left_vn_policy_fix = self.attach_policy_to_vn(
policy_fixture, left_vn_fixture)
right_vn_policy_fix = self.attach_policy_to_vn(
policy_fixture, right_vn_fixture)
assert self.verify_si(si_fixture)
self.logger.debug("Send udp traffic; with policy rule %s", new_rule)
sport = 8000
dport = 9000
sent, recv = self.verify_traffic(left_vm_fixture, right_vm_fixture,
'udp', sport=sport, dport=dport)
errmsg = "UDP traffic with src port %s and dst port %s passed; Expected to fail" % (
sport, dport)
assert sent and recv == 0, errmsg
sport = 8000
dport = 9001
self.logger.debug("Send tcp traffic; with policy rule %s", new_rule)
sent, recv = self.verify_traffic(left_vm_fixture, right_vm_fixture,
'tcp', sport=sport, dport=dport)
errmsg = "TCP traffic with src port %s and dst port %s failed" % (
sport, dport)
assert sent and recv == sent, errmsg
# verify_protocol_port_change
def verify_add_new_vns(self, si_test_dict):
left_vn_policy_fix = si_test_dict['left_vn_policy_fix']
right_vn_policy_fix = si_test_dict['right_vn_policy_fix']
policy_fixture = si_test_dict['policy_fixture']
left_vm_fixture = si_test_dict['left_vm_fixture']
right_vm_fixture = si_test_dict['right_vm_fixture']
si_fixture = si_test_dict['si_fixture']
left_vn_fixture = si_test_dict['left_vn_fixture']
right_vn_fixture = si_test_dict['right_vn_fixture']
# Delete policy
self.detach_policy(left_vn_policy_fix)
self.detach_policy(right_vn_policy_fix)
self.unconfig_policy(policy_fixture)
# Create one more left and right VN's
new_left_vn = "new_left_bridge_vn"
new_left_vn_net = [get_random_cidr(af=self.inputs.get_af())]
new_right_vn = "new_right_bridge_vn"
new_right_vn_net = [get_random_cidr(af=self.inputs.get_af())]
new_left_vn_fix = self.config_vn(new_left_vn, new_left_vn_net)
new_right_vn_fix = self.config_vn(new_right_vn, new_right_vn_net)
# Launch VMs in new left and right VN's
new_left_vm = 'new_left_bridge_vm'
new_right_vm = 'new_right_bridge_vm'
new_left_vm_fix = self.config_vm(new_left_vm, vn_fix=new_left_vn_fix)
new_right_vm_fix = self.config_vm(new_right_vm, vn_fix=new_right_vn_fix)
# Wait for VM's to come up
new_left_vm_fix.wait_till_vm_is_up()
new_right_vm_fix.wait_till_vm_is_up()
# Add rule to policy to allow traffic from new left_vn to right_vn
# through SI
action_list = policy_fixture.input_rules_list[0]['action_list']
new_rule = {'direction': '<>',
'protocol': 'any',
'source_network': new_left_vn,
'src_ports': [0, 65535],
'dest_network': new_right_vn,
'dst_ports': [0, 65535],
'simple_action': action_list.get('simple_action', None),
'action_list': action_list,
}
rules = policy_fixture.input_rules_list
rules.append(new_rule)
# Create new policy with rule to allow traffic from new VN's
policy_fixture = self.config_policy(policy_fixture.policy_name, rules)
left_vn_policy_fix = self.attach_policy_to_vn(
policy_fixture, left_vn_fixture)
right_vn_policy_fix = self.attach_policy_to_vn(
policy_fixture, right_vn_fixture)
# attach policy to new VN's
new_policy_left_vn_fix = self.attach_policy_to_vn(
policy_fixture, new_left_vn_fix)
new_policy_right_vn_fix = self.attach_policy_to_vn(
policy_fixture, new_right_vn_fix)
self.verify_si(si_fixture)
# Ping from left VM to right VM
self.sleep(5)
self.logger.info("Verfiy ICMP traffic between new VN's.")
errmsg = "Ping to right VM ip %s from left VM failed" % new_right_vm_fix.vm_ip
assert new_left_vm_fix.ping_with_certainty(
new_right_vm_fix.vm_ip), errmsg
self.logger.info(
"Verfiy ICMP traffic between new left VN and existing right VN.")
errmsg = "Ping to right VM ip %s from left VM passed; \
Expected tp Fail" % right_vm_fixture.vm_ip
assert new_left_vm_fix.ping_with_certainty(right_vm_fixture.vm_ip,
expectation=False), errmsg
self.logger.info(
"Verfiy ICMP traffic between existing VN's with allow all.")
errmsg = "Ping to right VM ip %s from left VM failed" % right_vm_fixture.vm_ip
assert left_vm_fixture.ping_with_certainty(
right_vm_fixture.vm_ip), errmsg
self.logger.info(
"Verfiy ICMP traffic between existing left VN and new right VN.")
errmsg = "Ping to right VM ip %s from left VM passed; \
Expected to Fail" % new_right_vm_fix.vm_ip
assert left_vm_fixture.ping_with_certainty(new_right_vm_fix.vm_ip,
expectation=False), errmsg
# Ping between left VN's
self.logger.info(
"Verfiy ICMP traffic between new left VN and existing left VN.")
errmsg = "Ping to left VM ip %s from another left VM in different VN \
passed; Expected to fail" % left_vm_fixture.vm_ip
assert new_left_vm_fix.ping_with_certainty(left_vm_fixture.vm_ip,
expectation=False), errmsg
self.logger.info(
"Verfiy ICMP traffic between new right VN and existing right VN.")
errmsg = "Ping to right VM ip %s from another right VM in different VN \
passed; Expected to fail" % right_vm_fixture.vm_ip
assert new_right_vm_fix.ping_with_certainty(right_vm_fixture.vm_ip,
expectation=False), errmsg
# Delete policy
self.detach_policy(left_vn_policy_fix)
self.detach_policy(right_vn_policy_fix)
self.detach_policy(new_policy_left_vn_fix)
self.detach_policy(new_policy_right_vn_fix)
self.unconfig_policy(policy_fixture)
# Add rule to policy to allow only tcp traffic from new left_vn to right_vn
# through SI
rules.remove(new_rule)
udp_rule = {'direction': '<>',
'protocol': 'udp',
'source_network': new_left_vn,
'src_ports': [8000, 8000],
'dest_network': new_right_vn,
'dst_ports': [9000, 9000],
'simple_action': action_list.get('simple_action', None),
'action_list': {'apply_service': action_list['apply_service']}
}
rules.append(udp_rule)
# Create new policy with rule to allow traffci from new VN's
policy_fixture = self.config_policy(policy_fixture.policy_name, rules)
left_vn_policy_fix = self.attach_policy_to_vn(
policy_fixture, left_vn_fixture)
right_vn_policy_fix = self.attach_policy_to_vn(
policy_fixture, right_vn_fixture)
# attach policy to new VN's
new_policy_left_vn_fix = self.attach_policy_to_vn(
policy_fixture, new_left_vn_fix)
new_policy_right_vn_fix = self.attach_policy_to_vn(
policy_fixture, new_right_vn_fix)
self.verify_si(si_fixture)
# Ping from left VM to right VM with udp rule
self.logger.info(
"Verify ICMP traffic with allow udp only rule from new left VN to new right VN")
errmsg = "Ping to right VM ip %s from left VM passed; Expected to fail" % new_right_vm_fix.vm_ip
assert new_left_vm_fix.ping_with_certainty(new_right_vm_fix.vm_ip,
expectation=False), errmsg
# Install traffic package in VM
left_vm_fixture.install_pkg("Traffic")
right_vm_fixture.install_pkg("Traffic")
new_left_vm_fix.install_pkg("Traffic")
new_right_vm_fix.install_pkg("Traffic")
self.logger.info(
"Verify UDP traffic with allow udp only rule from new left VN to new right VN")
sport = 8000
dport = 9000
sent, recv = self.verify_traffic(new_left_vm_fix, new_right_vm_fix,
'udp', sport=sport, dport=dport)
errmsg = "UDP traffic with src port %s and dst port %s failed" % (
sport, dport)
assert sent and recv == sent, errmsg
self.logger.info("Verfiy ICMP traffic with allow all.")
errmsg = "Ping to right VM ip %s from left VM failed" % right_vm_fixture.vm_ip
assert left_vm_fixture.ping_with_certainty(
right_vm_fixture.vm_ip), errmsg
self.logger.info("Verify UDP traffic with allow all")
sport = 8001
dport = 9001
sent, recv = self.verify_traffic(left_vm_fixture, right_vm_fixture,
'udp', sport=sport, dport=dport)
errmsg = "UDP traffic with src port %s and dst port %s failed" % (
sport, dport)
assert sent and recv == sent, errmsg
# Delete policy
self.delete_vm(new_left_vm_fix)
self.delete_vm(new_right_vm_fix)
self.detach_policy(new_policy_left_vn_fix)
self.detach_policy(new_policy_right_vn_fix)
self.delete_vn(new_left_vn_fix)
self.delete_vn(new_right_vn_fix)
self.verify_si(si_fixture)
self.logger.info(
"Icmp traffic with allow all after deleting the new left and right VN.")
errmsg = "Ping to right VM ip %s from left VM failed" % right_vm_fixture.vm_ip
assert left_vm_fixture.ping_with_certainty(
right_vm_fixture.vm_ip), errmsg
# end verify_add_new_vns
def verify_add_new_vms(self, si_test_dict):
left_vn_policy_fix = si_test_dict['left_vn_policy_fix']
right_vn_policy_fix = si_test_dict['right_vn_policy_fix']
policy_fixture = si_test_dict['policy_fixture']
left_vm_fixture = si_test_dict['left_vm_fixture']
right_vm_fixture = si_test_dict['right_vm_fixture']
si_fixture = si_test_dict['si_fixture']
left_vn_fixture = si_test_dict['left_vn_fixture']
right_vn_fixture = si_test_dict['right_vn_fixture']
# Launch VMs in new left and right VN's
new_left_vm = 'new_left_bridge_vm'
new_right_vm = 'new_right_bridge_vm'
new_left_vm_fix = self.config_vm(new_left_vm, vn_fix=left_vn_fixture)
new_right_vm_fix = self.config_vm(new_right_vm, vn_fix=right_vn_fixture)
# Wait for VM's to come up
assert new_left_vm_fix.wait_till_vm_is_up()
assert new_right_vm_fix.wait_till_vm_is_up()
# Ping from left VM to right VM
errmsg = "Ping to right VM ip %s from left VM failed" % new_right_vm_fix.vm_ip
assert new_left_vm_fix.ping_with_certainty(
new_right_vm_fix.vm_ip), errmsg
errmsg = "Ping to right VM ip %s from left VM failed" % right_vm_fixture.vm_ip
assert new_left_vm_fix.ping_with_certainty(
right_vm_fixture.vm_ip), errmsg
errmsg = "Ping to right VM ip %s from left VM failed" % right_vm_fixture.vm_ip
assert left_vm_fixture.ping_with_certainty(
right_vm_fixture.vm_ip), errmsg
errmsg = "Ping to right VM ip %s from left VM failed" % new_right_vm_fix.vm_ip
assert left_vm_fixture.ping_with_certainty(
new_right_vm_fix.vm_ip), errmsg
# Install traffic package in VM
left_vm_fixture.install_pkg("Traffic")
right_vm_fixture.install_pkg("Traffic")
self.logger.debug("Send udp traffic; with policy rule allow all")
sport = 8000
dport = 9000
sent, recv = self.verify_traffic(left_vm_fixture, right_vm_fixture,
'udp', sport=sport, dport=dport)
errmsg = "UDP traffic with src port %s and dst port %s failed" % (
sport, dport)
assert sent and recv == sent, errmsg
# Delete policy
self.detach_policy(left_vn_policy_fix)
self.detach_policy(right_vn_policy_fix)
self.unconfig_policy(policy_fixture)
# Add rule to policy to allow traffic from new left_vn to right_vn
# through SI
action_list = policy_fixture.rules_list[0]['action_list']
new_rule = {'direction': '<>',
'protocol': 'udp',
'source_network': left_vn_fixture.vn_name,
'src_ports': [8000, 8000],
'dest_network': right_vn_fixture.vn_name,
'dst_ports': [9000, 9000],
'action_list': action_list
}
rules = [new_rule]
# Create new policy with rule to allow traffci from new VN's
policy_fixture = self.config_policy(policy_fixture.policy_name, rules)
left_vn_policy_fix = self.attach_policy_to_vn(
policy_fixture, left_vn_fixture)
right_vn_policy_fix = self.attach_policy_to_vn(
policy_fixture, right_vn_fixture)
self.verify_si(si_fixture)
# Install traffic package in VM
new_left_vm_fix.install_pkg("Traffic")
new_right_vm_fix.install_pkg("Traffic")
self.logger.debug("Send udp traffic; with policy rule %s", new_rule)
sport = 8000
dport = 9000
sent, recv = self.verify_traffic(left_vm_fixture, right_vm_fixture,
'udp', sport=sport, dport=dport)
errmsg = "UDP traffic with src port %s and dst port %s failed" % (
sport, dport)
assert sent and recv == sent, errmsg
sent, recv = self.verify_traffic(left_vm_fixture, new_right_vm_fix,
'udp', sport=sport, dport=dport)
errmsg = "UDP traffic with src port %s and dst port %s failed" % (
sport, dport)
assert sent and recv == sent, errmsg
sent, recv = self.verify_traffic(new_left_vm_fix, new_right_vm_fix,
'udp', sport=sport, dport=dport)
errmsg = "UDP traffic with src port %s and dst port %s failed" % (
sport, dport)
assert sent and recv == sent, errmsg
sent, recv = self.verify_traffic(new_left_vm_fix, right_vm_fixture,
'udp', sport=sport, dport=dport)
errmsg = "UDP traffic with src port %s and dst port %s failed" % (
sport, dport)
assert sent and recv == sent, errmsg
# Ping from left VM to right VM
errmsg = "Ping to right VM ip %s from left VM failed; Expected to fail" % new_right_vm_fix.vm_ip
assert new_left_vm_fix.ping_with_certainty(
new_right_vm_fix.vm_ip, expectation=False), errmsg
errmsg = "Ping to right VM ip %s from left VM failed; Expected to fail" % right_vm_fixture.vm_ip
assert new_left_vm_fix.ping_with_certainty(
right_vm_fixture.vm_ip, expectation=False), errmsg
errmsg = "Ping to right VM ip %s from left VM failed; Expected to fail" % right_vm_fixture.vm_ip
assert left_vm_fixture.ping_with_certainty(
right_vm_fixture.vm_ip, expectation=False), errmsg
errmsg = "Ping to right VM ip %s from left VM failed; Expected to fail" % new_right_vm_fix.vm_ip
assert left_vm_fixture.ping_with_certainty(
new_right_vm_fix.vm_ip, expectation=False), errmsg
# end verify_add_new_vms
def verify_firewall_with_mirroring(
self, max_inst=1,
firewall_svc_mode='in-network', mirror_svc_mode='transparent'):
"""Validate the service chaining in network datapath"""
#TODO
# max_inst cannot be more than one in this method since
# analyzer packet count verification logic needs to be updated when
# in case of more than one mirror SVM
max_inst = 1
vn1_name = get_random_name('left_vn')
vn2_name = get_random_name('right_vn')
vn1_subnets = [get_random_cidr(af=self.inputs.get_af())]
vn2_subnets = [get_random_cidr(af=self.inputs.get_af())]
vm1_name = get_random_name("in_network_vm1")
vm2_name = get_random_name("in_network_vm2")
action_list = []
firewall_st_name = get_random_name("svc_firewall_template_1")
firewall_si_prefix = get_random_name("svc_firewall_instance")
mirror_st_name = get_random_name("svc_mirror_template_1")
mirror_si_prefix = get_random_name("svc_mirror_instance")
policy_name = get_random_name("policy_in_network")
mgmt_vn_fixture = self.config_vn(get_random_name('mgmt'),
[get_random_cidr(af=self.inputs.get_af())])
vn1_fixture = self.config_vn(vn1_name, vn1_subnets)
vn2_fixture = self.config_vn(vn2_name, vn2_subnets)
vns = [mgmt_vn_fixture, vn1_fixture, vn2_fixture]
def firewall_svc_create(vn_list):
st_fixture = self.config_st(firewall_st_name,
service_type='firewall',
service_mode=firewall_svc_mode,
mgmt=getattr(mgmt_vn_fixture, 'vn_fq_name', None),
left=vn_list[1].vn_fq_name,
right=vn_list[2].vn_fq_name)
svm_fixtures = self.create_service_vms(vn_list,
service_mode=st_fixture.service_mode,
service_type=st_fixture.service_type,
max_inst=max_inst)
firewall_si_fixture = self.config_si(firewall_si_prefix,
st_fixture,
max_inst=max_inst,
mgmt_vn_fq_name=getattr(mgmt_vn_fixture, 'vn_fq_name', None),
left_vn_fq_name=vn_list[1].vn_fq_name,
right_vn_fq_name=vn_list[2].vn_fq_name,
svm_fixtures=svm_fixtures)
assert firewall_si_fixture.verify_on_setup()
return firewall_si_fixture
if firewall_svc_mode == 'transparent':
dummy_vn1 = self.config_vn('dummy_vn1', [get_random_cidr(af=self.inputs.get_af())])
dummy_vn2 = self.config_vn('dummy_vn2', [get_random_cidr(af=self.inputs.get_af())])
dummy_vn_list = [mgmt_vn_fixture, dummy_vn1, dummy_vn2]
firewall_si_fixture = firewall_svc_create(dummy_vn_list)
else:
firewall_si_fixture = firewall_svc_create(vns)
action_list = [firewall_si_fixture.fq_name_str]
mirror_st_fixture = self.config_st(mirror_st_name,
service_type='analyzer',
service_mode=mirror_svc_mode,
left=vn1_fixture.vn_fq_name)
mirror_svm_fixtures = self.create_service_vms([vn1_fixture],
service_mode=mirror_st_fixture.service_mode,
service_type=mirror_st_fixture.service_type,
max_inst=max_inst)
mirror_si_fixture = self.config_si(mirror_si_prefix,
mirror_st_fixture,
max_inst=max_inst,
left_vn_fq_name=vn1_fixture.vn_fq_name,
svm_fixtures=mirror_svm_fixtures)
assert mirror_si_fixture.verify_on_setup()
action_list += [mirror_si_fixture.fq_name_str]
rules = [
{
'direction': '<>',
'protocol': 'any',
'source_network': vn1_name,
'src_ports': [0, 65535],
'dest_network': vn2_name,
'dst_ports': [0, 65535],
'simple_action': 'pass',
'action_list': {'simple_action': 'pass',
'mirror_to': {'analyzer_name': action_list[1]},
'apply_service': action_list[:1]}
},
]
policy_fixture = self.config_policy(policy_name, rules)
vn1_policy_fix = self.attach_policy_to_vn(
policy_fixture, vn1_fixture)
vn2_policy_fix = self.attach_policy_to_vn(
policy_fixture, vn2_fixture)
vm1_fixture = self.config_vm(vm1_name, vn_fix=vn1_fixture)
vm2_fixture = self.config_vm(vm2_name, vn_fix=vn2_fixture)
vm1_fixture.wait_till_vm_is_up()
vm2_fixture.wait_till_vm_is_up()
result, msg = self.validate_vn(vn1_fixture.vn_fq_name)
assert result, msg
result, msg = self.validate_vn(vn2_fixture.vn_fq_name)
assert result, msg
assert self.verify_si(firewall_si_fixture)
assert self.verify_si(mirror_si_fixture)
svms = self.get_svms_in_si(firewall_si_fixture)
svm_node_ip = svms[0].vm_node_ip
# Ping from left VM to right VM
errmsg = "Ping to right VM ip %s from left VM failed" % vm2_fixture.vm_ip
assert vm1_fixture.ping_with_certainty(vm2_fixture.vm_ip), errmsg
# Verify ICMP mirror
sessions = self.tcpdump_on_all_analyzer(mirror_si_fixture)
errmsg = "Ping to right VM ip %s from left VM failed" % vm2_fixture.vm_ip
assert vm1_fixture.ping_to_ip(vm2_fixture.vm_ip), errmsg
for svm_name, (session, pcap) in sessions.items():
if vm1_fixture.vm_node_ip == vm2_fixture.vm_node_ip:
if firewall_svc_mode == 'transparent':
count = 20
else:
count = 10
if vm1_fixture.vm_node_ip != vm2_fixture.vm_node_ip:
if firewall_svc_mode == 'in-network' and vm1_fixture.vm_node_ip == svm_node_ip:
count = 10
else:
count = 20
self.verify_icmp_mirror(svm_name, session, pcap, count)
# end verify_firewall_with_mirroring
def verify_ecmp_hash(self, vn_fixture=None, left_vm_fixture=None, right_vm_fixture=None, ecmp_hash='default'):
"""Verify ECMP configuration hash at Agent and control node """
# Verify configured ecmp_hash fileds at agent
result, msg = self.verify_ecmp_hash_at_agent(ecmp_hash=ecmp_hash,
vn_fixture=vn_fixture,
left_vm_fixture=left_vm_fixture,
right_vm_fixture=right_vm_fixture)
assert result, msg
# end verify_ecmp_hash
@retry(delay=5, tries=10)
def verify_ecmp_hash_at_agent(self, vn_fixture=None, left_vm_fixture=None, right_vm_fixture=None, ecmp_hash='default'):
"""Verify ECMP configuration hash """
# Default ECMP hash with 5 tuple
if ecmp_hash == 'default':
ecmp_hash = {"source_ip": True, "destination_ip": True,
"source_port": True, "destination_port": True,
"ip_protocol": True}
ecmp_hash_config=[]
# ECMP Hash fileds displayed at agent is different from configured
# values. Mapping is: source_ip : l3-source-address, destination_ip:
# l3-destination-address etc..
if 'source_ip' in ecmp_hash:
ecmp_hash_config.append('l3-source-address')
if 'destination_ip' in ecmp_hash:
ecmp_hash_config.append('l3-destination-address')
if 'source_port' in ecmp_hash:
ecmp_hash_config.append('l4-source-port')
if 'destination_port' in ecmp_hash:
ecmp_hash_config.append('l4-destination-port')
if 'ip_protocol' in ecmp_hash:
ecmp_hash_config.append('l4-protocol')
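        # With the default 5-tuple hash the list built above comes out as:
        #   ['l3-source-address', 'l3-destination-address',
        #    'l4-source-port', 'l4-destination-port', 'l4-protocol']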
# Get the ECMP hash next hops at agent
(domain, project, vn) = vn_fixture.vn_fq_name.split(':')
inspect_h = self.agent_inspect[left_vm_fixture.vm_node_ip]
agent_vrf_objs = inspect_h.get_vna_vrf_objs(domain, project, vn)
agent_vrf_obj = left_vm_fixture.get_matching_vrf( agent_vrf_objs['vrf_list'], vn_fixture.vrf_name)
vn_vrf_id = agent_vrf_obj['ucindex']
# Get the ECMP Hashing fields at agent
ecmp_hashing_fileds = inspect_h.get_vna_active_route(vrf_id=vn_vrf_id, ip=right_vm_fixture.vm_ip, prefix='32')['path_list'][0]['ecmp_hashing_fields']
ecmp_hash_at_agent = ecmp_hashing_fileds.split(',')
# Removing the empty elements
ecmp_hash_at_agent = filter(None, ecmp_hash_at_agent)
# Compare ECMP hash configured value with value programmed at agent
if set(ecmp_hash_at_agent) == set(ecmp_hash_config):
result =True
msg = 'ECMP Hash is configured properly at Agent: {%s}' % ecmp_hashing_fileds
self.logger.info('ECMP Hash is configured properly at Agent: {%s}' % ecmp_hashing_fileds)
else:
result = False
msg = 'ECMP Hash is incorrect at Agent. Configured ECMP Hash is: %s, ECMP Hash present at Agent is:%s' % (ecmp_hash_config, ecmp_hash_at_agent)
self.logger.info('ECMP Hash is incorrect at Agent. Configured ECMP Hash is: %s, ECMP Hash present at Agent is:%s' % (ecmp_hash_config, ecmp_hash_at_agent))
return result, msg
# end verify_ecmp_hash_at_agent
| [
"[email protected]"
] | |
0b3ce647889db5ce8bc43acdb3f0730ff2349fb3 | 70811da75f0f38719703e02c9f99e2ce09e21d2a | /LSTM_FCN/distal_phalanx_tw_model.py | cd18a4f46cd55d512c9381b2a58a3c9a060069cf | [] | no_license | HongminWu/time_series_anomaly_detection_classification_clustering | 9d5e555c9bf37ee72770e127588f61f15139bd4e | 548b3799389ec7a96fc56c51360a6de89e0502a1 | refs/heads/master | 2020-03-11T04:44:00.113684 | 2018-08-16T06:38:57 | 2018-08-16T06:38:57 | 129,783,614 | 15 | 6 | null | null | null | null | UTF-8 | Python | false | false | 2,711 | py | from keras.models import Model
from keras.layers import Input, PReLU, Dense, Dropout, LSTM, Bidirectional, multiply, concatenate
from keras.layers import Conv1D, BatchNormalization, GlobalAveragePooling1D, Permute, Activation
from utils.constants import MAX_SEQUENCE_LENGTH_LIST, NB_CLASSES_LIST
from utils.keras_utils import train_model, evaluate_model, set_trainable, visualize_context_vector, visualize_cam
from utils.layer_utils import AttentionLSTM
DATASET_INDEX = 11
MAX_SEQUENCE_LENGTH = MAX_SEQUENCE_LENGTH_LIST[DATASET_INDEX]
NB_CLASS = NB_CLASSES_LIST[DATASET_INDEX]
TRAINABLE = True
def generate_model():
ip = Input(shape=(1, MAX_SEQUENCE_LENGTH))
x = LSTM(64)(ip)
x = Dropout(0.8)(x)
y = Permute((2, 1))(ip)
y = Conv1D(128, 8, padding='same', kernel_initializer='he_uniform')(y)
y = BatchNormalization()(y)
y = Activation('relu')(y)
y = Conv1D(256, 5, padding='same', kernel_initializer='he_uniform')(y)
y = BatchNormalization()(y)
y = Activation('relu')(y)
y = Conv1D(128, 3, padding='same', kernel_initializer='he_uniform')(y)
y = BatchNormalization()(y)
y = Activation('relu')(y)
y = GlobalAveragePooling1D()(y)
x = concatenate([x, y])
out = Dense(NB_CLASS, activation='softmax')(x)
model = Model(ip, out)
model.summary()
# model.load_weights("weights/phalanx_tw_weights - 7769.h5")
return model
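# Note on the two branches above: the (1, MAX_SEQUENCE_LENGTH) input feeds the
# LSTM as a single time step with MAX_SEQUENCE_LENGTH features (the "dimension
# shuffle" used by LSTM-FCN), while Permute((2, 1)) turns it back into
# (MAX_SEQUENCE_LENGTH, 1) for the convolutional branch.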
def generate_model_2():
ip = Input(shape=(1, MAX_SEQUENCE_LENGTH))
x = AttentionLSTM(64)(ip)
x = Dropout(0.8)(x)
y = Permute((2, 1))(ip)
y = Conv1D(128, 8, padding='same', kernel_initializer='he_uniform')(y)
y = BatchNormalization()(y)
y = Activation('relu')(y)
y = Conv1D(256, 5, padding='same', kernel_initializer='he_uniform')(y)
y = BatchNormalization()(y)
y = Activation('relu')(y)
y = Conv1D(128, 3, padding='same', kernel_initializer='he_uniform')(y)
y = BatchNormalization()(y)
y = Activation('relu')(y)
y = GlobalAveragePooling1D()(y)
x = concatenate([x, y])
out = Dense(NB_CLASS, activation='softmax')(x)
model = Model(ip, out)
model.summary()
# add load model code here to fine-tune
return model
if __name__ == "__main__":
model = generate_model_2()
#train_model(model, DATASET_INDEX, dataset_prefix='phalanx_tw', epochs=2000, batch_size=128)
evaluate_model(model, DATASET_INDEX, dataset_prefix='phalanx_tw', batch_size=128)
# visualize_context_vector(model, DATASET_INDEX, dataset_prefix='phalanx_tw', visualize_sequence=True,
# visualize_classwise=True, limit=1)
# visualize_cam(model, DATASET_INDEX, dataset_prefix='phalanx_tw', class_id=0)
| [
"[email protected]"
] | |
ef01adb41fcf1f474f98c2f88f09443ee34ec339 | a838d4bed14d5df5314000b41f8318c4ebe0974e | /sdk/servicebus/azure-servicebus/tests/async_tests/mgmt_tests/test_mgmt_namespaces_async.py | 77e82602f3a50ce880403bd482c0dcba7293d2b3 | [
"MIT",
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later"
] | permissive | scbedd/azure-sdk-for-python | ee7cbd6a8725ddd4a6edfde5f40a2a589808daea | cc8bdfceb23e5ae9f78323edc2a4e66e348bb17a | refs/heads/master | 2023-09-01T08:38:56.188954 | 2021-06-17T22:52:28 | 2021-06-17T22:52:28 | 159,568,218 | 2 | 0 | MIT | 2019-08-11T21:16:01 | 2018-11-28T21:34:49 | Python | UTF-8 | Python | false | false | 1,489 | py | #-------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#--------------------------------------------------------------------------
import pytest
from azure.servicebus.aio.management import ServiceBusAdministrationClient
from devtools_testutils import AzureMgmtTestCase, CachedResourceGroupPreparer
from servicebus_preparer import CachedServiceBusNamespacePreparer
class ServiceBusManagementClientNamespaceAsyncTests(AzureMgmtTestCase):
@CachedResourceGroupPreparer(name_prefix='servicebustest')
@CachedServiceBusNamespacePreparer(name_prefix='servicebustest')
async def test_async_mgmt_namespace_get_properties(self, servicebus_namespace_connection_string,
servicebus_namespace, servicebus_namespace_key_name,
servicebus_namespace_primary_key):
mgmt_service = ServiceBusAdministrationClient.from_connection_string(servicebus_namespace_connection_string)
properties = await mgmt_service.get_namespace_properties()
assert properties
assert properties.messaging_sku == 'Standard'
# assert properties.name == servicebus_namespace.name
# This is disabled pending investigation of why it isn't getting scrubbed despite expected scrubber use.
| [
"[email protected]"
] | |
4799dfae66b08654ba541db4e36bfdab1b6ecd9b | 6382e12a32c3b62ec059ca45c1fee6941e51e260 | /Part_5__Control_Flow/Chap_14__Iterables_Iterators_and_Generators/ex_14_12__aritprog_gen.py | b705450eca5df149e40a62b8325732285db256f8 | [] | no_license | CavalcanteLucas/python-fluent | e352a79e1da87ae4ee320a09196e119235a904a8 | a4e22ab88235c5045eca52745b5e1558586dc166 | refs/heads/master | 2023-07-11T20:13:35.793456 | 2023-06-19T13:37:54 | 2023-06-19T13:37:54 | 224,661,365 | 1 | 0 | null | 2023-02-11T01:30:09 | 2019-11-28T13:39:03 | Jupyter Notebook | UTF-8 | Python | false | false | 233 | py | def aritprog_gen(begin, step, end=None):
result = type(begin + step)(begin)
forever = end is None
index = 0
while forever or result < end:
yield result
index += 1
result = begin + step * index
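# Example (a small sanity check; type(begin + step) keeps int/float/Decimal
# arithmetic consistent throughout the progression):
#   >>> list(aritprog_gen(0, .5, 2))
#   [0.0, 0.5, 1.0, 1.5]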
| [
"[email protected]"
] | |
fbcce6a43ad58373cd35ab45d604f4c91582da33 | e7b7505c084e2c2608cbda472bc193d4a0153248 | /LeetcodeNew/python/LC_935.py | 58a9a16ae88d9c6f9538e65cc80e22da6dfcaf47 | [] | no_license | Taoge123/OptimizedLeetcode | 8e5c1cd07904dfce1248bc3e3f960d2f48057a5d | 3e50f6a936b98ad75c47d7c1719e69163c648235 | refs/heads/master | 2023-02-27T21:13:40.450089 | 2023-02-07T04:11:09 | 2023-02-07T04:11:09 | 170,044,224 | 9 | 3 | null | null | null | null | UTF-8 | Python | false | false | 1,630 | py |
"""
https://www.youtube.com/watch?v=HTnIFivp0aw
This is a simple but fairly interesting problem, and the DP approach is easy to
come up with: let dp[k] be the number of dialing sequences whose current digit
is k. It is simply the sum of the counts for every digit the knight could have
been on just before pressing k.
For example, on the i-th press dp[4] equals dp[0] + dp[3] + dp[9] from press
i-1, because on the keypad a knight can only jump to 4 from 0, 3 and 9.
"""
class SolutionTD:
def knightDialer(self, n):
table = {1: [6, 8], 2: [7, 9], 3: [4, 8], 4: [0, 3, 9], 5: [], 6: [0, 1, 7], 7: [2, 6], 8: [1, 3], 9: [2, 4],
0: [4, 6]}
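        # table[k] lists the keypad digits a chess knight can jump to from k;
        # 5 is unreachable, so any sequence passing through it dies out.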
self.mod = 10 ** 9 + 7
res = 0
memo = {}
for i in range(10):
res += self.dfs(n - 1, i, table, memo)
res %= self.mod
return res
def dfs(self, n, node, table, memo):
if (n, node) in memo:
return memo[(n, node)]
if n == 0:
return 1
res = 0
for nei in table[node]:
res += self.dfs(n - 1, nei, table, memo)
res %= self.mod
memo[(n, node)] = res
return res
class Solution:
def knightDialer(self, N):
table = {1: [6, 8], 2: [7, 9], 3: [4, 8], 4: [0, 3, 9], 5: [], 6: [0, 1, 7], 7: [2, 6], 8: [1, 3], 9: [2, 4],
0: [4, 6]}
mod = 10 ** 9 + 7
dp = [1] * 10
for _ in range(N - 1):
newDP = [0] * 10
for i in range(10):
for j in table[i]:
newDP[j] += dp[i]
dp = newDP
return sum(dp) % (mod)
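# Hand-checkable values for small N (both classes agree):
#   knightDialer(1) == 10   # any single key, including 5
#   knightDialer(2) == 20   # total number of knight moves on the keypad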
| [
"[email protected]"
] | |
95104df4640b4babf14d129503b2955198323497 | be0f3dfbaa2fa3d8bbe59229aef3212d032e7dd1 | /Gauss_v45r10p1/Gen/DecFiles/options/11104121.py | e842e5077a8ce26042b14a549459d60c120ea087 | [] | no_license | Sally27/backup_cmtuser_full | 34782102ed23c6335c48650a6eaa901137355d00 | 8924bebb935b96d438ce85b384cfc132d9af90f6 | refs/heads/master | 2020-05-21T09:27:04.370765 | 2018-12-12T14:41:07 | 2018-12-12T14:41:07 | 185,989,173 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 860 | py | # file /home/hep/ss4314/cmtuser/Gauss_v45r10p1/Gen/DecFiles/options/11104121.py generated: Wed, 25 Jan 2017 15:25:16
#
# Event Type: 11104121
#
# ASCII decay Descriptor: [B0 -> pi+ pi- (KS0 -> pi+ pi-)]cc
#
from Configurables import Generation
Generation().EventType = 11104121
Generation().SampleGenerationTool = "SignalRepeatedHadronization"
from Configurables import SignalRepeatedHadronization
Generation().addTool( SignalRepeatedHadronization )
Generation().SignalRepeatedHadronization.ProductionTool = "PythiaProduction"
from Configurables import ToolSvc
from Configurables import EvtGenDecay
ToolSvc().addTool( EvtGenDecay )
ToolSvc().EvtGenDecay.UserDecayFile = "$DECFILESROOT/dkfiles/Bd_KSpi+pi-=DecProdCut.dec"
Generation().SignalRepeatedHadronization.CutTool = "DaughtersInLHCb"
Generation().SignalRepeatedHadronization.SignalPIDList = [ 511,-511 ]
| [
"[email protected]"
] | |
1ade39bff263007813db93d12d91966da695744a | e3eee8900296e91601a2f6fea027c7956433e072 | /chap10/dirList.py | c2a90cfdb43d8848cbea15e53bebe83fc3d469b8 | [] | no_license | chc1129/introducing-python3 | 70ff14bbf24f7030a8cc20dba7db753e64b46865 | 43a6de586862380ac221669f11f1fbbac9105bb5 | refs/heads/master | 2020-04-12T21:53:32.031918 | 2019-09-23T12:55:59 | 2019-09-23T12:55:59 | 162,775,137 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 57 | py | import os
print(os.listdir('.'))
print(os.listdir('..'))
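# Each call prints a list of directory entries; the output depends on where
# the script is run from, e.g. ['dirList.py', ...] for the current directory.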
| [
"[email protected]"
] | |
dd75a4912a4cbb4431c9f3a9493f1d8671208472 | 412ddb81b217ced05f77381a625a6ee26a3b2ea7 | /lib/Stats.py | 11ef31512bc7bf0d2ac2616db64d5d2797753a9e | [] | no_license | AndreasHeger/adda | d26fcb7ba3f32ced351d34b8dac7f802e63219c5 | ddae18476747ef51cc8a2d924b723d5ae81a2da7 | refs/heads/master | 2016-09-10T22:58:51.797341 | 2014-06-26T19:44:39 | 2014-06-26T19:44:39 | 16,487,084 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 42,894 | py | ################################################################################
#
# MRC FGU Computational Genomics Group
#
# $Id$
#
# Copyright (C) 2009 Andreas Heger
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#################################################################################
'''
Stats.py - statistical tests and utility functions
======================================================
:Author: Andreas Heger
:Release: $Id$
:Date: |today|
:Tags: Python
Code
----
'''
import types
import math
import numpy
import scipy
import scipy.stats
import scipy.interpolate
import collections
from rpy2.robjects import r as R
import rpy2.robjects as ro
import rpy2.robjects.numpy2ri
def getSignificance( pvalue, thresholds=[0.05, 0.01, 0.001] ):
"""return cartoon of significance of a p-Value."""
n = 0
for x in thresholds:
if pvalue > x: return "*" * n
n += 1
return "*" * n
class Result(object):
'''allow both member and dictionary access.'''
    # ``slots=("_data")`` was a typo: it only created a plain class attribute.
    __slots__ = ("_data",)
def __init__(self):
object.__setattr__(self, "_data", dict())
def fromR( self, take, r_result ):
'''convert from an *r_result* dictionary using map *take*.
*take* is a list of tuples mapping a field to the corresponding
field in *r_result*.
'''
for x,y in take:
if y:
self._data[x] = r_result.rx(y)[0][0]
else:
self._data[x] = r_result.rx(x)[0][0]
# if y:
# self._data[x] = r_result[y]
# else:
# self._data[x] = r_result[x]
return self
def __getattr__(self, key):
if not key.startswith("_"):
try: return object.__getattribute__(self,"_data")[key]
except KeyError: pass
return getattr( self._data, key )
def keys(self): return self._data.keys()
def values(self): return self._data.values()
def __len__(self): return self._data.__len__()
def __str__(self):
return str(self._data)
def __contains__(self,key):
return key in self._data
def __getitem__(self, key ):
return self._data[key]
def __delitem__(self, key ):
del self._data[key]
def __setitem__(self, key, value ):
self._data[key] = value
def __setattr__(self, key, value):
if not key.startswith("_"):
self._data[key] = value
else:
object.__setattr__(self,key,value)
#################################################################
#################################################################
#################################################################
## Perform log likelihood test
class LogLikelihoodTest:
def __init__(self):
pass
def doLogLikelihoodTest( complex_ll, complex_np,
simple_ll, simple_np,
significance_threshold = 0.05):
"""perform log-likelihood test between model1 and model2.
"""
    assert complex_ll >= simple_ll, "log likelihood of complex model smaller than that of simple model: %f < %f" % (complex_ll, simple_ll)
chi = 2 * (complex_ll - simple_ll)
df = complex_np - simple_np
if df <= 0:
raise ValueError, "difference of degrees of freedom not larger than 0"
p = scipy.stats.chisqprob( chi, df )
l = LogLikelihoodTest()
l.mComplexLogLikelihood = complex_ll
l.mSimpleLogLikelihood = simple_ll
l.mComplexNumParameters = complex_np
l.mSimpleNumParameters = simple_np
l.mSignificanceThreshold = significance_threshold
l.mProbability = p
l.mChiSquaredValue = chi
l.mDegreesFreedom = df
if p < significance_threshold:
l.mPassed = True
else:
l.mPassed = False
return l
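# Illustrative usage sketch (added for clarity, not part of the original module);
# the log-likelihood values below are hypothetical.
def _example_doLogLikelihoodTest():
    """Compare a 3-parameter model against a nested 2-parameter model."""
    result = doLogLikelihoodTest(complex_ll=-100.0, complex_np=3,
                                 simple_ll=-110.0, simple_np=2)
    # chi = 2 * (110 - 100) = 20 on 1 degree of freedom -> highly significant
    return result.mPassed, result.mProbability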
#################################################################
#################################################################
#################################################################
class BinomialTest:
def __init__(self):
pass
def doBinomialTest( p, sample_size, observed, significance_threshold = 0.05):
"""perform a binomial test.
Given are p: the probability of the NULL hypothesis, the sample_size
and the number of observed counts.
"""
pass
#################################################################
#################################################################
#################################################################
class ChiSquaredTest:
def __init__(self):
pass
def doChiSquaredTest( matrix, significance_threshold = 0.05 ):
'''perform chi-squared test on a matrix.
The observed/expected values are in rows, the categories are in columns, for
example:
+---------+--------------+--------+----------+
|set |protein_coding|intronic|intergenic|
+---------+--------------+--------+----------+
|observed |92 |90 |194 |
+---------+--------------+--------+----------+
|expected |91 |10 |15 |
+---------+--------------+--------+----------+
If there are only two categories (one degrees of freedom) the Yates correction is applied.
For each entry (observed-expected), the value 0.5 is subtracted ignoring the sign of the difference.
The test throws an exception if
1. one or more expected categories are less than 1 (it does not matter what the observed values are)
2. more than one-fifth of expected categories are less than 5
'''
nrows, ncols = matrix.shape
if nrows != 2:
raise NotImplementedError( "chi-square currently only implemented for 2xn tables." )
n = 0
for x in range(ncols):
if matrix[1][x] < 1:
raise ValueError( "matrix contains expected counts < 1" )
if matrix[1][x] < 5: n +=1
if 100.0 * n / ncols > 20.0:
raise ValueError( "more than 20% of expected categories are less than 5" )
row_sums = [ sum(matrix[x,:]) for x in range( nrows ) ]
col_sums = [ sum(matrix[:,x]) for x in range( ncols ) ]
sample_size = float(sum(row_sums))
chi = 0.0
df = (nrows - 1) * (ncols -1 )
## Yates correction applies for a 2x2 table only (df==1)
if df == 1:
correction = 0.5 * 0.5
else:
correction = 0
for x in range(nrows):
for y in range(ncols):
expected = row_sums[x] * col_sums[y] / sample_size
# compute difference and apply Yates correction
d = abs(matrix[x,y] - expected) - correction
chi += (d * d ) / expected
result = ChiSquaredTest()
result.mProbability = scipy.stats.chisqprob( chi, df )
result.mDegreesFreedom = df
result.mChiSquaredValue = chi
result.mPassed = result.mProbability < significance_threshold
result.mSignificance = getSignificance( result.mProbability )
result.mSampleSize = sample_size
result.mPhi = math.sqrt( result.mChiSquaredValue / result.mSampleSize )
return result
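# Illustrative usage sketch (added for clarity, not part of the original module),
# reusing the observed/expected table from the docstring above.
def _example_doChiSquaredTest():
    matrix = numpy.array([[92, 90, 194], [91, 10, 15]], numpy.float)
    result = doChiSquaredTest(matrix)
    return result.mProbability, result.mSignificance, result.mPhi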
def doPearsonChiSquaredTest( p, sample_size, observed, significance_threshold = 0.05):
"""perform a pearson chi squared test.
Given are p: the probability of the NULL hypothesis, the sample_size
and the number of observed counts.
For large sample sizes, this test is a continuous approximation to
the binomial test.
"""
e = float(p) * sample_size
d = float(observed) - e
chi = d * d / e
df = 1
result = ChiSquaredTest()
result.mProbability = scipy.stats.chisqprob( chi, df )
result.mDegreesFreedom = df
result.mChiSquaredValue = chi
result.mPassed = result.mProbability < significance_threshold
result.mSignificance = getSignificance( result.mProbability )
result.mSampleSize = sample_size
result.mPhi = math.sqrt( result.mChiSquaredValue / result.mSampleSize )
result.mObserved = observed
result.mExpected = e
return result
#################################################################
#################################################################
#################################################################
## Convenience functions and objects for statistical analysis
class DistributionalParameters:
"""a collection of distributional parameters. Available properties
are:
mMean, mMedian, mMin, mMax, mSampleStd, mSum, mCounts
    This class is deprecated - use :class:`Summary` instead.
"""
def __init__(self, values = None, format = "%6.4f", mode="float"):
self.mMean, self.mMedian, self.mMin, self.mMax, self.mSampleStd, self.mSum, self.mCounts, self.mQ1, self.mQ3 = \
(0, 0, 0, 0, 0, 0, 0, 0, 0)
if values != None and len(values) > 0: self.updateProperties( values )
self.mFormat = format
self.mMode = mode
self.mNErrors = 0
def updateProperties( self, values):
"""update properties.
If values is an vector of strings, each entry will be converted
to float. Entries that can not be converted are ignored.
"""
values = [x for x in values if x != None ]
if len(values) == 0:
raise ValueError( "no data for statistics" )
## convert
self.mNErrors = 0
if type(values[0]) not in (types.IntType, types.FloatType):
n = []
for x in values:
try:
n.append( float(x) )
except ValueError:
self.mNErrors += 1
else:
n = values
if len(n) == 0:
raise ValueError( "no data for statistics" )
## use a non-sort algorithm later.
n.sort()
self.mQ1 = n[len(n) / 4]
self.mQ3 = n[len(n) * 3 / 4]
self.mCounts = len(n)
self.mMin = min(n)
self.mMax = max(n)
self.mMean = scipy.mean( n )
self.mMedian = scipy.median( n )
self.mSampleStd = scipy.std( n )
self.mSum = reduce( lambda x, y: x+y, n )
def getZScore( self, value ):
"""return zscore for value."""
if self.mSampleStd > 0:
return (value - self.mMean) / self.mSampleStd
else:
return 0
def setFormat( self, format ):
"""set number format."""
self.mFormat = format
def getHeaders( self ):
"""returns header of column separated values."""
return ("nval", "min", "max", "mean", "median", "stddev", "sum", "q1", "q3")
def getHeader( self ):
"""returns header of column separated values."""
return "\t".join( self.getHeaders())
def items(self):
return [ (x, self.__getitem__(x)) for x in self.getHeaders() ]
def __getitem__( self, key ):
if key == "nval": return self.mCounts
if key == "min": return self.mMin
if key == "max": return self.mMax
if key == "mean": return self.mMean
if key == "median": return self.mMedian
if key == "stddev": return self.mSampleStd
if key == "sum": return self.mSum
if key == "q1": return self.mQ1
if key == "q3": return self.mQ3
raise KeyError, key
def __str__( self ):
"""return string representation of data."""
if self.mMode == "int":
format_vals = "%i"
format_median = "%.1f"
else:
format_vals = self.mFormat
format_median = self.mFormat
return "\t".join( ( "%i" % self.mCounts,
format_vals % self.mMin,
format_vals % self.mMax,
self.mFormat % self.mMean,
format_median % self.mMedian,
self.mFormat % self.mSampleStd,
format_vals % self.mSum,
format_vals % self.mQ1,
format_vals % self.mQ3,
) )
class Summary( Result ):
"""a collection of distributional parameters. Available properties
are:
mean, median, min, max, samplestd, sum, counts
"""
fields = ("nval", "min", "max", "mean", "median", "stddev", "sum", "q1", "q3")
def __init__(self, values = None,
format = "%6.4f", mode="float",
allow_empty = True ):
Result.__init__(self)
self._format = format
self._mode = mode
        # note that this determines the order of the fields at output
self.counts, self.min, self.max, self.mean, self.median, self.samplestd, self.sum, self.q1, self.q3 = \
(0, 0, 0, 0, 0, 0, 0, 0, 0)
if values != None:
values = [x for x in values if x != None ]
if len(values) == 0:
if allow_empty: return
else: raise ValueError( "no data for statistics" )
# convert
self._nerrors = 0
if type(values[0]) not in (types.IntType, types.FloatType):
n = []
for x in values:
try:
n.append( float(x) )
except ValueError:
self._nerrors += 1
else:
n = values
## use a non-sort algorithm?
n.sort()
if len(n):
self.q1 = n[len(n) / 4]
self.q3 = n[len(n) * 3 / 4]
else:
self.q1 = self.q3 = 0
self.counts = len(n)
self.min = min(n)
self.max = max(n)
self.mean = scipy.mean( n )
self.median = scipy.median( n )
self.samplestd = scipy.std( n )
self.sum = reduce( lambda x, y: x+y, n )
def getHeaders( self ):
"""returns header of column separated values."""
return self.fields
def getHeader( self ):
"""returns header of column separated values."""
return "\t".join( self.getHeaders())
def __str__( self ):
"""return string representation of data."""
if self._mode == "int":
format_vals = "%i"
format_median = "%.1f"
else:
format_vals = self._format
format_median = self._format
return "\t".join( ( "%i" % self.counts,
format_vals % self.min,
format_vals % self.max,
self._format % self.mean,
format_median % self.median,
self._format % self.samplestd,
format_vals % self.sum,
format_vals % self.q1,
format_vals % self.q3,
) )
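# Illustrative usage sketch (added for clarity, not part of the original module).
def _example_Summary():
    s = Summary([1, 2, 3, 4, 5], mode="int")
    # str(s) yields the tab-separated nval/min/max/mean/median/stddev/sum/q1/q3 row
    return s.mean, s.median, str(s)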
def adjustPValues( pvalues, method ):
'''adjust P-Values for multiple testing using
the p.adjust() method in R.
Possible values of method are:
c("holm", "hochberg", "hommel", "bonferroni", "BH", "BY", "fdr", "none")
'''
return R.p_adjust( pvalues, method )
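# Illustrative usage sketch (added for clarity, not part of the original module).
# Requires a working R installation reachable through rpy2.
def _example_adjustPValues():
    """Benjamini-Hochberg adjustment of a small list of p-values."""
    return adjustPValues([0.01, 0.02, 0.30, 0.50], method="BH")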
def smoothPValues( pvalues,
vlambda=numpy.arange(0,0.95,0.05),
smooth_df = 3,
smooth_log_pi0 = False):
if min(pvalues) < 0 or max(pvalues) > 1:
raise ValueError( "p-values out of range" )
if len(vlambda) > 1 and len(vlambda) < 4:
raise ValueError(" If length of vlambda greater than 1, you need at least 4 values." )
if len(vlambda) > 1 and (min(vlambda) < 0 or max(vlambda) >= 1):
raise ValueError( "vlambda must be within [0, 1).")
m = len(pvalues)
pi0 = numpy.zeros( len(vlambda), numpy.float )
for i in range( len(vlambda) ):
pi0[i] = numpy.mean( [x >= vlambda[i] for x in pvalues ]) / (1.0 -vlambda[i] )
R.assign( "pi0", pi0)
R.assign( "vlambda", vlambda)
print "pi0=", pi0
if smooth_log_pi0:
pi0 = math.log(pi0)
R.assign( "smooth_df", smooth_df)
spi0 = R("""spi0 <- smooth.spline(vlambda,pi0, df = smooth_df)""")
pi0 = R("""pi0 <- predict( spi0, x = max(vlambda) )$y""")
print spi0
if smooth_log_pi0:
pi0 = math.exp(pi0)
return pi0
def getPi0( pvalues,
vlambda=numpy.arange(0,0.95,0.05),
pi0_method="smoother",
smooth_df = 3,
smooth_log_pi0 = False):
'''used within nubiscan.'''
if min(pvalues) < 0 or max(pvalues) > 1:
raise ValueError( "p-values out of range" )
if len(vlambda) > 1 and len(vlambda) < 4:
raise ValueError(" If length of vlambda greater than 1, you need at least 4 values." )
if len(vlambda) > 1 and (min(vlambda) < 0 or max(vlambda) >= 1):
raise ValueError( "vlambda must be within [0, 1).")
m = len(pvalues)
# these next few functions are the various ways to estimate pi0
if len(vlambda)==1:
vlambda = vlambda[0]
if vlambda < 0 or vlambda >=1 :
raise ValueError( "vlambda must be within [0, 1).")
pi0 = numpy.mean( [ x >= vlambda for x in pvalues ] ) / (1.0 - vlambda)
pi0 = min(pi0, 1.0)
R.assign( "pi0", pi0)
else:
pi0 = numpy.zeros( len(vlambda), numpy.float )
for i in range( len(vlambda) ):
pi0[i] = numpy.mean( [x >= vlambda[i] for x in pvalues ]) / (1.0 -vlambda[i] )
R.assign( "pi0", pi0)
R.assign( "vlambda", vlambda)
if pi0_method=="smoother":
if smooth_log_pi0:
pi0 = math.log(pi0)
R.assign( "smooth_df", smooth_df)
spi0 = R("""spi0 <- smooth.spline(vlambda,pi0, df = smooth_df)""")
pi0 = R("""pi0 <- predict( spi0, x = max(vlambda) )$y""")
if smooth_log_pi0:
pi0 = math.exp(pi0)
elif pi0_method=="bootstrap":
minpi0 = min(pi0)
mse = numpy.zeros( len(vlambda), numpy.float )
pi0_boot = numpy.zeros( len(vlambda), numpy.float )
R.assign( "pvalues", pvalues)
pi0 = R("""
m <- length(pvalues)
minpi0 <- min(pi0)
mse <- rep(0,length(vlambda))
pi0_boot <- rep(0,length(vlambda))
for(i in 1:100)
{
pvalues_boot <- sample(pvalues,size=m,replace=TRUE)
for(i in 1:length(vlambda))
{
pi0_boot[i] <- mean(pvalues_boot>vlambda[i])/(1-vlambda[i])
}
mse <- mse + (pi0_boot-minpi0)^2
}
pi0 <- min(pi0[mse==min(mse)])""")
else:
raise ValueError( "'pi0_method' must be one of 'smoother' or 'bootstrap'.")
pi0 = min(pi0,1.0)
if pi0 <= 0:
raise ValueError( "The estimated pi0 <= 0. Check that you have valid p-values or use another vlambda method." )
return pi0
class FDRResult:
def __init__(self):
pass
def plot(self, hardcopy = None):
if hardcopy:
R.png(hardcopy, width=1024, height=768, type="cairo")
R.require('qvalue')
# build a qobj
R.assign( "pval", self.mPValues )
R.assign( "pi0", self.mPi0 )
R.assign( "qval", self.mQValues )
R.assign( "lambda", self.mLambda )
R("""qobj <-list( pi0=pi0, qvalues=qval, pvalues=pval, lambda=lambda)""")
R(""" class(qobj) <- "qvalue" """)
R("""qplot(qobj)""")
if hardcopy:
R.dev_off()
def doFDR(pvalues,
vlambda=None,
pi0_method="smoother",
fdr_level=None,
robust=False,
smooth_df = 3,
smooth_log_pi0 = False,
plot = False ):
"""modeled after code taken from http://genomics.princeton.edu/storeylab/qvalue/linux.html.
I did not like the error handling so I translated most to python.
Compute FDR after method by Storey et al. (2002).
"""
# set to default of qvalue method
if vlambda == None: vlambda = numpy.arange(0,0.95,0.05)
if min(pvalues) < 0 or max(pvalues) > 1:
raise ValueError( "p-values out of range" )
if type(vlambda) == float:
vlambda = (vlambda, )
if len(vlambda) > 1 and len(vlambda) < 4:
raise ValueError(" If length of vlambda greater than 1, you need at least 4 values." )
if len(vlambda) > 1 and (min(vlambda) < 0 or max(vlambda) >= 1):
raise ValueError( "vlambda must be within [0, 1).")
m = len(pvalues)
# these next few functions are the various ways to estimate pi0
if len(vlambda)==1:
vlambda = vlambda[0]
if vlambda < 0 or vlambda >=1 :
raise ValueError( "vlambda must be within [0, 1).")
pi0 = numpy.mean( [ x >= vlambda for x in pvalues ] ) / (1.0 - vlambda)
pi0 = min(pi0, 1.0)
R.assign( "pi0", pi0)
else:
pi0 = numpy.zeros( len(vlambda), numpy.float )
for i in range( len(vlambda) ):
pi0[i] = numpy.mean( [x >= vlambda[i] for x in pvalues ]) / (1.0 -vlambda[i] )
R.assign( "pi0", pi0)
R.assign( "vlambda", vlambda)
if pi0_method=="smoother":
if smooth_log_pi0:
pi0 = math.log(pi0)
R.assign( "smooth_df", smooth_df)
spi0 = R("""spi0 <- smooth.spline(vlambda,pi0, df = smooth_df)""")
if plot:
import matplotlib.pyplot as plt
plt.figure()
plt.plot( vlambda, pi0 )
x2 = numpy.arange( 0, 1, 0.001 )
R.assign( "x2", x2)
y2 = R("""y2 <- predict( spi0, x = x2 )$y""")
plt.plot( x2, y2 )
plt.show()
pi0 = R("""pi0 <- predict( spi0, x = max(vlambda) )$y""")[0]
if smooth_log_pi0:
pi0 = math.exp(pi0)
elif pi0_method=="bootstrap":
minpi0 = min(pi0)
mse = numpy.zeros( len(vlambda), numpy.float )
pi0_boot = numpy.zeros( len(vlambda), numpy.float )
R.assign( "pvalues", pvalues)
pi0 = R("""
m <- length(pvalues)
minpi0 <- min(pi0)
mse <- rep(0,length(vlambda))
pi0_boot <- rep(0,length(vlambda))
for(i in 1:100)
{
pvalues_boot <- sample(pvalues,size=m,replace=TRUE)
for(i in 1:length(vlambda))
{
pi0_boot[i] <- mean(pvalues_boot>vlambda[i])/(1-vlambda[i])
}
mse <- mse + (pi0_boot-minpi0)^2
}
pi0 <- min(pi0[mse==min(mse)])""")[0]
else:
raise ValueError( "'pi0_method' must be one of 'smoother' or 'bootstrap'.")
pi0 = min(pi0,1.0)
R.assign( "pi0", pi0 )
if pi0 <= 0:
raise ValueError( "The estimated pi0 (%f) <= 0. Check that you have valid p-values or use another vlambda method." % pi0)
if fdr_level != None and (fdr_level <= 0 or fdr_level > 1):
raise ValueError( "'fdr_level' must be within (0, 1].")
# The estimated q-values calculated here
#u = numpy.argsort( p )
# change by Alan
# ranking function which returns number of observations less than or equal
ro.globalenv['pvalues'] = ro.FloatVector( pvalues )
R.assign( "robust", robust )
qvalues = R("""u <- order(pvalues)
qvalues.rank <- function(x)
{
idx <- sort.list(x)
fc <- factor(x)
nl <- length(levels(fc))
bin <- as.integer(fc)
tbl <- tabulate(bin)
cs <- cumsum(tbl)
tbl <- rep(cs, tbl)
tbl[idx] <- tbl
return(tbl)
}
v <- qvalues.rank(pvalues)
m <- length(pvalues)
qvalues <- pi0 * m * pvalues / v
if(robust)
{
qvalues <- pi0*m*pvalues/(v*(1-(1-pvalues)^m))
}
qvalues[u[m]] <- min(qvalues[u[m]],1)
rqvalues <- qvalues
for(i in (m-1):1)
{
qvalues[u[i]] <- min(qvalues[u[i]],qvalues[u[i+1]],1)
}
qvalues
""")
result = FDRResult()
result.mQValues = qvalues
if fdr_level != None:
result.mPassed = [ x <= fdr_level for x in result.mQValues ]
else:
result.mPassed = [ False for x in result.mQValues ]
result.mPValues = pvalues
result.mPi0 = pi0
result.mLambda = vlambda
return result
def doFDRPython(pvalues,
vlambda=None,
pi0_method="smoother",
fdr_level=None,
robust=False,
smooth_df = 3,
smooth_log_pi0 = False,
pi0 = None,
plot = False ):
"""modeled after code taken from http://genomics.princeton.edu/storeylab/qvalue/linux.html.
I did not like the error handling so I translated most to python.
Compute FDR after method by Storey et al. (2002).
"""
if min(pvalues) < 0 or max(pvalues) > 1:
raise ValueError( "p-values out of range" )
# set to default of qvalue method
if vlambda == None: vlambda = numpy.arange(0,0.95,0.05)
m = len(pvalues)
pvalues = numpy.array( pvalues, dtype = numpy.float )
if pi0 == None:
if type(vlambda) == float:
vlambda = (vlambda,)
if len(vlambda) > 1 and len(vlambda) < 4:
raise ValueError(" if length of vlambda greater than 1, you need at least 4 values." )
if len(vlambda) > 1 and (min(vlambda) < 0 or max(vlambda) >= 1):
raise ValueError( "vlambda must be within [0, 1).")
# estimate pi0
if len(vlambda)==1:
vlambda = vlambda[0]
if vlambda < 0 or vlambda >=1 :
raise ValueError( "vlambda must be within [0, 1).")
pi0 = numpy.mean( [ x >= vlambda for x in pvalues ] ) / (1.0 - vlambda)
pi0 = min(pi0, 1.0)
else:
pi0 = numpy.zeros( len(vlambda), numpy.float )
for i in range( len(vlambda) ):
pi0[i] = numpy.mean( [x >= vlambda[i] for x in pvalues ]) / (1.0 - vlambda[i] )
if pi0_method=="smoother":
if smooth_log_pi0: pi0 = math.log(pi0)
tck = scipy.interpolate.splrep( vlambda,
pi0,
k = smooth_df,
s = 10000 )
if plot:
import matplotlib.pyplot as plt
plt.figure()
plt.plot( vlambda, pi0 )
x2 = numpy.arange( 0, 1, 0.001 )
y2 = scipy.interpolate.splev( x2, tck )
plt.plot( x2, y2 )
plt.show()
pi0 = scipy.interpolate.splev( max(vlambda), tck )
if smooth_log_pi0: pi0 = math.exp(pi0)
elif pi0_method=="bootstrap":
minpi0 = min(pi0)
mse = numpy.zeros( len(vlambda), numpy.float )
pi0_boot = numpy.zeros( len(vlambda), numpy.float )
for i in xrange(100):
# sample pvalues
idx_boot = numpy.random.random_integers( 0, m-1, m)
pvalues_boot = pvalues[idx_boot]
for x in xrange( len(vlambda )):
# compute number of pvalues larger than lambda[x]
pi0_boot[x] = numpy.mean( pvalues_boot > vlambda[x]) / (1.0 - vlambda[x])
mse += (pi0_boot - minpi0) ** 2
pi0 = min( pi0[mse==min(mse)] )
else:
raise ValueError( "'pi0_method' must be one of 'smoother' or 'bootstrap'.")
pi0 = min(pi0,1.0)
if pi0 <= 0:
raise ValueError( "The estimated pi0 <= 0. Check that you have valid p-values or use another vlambda method." )
if fdr_level != None and (fdr_level <= 0 or fdr_level > 1):
raise ValueError( "'fdr_level' must be within (0, 1].")
# compute qvalues
idx = numpy.argsort( pvalues )
# monotonically decreasing bins, so that bins[i-1] > x >= bins[i]
bins = numpy.unique( pvalues )[::-1]
# v[i] = number of observations less than or equal to pvalue[i]
# could this be done more elegantly?
val2bin = len(bins) - numpy.digitize( pvalues, bins )
v = numpy.zeros( m, dtype = numpy.int )
lastbin = None
for x in xrange( m-1, -1, -1 ):
bin = val2bin[idx[x]]
if bin != lastbin: c = x
v[idx[x]] = c+1
lastbin = bin
qvalues = pvalues * pi0 * m / v
if robust:
qvalues /= ( 1.0 - ( 1.0 - pvalues)**m )
# bound qvalues by 1 and make them monotonic
qvalues[idx[m-1]] = min(qvalues[idx[m-1]],1.0)
for i in xrange(m-2,-1,-1):
qvalues[idx[i]] = min(min(qvalues[idx[i]],qvalues[idx[i+1]]),1.0)
result = FDRResult()
result.mQValues = qvalues
if fdr_level != None:
result.mPassed = [ x <= fdr_level for x in result.mQValues ]
else:
result.mPassed = [ False for x in result.mQValues ]
result.mPValues = pvalues
result.mPi0 = pi0
result.mLambda = vlambda
result.xvalues = qvalues
return result
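# Illustrative usage sketch (added for clarity, not part of the original module).
# With uniformly distributed p-values the estimated pi0 should be close to 1.
def _example_doFDRPython():
    numpy.random.seed(0)
    pvalues = list(numpy.random.uniform(0, 1, 200))
    result = doFDRPython(pvalues, fdr_level=0.05)
    return result.mPi0, result.mQValues[:5]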
#################################################################
#################################################################
#################################################################
class CorrelationTest:
'''coefficient is r, not r squared'''
def __init__(self,
r_result = None,
s_result = None,
method = None):
self.mPValue = None
self.mMethod = None
if r_result:
self.mCoefficient = r_result['estimate']['cor']
self.mPValue = float(r_result['p.value'])
self.mNObservations = r_result['parameter']['df']
self.mMethod = r_result['method']
self.mAlternative = r_result['alternative']
elif s_result:
self.mCoefficient = s_result[0]
self.mPValue = s_result[1]
self.mNObservations = 0
self.mAlternative = "two-sided"
else:
self.mCoefficient = 0
self.mPValue = 1
self.mSignificance = "na"
self.mNObservations = 0
self.mAlternative = "na"
self.mMethod = "na"
if method: self.mMethod = method
if self.mPValue != None:
self.mSignificance = getSignificance( self.mPValue )
def __str__(self):
return "\t".join( (
"%6.4f" % self.mCoefficient,
"%e" % self.mPValue,
self.mSignificance,
"%i" % self.mNObservations,
self.mMethod,
self.mAlternative ) )
@classmethod
def getHeaders(cls):
return ("coeff", "pvalue", "significance", "observations", "method", "alternative" )
def filterMasked( xvals, yvals, missing = ("na", "Nan", None, ""), dtype = numpy.float ):
"""convert xvals and yvals to numpy array skipping pairs with
one or more missing values."""
xmask = [ i in missing for i in xvals ]
ymask = [ i in missing for i in yvals ]
return (numpy.array( [xvals[i] for i in range(len(xvals)) if not xmask[i]], dtype = dtype ),
numpy.array( [yvals[i] for i in range(len(yvals)) if not ymask[i]], dtype = dtype) )
def doCorrelationTest( xvals, yvals ):
"""compute correlation between x and y.
Raises a value-error if there are not enough observations.
"""
if len(xvals) <= 1 or len(yvals) <= 1:
raise ValueError( "can not compute correlation with no data" )
if len(xvals) != len(yvals):
raise ValueError( "data vectors have unequal length" )
# try:
# result = CorrelationTest( r_result = R.cor_test( xvals, yvals, na_action="na_omit" ) )
# except rpy.RPyException, msg:
# raise ValueError( msg )
x, y = filterMasked( xvals, yvals )
result = CorrelationTest( s_result = scipy.stats.pearsonr( x, y ),
method = "pearson" )
result.mNObservations = len(x)
return result
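# Illustrative usage sketch (added for clarity, not part of the original module).
def _example_doCorrelationTest():
    xvals = [1.0, 2.0, 3.0, 4.0, 5.0]
    yvals = [2.1, 3.9, 6.2, 8.0, 9.8]  # roughly y = 2x
    result = doCorrelationTest(xvals, yvals)
    return result.mCoefficient, result.mPValue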
def getPooledVariance( data ):
"""return pooled variance from a
list of tuples (sample_size, variance)."""
t, var = 0, 0
for n, s in data:
t += n
var += (n-1) * s
    assert t > len(data), "total sample size must be larger than the number of samples"
return var / float(t - len(data))
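# Illustrative worked example (added for clarity, not part of the original module):
# two samples with (n, variance) = (10, 2.0) and (20, 3.0) give
# (9 * 2.0 + 19 * 3.0) / (30 - 2) = 75 / 28 ~ 2.68.
def _example_getPooledVariance():
    return getPooledVariance([(10, 2.0), (20, 3.0)])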
###################################################################
###################################################################
###################################################################
## compute ROC curves from sorted values
###################################################################
def computeROC( values ):
'''return a roc curve for *values*. Values
is a sorted list of (value, bool) pairs.
Deprecated - use getPerformance instead
returns a list of (FPR,TPR) tuples.
'''
roc = []
npositives = len( [x for x in values if x[1] ] )
if npositives == 0:
raise ValueError( "no positives among values" )
ntotal = len(values)
last_value, last_fpr = None, None
tp, fp = 0, 0
tn, fn = ntotal - npositives, npositives
for value, is_positive in values:
if is_positive:
tp += 1
fn -= 1
else:
fp += 1
tn -= 1
if last_value != value:
try:
tpr = float(tp) / (tp + fn)
except ZeroDivisionError:
tpr = 0
try:
fpr = float(fp) / (fp + tn)
except ZeroDivisionError:
fpr = 0
if last_fpr != fpr:
roc.append( (fpr,tpr) )
last_fpr = fpr
        last_value = value
return roc
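# Illustrative usage sketch (added for clarity, not part of the original module).
# Input pairs are assumed to be sorted by decreasing score.
def _example_computeROC():
    values = [(0.9, True), (0.8, False), (0.7, True), (0.6, False)]
    return computeROC(values)  # list of (FPR, TPR) points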
class TTest:
def __init__(self): pass
class WelchTTest:
def __init__(self): pass
PairedTTest = collections.namedtuple( "PairedTTest", "statistic pvalue" )
def doPairedTTest( vals1, vals2) :
'''perform paired t-test.
vals1 and vals2 need to contain the same number of elements.
'''
return PairedTTest._make( scipy.stats.ttest_rel( vals1, vals2 ) )
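# Illustrative usage sketch (added for clarity, not part of the original module);
# the paired measurements below are hypothetical.
def _example_doPairedTTest():
    before = [10.1, 9.8, 11.2, 10.5]
    after = [10.9, 10.2, 11.8, 11.1]
    return doPairedTTest(before, after)  # PairedTTest(statistic, pvalue)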
def doWelchsTTest(n1, mean1, std1,
n2, mean2, std2,
alpha = 0.05 ):
    '''Welch's approximate t-test for the difference of two means of
    heteroscedastic populations.
    This function does a two-tailed test.
see PMID: 12016052
:Parameters:
n1 : int
number of variates in sample 1
n2 : int
number of variates in sample 2
mean1 : float
mean of sample 1
mean2 : float
mean of sample 2
std1 : float
standard deviation of sample 1
std2 : float
standard deviation of sample 2
returns a WelchTTest
'''
if std1 == 0 and std2 == 0:
raise ValueError( 'standard deviations are 0.')
# convert standard deviation to sample variance
svar1 = std1**2 * n1 / float(n1-1)
svar2 = std2**2 * n2 / float(n2-1)
# compute df and test statistic
df = ((svar1/n1 + svar2/n2)**2) / ( ((svar1/n1)**2)/(n1-1) + ((svar2/n2)**2)/(n2-1))
denom = numpy.sqrt(svar1/n1+svar2/n2)
z = abs(mean1 - mean2) / denom
# do the test
pvalue = 2 * scipy.stats.t.sf(z,df)
result = WelchTTest()
result.mPValue = pvalue
result.mDegreesFreedom = df
result.mZ = z
result.mMean1 = mean1
result.mMean2 = mean2
result.mSampleVariance1 = svar1
result.mSampleVariance2 = svar2
result.mDifference = mean1 - mean2
result.mZLower = scipy.stats.t.ppf( alpha, df )
result.mZUpper = scipy.stats.t.ppf( 1.0-alpha, df )
result.mDifferenceLower = result.mZLower * denom
result.mDifferenceUpper = result.mZUpper * denom
return result
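# Illustrative usage sketch (added for clarity, not part of the original module);
# the summary statistics below are hypothetical.
def _example_doWelchsTTest():
    result = doWelchsTTest(n1=12, mean1=5.0, std1=1.0,
                           n2=15, mean2=6.2, std2=1.5)
    return result.mPValue, result.mDegreesFreedom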
###################################################################
###################################################################
###################################################################
##
###################################################################
def getAreaUnderCurve( xvalues, yvalues ):
'''compute area under curve from a set of discrete x,y coordinates
using trapezoids.
This is only as accurate as the density of points.
'''
assert len(xvalues) == len(yvalues)
last_x, last_y = xvalues[0], yvalues[0]
auc = 0
for x,y in zip(xvalues, yvalues)[1:]:
dx = x - last_x
assert not dx <= 0, "x not increasing: %f >= %f" % (last_x, x)
dy = abs(last_y - y)
my = min(last_y, y)
# rectangle plus triangle
auc += dx * my + dx * dy / 2
last_x, last_y = x, y
return auc
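# Illustrative worked example (added for clarity, not part of the original module):
# the curve y = x on [0, 1] sampled at three points has area 0.5.
def _example_getAreaUnderCurve():
    return getAreaUnderCurve([0.0, 0.5, 1.0], [0.0, 0.5, 1.0])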
###################################################################
###################################################################
###################################################################
##
###################################################################
def getSensitivityRecall( values ):
'''return sensitivity/selectivity.
Values is a sorted list of (value, bool) pairs.
Deprecated - use getPerformance instead
'''
npositives = 0.0
npredicted = 0.0
l = None
result = []
total = float(len(values))
for value, is_positive in values:
npredicted += 1.0
if is_positive > 0: npositives += 1.0
if value != l:
result.append( (value, npositives / npredicted, npredicted / total ) )
l = value
if l:
result.append( (l, npositives / npredicted, npredicted/total ) )
return result
###################################################################
###################################################################
###################################################################
##
###################################################################
ROCResult = collections.namedtuple( "ROCResult",
"value pred tp fp tn fn tpr fpr tnr fnr rtpr rfnr" )
def getPerformance( values,
skip_redundant = True,
false_negatives = False,
bin_by_value = True,
monotonous = False,
multiple = False,
increasing = True,
total_positives = None,
total_false_negatives = None,
):
'''compute performance estimates for a list of ``(score, flag)``
tuples in *values*.
Values is a sorted list of (value, bool) pairs.
    If the option *false_negatives* is set, the input is +/- or 1/0 for a
true positive or false negative, respectively.
TP: true positives
FP: false positives
TPR: true positive rate = true_positives / predicted
P: predicted
FPR: false positive rate = false positives / predicted
value: value
'''
true_positives = 0
predicted = 0
last_value = None
binned_values = []
for value, flag in values:
if not bin_by_value:
if last_value != value:
binned_values.append( (true_positives, predicted, value) )
else:
if last_value != None and last_value != value:
binned_values.append( (true_positives, predicted, last_value) )
predicted += 1
if flag: true_positives += 1
last_value = value
    binned_values.append( (true_positives, predicted, last_value) )
if true_positives == 0:
raise ValueError("# no true positives!")
if total_positives == None:
if total_false_negatives:
positives = float(predicted)
else:
positives = float(true_positives)
else:
positives = float(total_positives)
last_positives = None
last_tpr = None
result = []
for true_positives, predicted, value in binned_values:
if (predicted == 0):
predicted = 1
if total_false_negatives:
false_negatives = predicted - true_positives
false_positives = 0
true_negatives = 0
else:
true_negatives = 0
false_negatives = positives - true_positives
false_positives = predicted - true_positives
tpr = float(true_positives) / predicted
fpr = float(false_positives) / (true_positives + false_negatives )
fnr = float(false_negatives) / positives
tnr = 0
# relative rates
rfpr = float(false_positives) / predicted
rfnr = float(false_negatives) / predicted
if monotonous and last_tpr and last_tpr < tpr:
continue
if skip_redundant and true_positives == last_positives:
continue
if (predicted > 0):
result.append( ROCResult._make(
(value,
predicted,
true_positives,
false_positives,
true_negatives,
false_negatives,
tpr, fpr, tnr, fnr,
rfpr, rfnr ) ) )
last_positives = true_positives
last_tpr = tpr
return result
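# Illustrative usage sketch (added for clarity, not part of the original module).
# Scores are assumed to be sorted in decreasing order; flags mark true positives.
def _example_getPerformance():
    values = [(0.9, True), (0.8, True), (0.7, False), (0.6, True), (0.5, False)]
    return getPerformance(values)  # list of ROCResult tuples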
###################################################################
###################################################################
###################################################################
##
###################################################################
def doMannWhitneyUTest( xvals, yvals ):
'''apply the Mann-Whitney U test to test for the difference of medians.'''
r_result = R.wilcox_test( xvals, yvals, paired = False )
result = Result().fromR(
( ("pvalue", 'p.value'),
('alternative', None),
('method', None ) ),
r_result )
return result
| [
"[email protected]"
] | |
4f40417e1b3d5e7727b23349015224819e159c34 | d0efa2026b7ed22ff4f9aa76c27ae2474c30f26d | /test/test_payment_method_payment_schedules_request.py | 452cc8ec29e8473df8ec6a5a8e0ae80b14d7d5f7 | [] | no_license | begum-akbay/Python | 2075650e0ddbf1c51823ebd749742646bf221603 | fe8b47e29aae609b7510af2d21e53b8a575857d8 | refs/heads/master | 2023-03-28T00:11:00.997194 | 2021-03-25T16:38:17 | 2021-03-25T16:38:17 | 351,499,957 | 0 | 0 | null | 2021-03-25T16:38:17 | 2021-03-25T16:15:16 | Python | UTF-8 | Python | false | false | 1,270 | py | # coding: utf-8
"""
Payment Gateway API Specification.
The documentation here is designed to provide all of the technical guidance required to consume and integrate with our APIs for payment processing. To learn more about our APIs please visit https://docs.firstdata.com/org/gateway. # noqa: E501
The version of the OpenAPI document: 21.1.0.20210122.001
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import openapi_client
from openapi_client.models.payment_method_payment_schedules_request import PaymentMethodPaymentSchedulesRequest # noqa: E501
from openapi_client.rest import ApiException
class TestPaymentMethodPaymentSchedulesRequest(unittest.TestCase):
"""PaymentMethodPaymentSchedulesRequest unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testPaymentMethodPaymentSchedulesRequest(self):
"""Test PaymentMethodPaymentSchedulesRequest"""
# FIXME: construct object with mandatory attributes with example values
# model = openapi_client.models.payment_method_payment_schedules_request.PaymentMethodPaymentSchedulesRequest() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
] | |
8147523bcb0f515c279cdd116378042b0911fd7c | 56e469a1bfd29004fa258a54668dfbbc4459663d | /python3-nltk-tutorial/src/lesson2.py | eea468d14140f4c269abb2552dfb9c86ded6c8b6 | [] | no_license | wind86/learning | bfce4a6795b58b27d0148b878299cacfe96aa26f | 4449ba0eed0a8f803a2bb9fbd663faf43148f03a | refs/heads/master | 2020-04-05T23:28:40.082439 | 2017-11-04T11:36:40 | 2017-11-04T11:36:40 | 83,236,426 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 634 | py | '''
Created on Apr 09, 2017
Stop words with NLTK
https://www.youtube.com/watch?v=w36-U-ccajM&index=2&list=PLQVvvaa0QuDf2JswnfiGkliBInZnIC4HL
@author: ubuntu
'''
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
example_sent = "This is a sample sentence, showing off the stop words filtration."
stop_words = set(stopwords.words('english'))
word_tokens = word_tokenize(example_sent)
filtered_sentence = [w for w in word_tokens if not w in stop_words]
filtered_sentence = []
for w in word_tokens:
if w not in stop_words:
filtered_sentence.append(w)
print(word_tokens)
print(filtered_sentence) | [
"[email protected]"
] | |
b14af7cffc6ef7e61fd07f241da400470e0d2847 | 672fef1cd92f24cc13dbb651f60d7b1081468bed | /catkin_ws/build/kit_agv_teleop/catkin_generated/pkg.installspace.context.pc.py | b82d54ca750a09c83c061b72924977e025a63ceb | [] | no_license | Forrest-Z/DevelopAgv | 49eca36e0a4a714fb232100b6216f4801409aa56 | e7d0ac39f3964557d7f67f074ddba73e5c6f0d3a | refs/heads/master | 2022-12-14T12:41:30.309513 | 2020-09-07T14:21:16 | 2020-09-07T14:21:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 398 | py | # generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "".split(';') if "" != "" else []
PROJECT_CATKIN_DEPENDS = "".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "kit_agv_teleop"
PROJECT_SPACE_DIR = "/home/nhamtung/TungNV/DevelopAgv/catkin_ws/install"
PROJECT_VERSION = "0.0.0"
| [
"[email protected]"
] | |
ed0a7a587fa699bb3e21e4116d874fda8a2c2d5c | 3337e9150a743e0df2898528dd1e4dfac9730b25 | /artemis/fileman/persistent_print.py | 13b30ccc07235563122878b4675f41b117e62124 | [] | no_license | ml-lab/artemis | f3353cb462b06d64e1007010db94667b4703c90e | b4f5f627f1798aff90b845d70fd582142a9f76c8 | refs/heads/master | 2021-01-22T06:49:41.346341 | 2017-09-01T15:31:13 | 2017-09-01T15:31:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,171 | py | import sys
from artemis.fileman.local_dir import get_artemis_data_path
from artemis.general.display import CaptureStdOut
__author__ = 'peter'
"""
Save Print statements:
Useful in ipython notebooks where you lose output when printing to the browser.
On advice from:
http://stackoverflow.com/questions/4675728/redirect-stdout-to-a-file-in-python
** Note this is no longer being used. Possibly delete
"""
_ORIGINAL_STDOUT = sys.stdout
_ORIGINAL_STDERR = sys.stderr
def capture_print(log_file_path = 'logs/dump/%T-log.txt', print_to_console=True):
"""
    :param log_file_path: Path of the file to print to. If the path does not start with a "/", it will
        be relative to the data directory. You can use placeholders such as %T, %R, ... in the path name (see format
        filename)
    :param print_to_console: Also continue printing to console.
:return: The absolute path to the log file.
"""
local_log_file_path = get_artemis_data_path(log_file_path)
logger = CaptureStdOut(log_file_path=local_log_file_path, print_to_console=print_to_console)
logger.__enter__()
sys.stdout = logger
sys.stderr = logger
return local_log_file_path
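# Illustrative usage sketch (added for clarity, not part of the original module).
def _example_capture_print():
    log_path = capture_print('logs/dump/example-log.txt', print_to_console=False)
    print 'this line is written to the log file'
    stop_capturing_print()
    return log_path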
def stop_capturing_print():
sys.stdout = _ORIGINAL_STDOUT
sys.stderr = _ORIGINAL_STDERR
def new_log_file(log_file_path = 'dump/%T-log', print_to_console = False):
"""
Just capture-print with different defaults - intended to be called from notebooks where
you don't want all output printed, but want to be able to see it with a link.
:param log_file_path: Path to the log file - %T is replaced with time
:param print_to_console: True to continue printing to console
"""
return capture_print(log_file_path=log_file_path, print_to_console=print_to_console)
def read_print():
return sys.stdout.read()
def reprint():
assert isinstance(sys.stdout, CaptureStdOut), "Can't call reprint unless you've turned on capture_print"
# Need to avoid exponentially growing prints...
current_stdout = sys.stdout
sys.stdout = _ORIGINAL_STDOUT
print read_print()
sys.stdout = current_stdout
| [
"[email protected]"
] | |
c367f874817b32c6f63cee71858c33cc30dede45 | 5d0fe4a9e026234fe15e6c4380355061bb4dac64 | /tests/functional/pages/profile/individual_enter_your_personal_details.py | 53c55f143ecca632274757bbfec1c6127897fa4a | [
"MIT"
] | permissive | uktrade/directory-tests | 37e243862da8ac594cf1ea06ade714db5e1aba03 | 39ec6c26203580238e65566a472cbd80916e6726 | refs/heads/master | 2022-08-09T16:58:56.248982 | 2022-08-01T12:25:10 | 2022-08-01T12:25:10 | 71,367,747 | 4 | 3 | MIT | 2022-08-01T12:26:09 | 2016-10-19T14:48:57 | Python | UTF-8 | Python | false | false | 1,572 | py | # -*- coding: utf-8 -*-
"""Profile - Enter your personal details"""
from requests import Response, Session
from directory_tests_shared import PageType, Service, URLs
from tests.functional.utils.context_utils import Actor
from tests.functional.utils.request import (
Method,
check_response,
check_url,
make_request,
)
SERVICE = Service.PROFILE
NAME = "Enter your individual details"
TYPE = PageType.FORM
URL = URLs.PROFILE_ENROL_INDIVIDUAL_ENTER_YOUR_PERSONAL_DETAILS.absolute
EXPECTED_STRINGS = [
"Enter your personal details",
"First name",
"Last name",
"Job title",
"Phone number (optional)",
]
def go_to(session: Session) -> Response:
return make_request(Method.GET, URL, session=session)
def should_be_here(response: Response):
check_url(response, URL)
check_response(response, 200, body_contains=EXPECTED_STRINGS)
def submit(actor: Actor):
session = actor.session
headers = {"Referer": URL}
data = {
"csrfmiddlewaretoken": actor.csrfmiddlewaretoken,
"individual_user_enrolment_view-current_step": "personal-details",
"personal-details-given_name": actor.alias,
"personal-details-family_name": "AUTOMATED TESTS",
"personal-details-job_title": "DIT AUTOMATED TESTS",
"personal-details-phone_number": "0987654321",
"personal-details-terms_agreed": "on",
}
return make_request(
Method.POST,
URL,
session=session,
headers=headers,
files=data,
no_filename_in_multipart_form_data=True,
)
| [
"[email protected]"
] | |
6e3f7646454551de97bff7229a6e4a0d163b2856 | ca231a325e8f4c18d50d89ffa7eec993d4cc68c3 | /codility/minimal_interger_not_ocurrs.py | 4f9b4ac785566637a02e89df334015135a5bb335 | [] | no_license | HugoPorto/PythonCodes | 8e1597999ccd34ffa86df5ae7e91111d77dc7a22 | 539ad6891cbd49a2c011349f843ab710aad2993a | refs/heads/master | 2022-02-13T05:48:24.633750 | 2017-09-12T15:44:06 | 2017-09-12T15:44:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 853 | py | # -*- coding:utf-8 -*-
def solution(A):
''' Solve it with Pigeonhole principle.
There are N integers in the input. So for the
first N+1 positive integers, at least one of
them must be missing.
'''
# We only care about the first N+1 positive integers.
# occurrence[i] is for the integer i+1.
occurrence = [False] * (len(A) + 1)
for item in A:
if 1 <= item <= len(A) + 1:
occurrence[item - 1] = True
# Find out the missing minimal positive integer.
for index in xrange(len(A) + 1):
if occurrence[index] == False:
return index + 1
raise Exception("Should never be here.")
return -1
assert solution([-1]) == 1
assert solution([1, 3, 6, 4, 1, 2]) == 5
assert solution([1]) == 2
assert solution([-1, 0, 1, 3]) == 2
assert solution([-1, 0, 1, 2]) == 3 | [
"[email protected]"
] | |
12ccbb6a49dc123cca42202409efb9bb333f2c8c | a135e6aebb4b525d090272c107d9986ed50ec919 | /grip/__init__.py | 263bab0ee2649d40658a1dc3023c1a3e0b27c6d5 | [
"MIT"
] | permissive | wemersondev/grip | 2a6740d32e045cfa6639936d6640555ea81d3b53 | 8a9d7caf2f8a7cf07d8b31e030600404b4c498c7 | refs/heads/master | 2021-01-24T03:26:40.071776 | 2018-02-25T19:58:13 | 2018-02-25T19:58:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,663 | py | """\
Grip
----
Render local readme files before sending off to GitHub.
:copyright: (c) 2014-2016 by Joe Esposito.
:license: MIT, see LICENSE for more details.
"""
__version__ = '4.4.0'
import sys
# Patch for Flask 11.0+ on Python 3 (pypy3)
if not hasattr(sys, 'exc_clear'):
sys.exc_clear = lambda: None
from .api import (
clear_cache, create_app, export, render_content, render_page, serve)
from .app import Grip
from .assets import GitHubAssetManager, ReadmeAssetManager
from .command import main
from .constants import (
DEFAULT_API_URL, DEFAULT_FILENAMES, DEFAULT_FILENAME, DEFAULT_GRIPHOME,
DEFAULT_GRIPURL, STYLE_ASSET_URLS_INLINE_FORMAT, STYLE_ASSET_URLS_RE,
STYLE_ASSET_URLS_SUB_FORMAT, STYLE_URLS_RE, STYLE_URLS_SOURCE,
SUPPORTED_EXTENSIONS, SUPPORTED_TITLES)
from .exceptions import AlreadyRunningError, ReadmeNotFoundError
from .readers import ReadmeReader, DirectoryReader, StdinReader, TextReader
from .renderers import ReadmeRenderer, GitHubRenderer, OfflineRenderer
__all__ = [
'__version__',
'DEFAULT_API_URL', 'DEFAULT_FILENAMES', 'DEFAULT_FILENAME',
'DEFAULT_GRIPHOME', 'DEFAULT_GRIPURL', 'STYLE_ASSET_URLS_INLINE_FORMAT',
'STYLE_ASSET_URLS_RE', 'STYLE_ASSET_URLS_SUB_FORMAT', 'STYLE_URLS_RE',
'STYLE_URLS_SOURCE', 'SUPPORTED_EXTENSIONS', 'SUPPORTED_TITLES',
'AlreadyRunningError', 'DirectoryReader', 'GitHubAssetManager',
'GitHubRenderer', 'Grip', 'OfflineRenderer', 'ReadmeNotFoundError',
'ReadmeAssetManager', 'ReadmeReader', 'ReadmeRenderer', 'StdinReader',
'TextReader',
'clear_cache', 'create_app', 'export', 'main', 'render_content',
'render_page', 'serve',
]
| [
"[email protected]"
] | |
18df10d8b1c09bf6663d3185bce769d2c532a8f7 | 8c6816435093cb8e9e45593d3ffdd67028a011b6 | /tests/test_is_palindrome_permutation.py | 8afe1e3ee3486b7078ef4211c354a84d7504048b | [] | no_license | Keeady/daily-coding-challenge | 6ee74a5fe639a1f5b4753dd4848d0696bef15c28 | 31eebbf4c1d0eb88a00f71bd5741adf5e07d0e94 | refs/heads/master | 2020-03-27T07:58:05.713290 | 2019-03-08T15:03:05 | 2019-03-08T15:03:05 | 146,210,027 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 586 | py | from String import is_palindrome_permutation
def test_is_palindrome_permutation():
str = 'Tact Coa'
assert True == is_palindrome_permutation.is_palindrome_permutation(str)
str = 'Tact oCoa'
assert True == is_palindrome_permutation.is_palindrome_permutation(str)
str = 'Tact Ca'
assert True == is_palindrome_permutation.is_palindrome_permutation(str)\
str = 'Duck Duck Go'
assert False == is_palindrome_permutation.is_palindrome_permutation(str)
str = 'tactcoapapa'
assert True == is_palindrome_permutation.is_palindrome_permutation(str) | [
"[email protected]"
] | |
5f7a882ac493f5606e6abb240272852b980809e0 | bd053d2bf5444ab8f0b8b0ff56772fa75281e38d | /pennylane/ops/qubit/parametric_ops.py | 555e4e926946ab402e54ca0a390ea633b1db97ed | [
"Apache-2.0"
] | permissive | johannesjmeyer/pennylane | bcb762583e95537b04a9b38756369571f957d2e5 | 8f602312baea107d5248267fb3dc1593722810e0 | refs/heads/master | 2023-07-11T18:21:31.086858 | 2021-08-14T19:21:42 | 2021-08-14T19:21:42 | 341,190,636 | 3 | 1 | Apache-2.0 | 2021-06-16T09:01:58 | 2021-02-22T12:19:10 | Python | UTF-8 | Python | false | false | 40,493 | py | # Copyright 2018-2021 Xanadu Quantum Technologies Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This submodule contains the discrete-variable quantum operations that are the
core parameterized gates.
"""
# pylint:disable=abstract-method,arguments-differ,protected-access
import cmath
import functools
import math
import numpy as np
import pennylane as qml
from pennylane.operation import AnyWires, DiagonalOperation, Operation
from pennylane.ops.qubit.non_parametric_ops import PauliX, PauliY, PauliZ, Hadamard
from pennylane.templates.decorator import template
from pennylane.utils import expand, pauli_eigs
from pennylane.wires import Wires
INV_SQRT2 = 1 / math.sqrt(2)
class RX(Operation):
r"""RX(phi, wires)
The single qubit X rotation
.. math:: R_x(\phi) = e^{-i\phi\sigma_x/2} = \begin{bmatrix}
\cos(\phi/2) & -i\sin(\phi/2) \\
-i\sin(\phi/2) & \cos(\phi/2)
\end{bmatrix}.
**Details:**
* Number of wires: 1
* Number of parameters: 1
* Gradient recipe: :math:`\frac{d}{d\phi}f(R_x(\phi)) = \frac{1}{2}\left[f(R_x(\phi+\pi/2)) - f(R_x(\phi-\pi/2))\right]`
where :math:`f` is an expectation value depending on :math:`R_x(\phi)`.
Args:
phi (float): rotation angle :math:`\phi`
wires (Sequence[int] or int): the wire the operation acts on
"""
num_params = 1
num_wires = 1
par_domain = "R"
is_composable_rotation = True
basis = "X"
grad_method = "A"
generator = [PauliX, -1 / 2]
@classmethod
def _matrix(cls, *params):
theta = params[0]
c = math.cos(theta / 2)
js = 1j * math.sin(-theta / 2)
return np.array([[c, js], [js, c]])
def adjoint(self):
return RX(-self.data[0], wires=self.wires)
def _controlled(self, wire):
CRX(*self.parameters, wires=wire + self.wires)
def single_qubit_rot_angles(self):
# RX(\theta) = RZ(-\pi/2) RY(\theta) RZ(\pi/2)
return [np.pi / 2, self.data[0], -np.pi / 2]
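# Illustrative usage sketch (added for clarity, not part of the library):
# an RX rotation followed by a Pauli-Z measurement gives <Z> = cos(phi).
# Assumes the standard ``default.qubit`` simulator device.
def _example_rx_expectation(phi=np.pi / 2):
    dev = qml.device("default.qubit", wires=1)
    @qml.qnode(dev)
    def circuit(angle):
        RX(angle, wires=0)
        return qml.expval(qml.PauliZ(0))
    return circuit(phi)  # ~cos(phi)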
class RY(Operation):
r"""RY(phi, wires)
The single qubit Y rotation
.. math:: R_y(\phi) = e^{-i\phi\sigma_y/2} = \begin{bmatrix}
\cos(\phi/2) & -\sin(\phi/2) \\
\sin(\phi/2) & \cos(\phi/2)
\end{bmatrix}.
**Details:**
* Number of wires: 1
* Number of parameters: 1
* Gradient recipe: :math:`\frac{d}{d\phi}f(R_y(\phi)) = \frac{1}{2}\left[f(R_y(\phi+\pi/2)) - f(R_y(\phi-\pi/2))\right]`
where :math:`f` is an expectation value depending on :math:`R_y(\phi)`.
Args:
phi (float): rotation angle :math:`\phi`
wires (Sequence[int] or int): the wire the operation acts on
"""
num_params = 1
num_wires = 1
par_domain = "R"
is_composable_rotation = True
basis = "Y"
grad_method = "A"
generator = [PauliY, -1 / 2]
@classmethod
def _matrix(cls, *params):
theta = params[0]
c = math.cos(theta / 2)
s = math.sin(theta / 2)
return np.array([[c, -s], [s, c]])
def adjoint(self):
return RY(-self.data[0], wires=self.wires)
def _controlled(self, wire):
CRY(*self.parameters, wires=wire + self.wires)
def single_qubit_rot_angles(self):
# RY(\theta) = RZ(0) RY(\theta) RZ(0)
return [0.0, self.data[0], 0.0]
class RZ(DiagonalOperation):
r"""RZ(phi, wires)
The single qubit Z rotation
.. math:: R_z(\phi) = e^{-i\phi\sigma_z/2} = \begin{bmatrix}
e^{-i\phi/2} & 0 \\
0 & e^{i\phi/2}
\end{bmatrix}.
**Details:**
* Number of wires: 1
* Number of parameters: 1
* Gradient recipe: :math:`\frac{d}{d\phi}f(R_z(\phi)) = \frac{1}{2}\left[f(R_z(\phi+\pi/2)) - f(R_z(\phi-\pi/2))\right]`
where :math:`f` is an expectation value depending on :math:`R_z(\phi)`.
Args:
phi (float): rotation angle :math:`\phi`
wires (Sequence[int] or int): the wire the operation acts on
"""
num_params = 1
num_wires = 1
par_domain = "R"
is_composable_rotation = True
basis = "Z"
grad_method = "A"
generator = [PauliZ, -1 / 2]
@classmethod
def _matrix(cls, *params):
theta = params[0]
p = cmath.exp(-0.5j * theta)
return np.array([[p, 0], [0, p.conjugate()]])
@classmethod
def _eigvals(cls, *params):
theta = params[0]
p = cmath.exp(-0.5j * theta)
return np.array([p, p.conjugate()])
def adjoint(self):
return RZ(-self.data[0], wires=self.wires)
def _controlled(self, wire):
CRZ(*self.parameters, wires=wire + self.wires)
def single_qubit_rot_angles(self):
# RZ(\theta) = RZ(\theta) RY(0) RZ(0)
return [self.data[0], 0.0, 0.0]
class PhaseShift(DiagonalOperation):
r"""PhaseShift(phi, wires)
Arbitrary single qubit local phase shift
.. math:: R_\phi(\phi) = e^{i\phi/2}R_z(\phi) = \begin{bmatrix}
1 & 0 \\
0 & e^{i\phi}
\end{bmatrix}.
**Details:**
* Number of wires: 1
* Number of parameters: 1
* Gradient recipe: :math:`\frac{d}{d\phi}f(R_\phi(\phi)) = \frac{1}{2}\left[f(R_\phi(\phi+\pi/2)) - f(R_\phi(\phi-\pi/2))\right]`
where :math:`f` is an expectation value depending on :math:`R_{\phi}(\phi)`.
Args:
phi (float): rotation angle :math:`\phi`
wires (Sequence[int] or int): the wire the operation acts on
"""
num_params = 1
num_wires = 1
par_domain = "R"
is_composable_rotation = True
basis = "Z"
grad_method = "A"
generator = [np.array([[0, 0], [0, 1]]), 1]
@classmethod
def _matrix(cls, *params):
phi = params[0]
return np.array([[1, 0], [0, cmath.exp(1j * phi)]])
@classmethod
def _eigvals(cls, *params):
phi = params[0]
return np.array([1, cmath.exp(1j * phi)])
@staticmethod
def decomposition(phi, wires):
decomp_ops = [RZ(phi, wires=wires)]
return decomp_ops
def adjoint(self):
return PhaseShift(-self.data[0], wires=self.wires)
def _controlled(self, wire):
ControlledPhaseShift(*self.parameters, wires=wire + self.wires)
def single_qubit_rot_angles(self):
# PhaseShift(\theta) = RZ(\theta) RY(0) RZ(0)
return [self.data[0], 0.0, 0.0]
class ControlledPhaseShift(DiagonalOperation):
r"""ControlledPhaseShift(phi, wires)
A qubit controlled phase shift.
.. math:: CR_\phi(\phi) = \begin{bmatrix}
1 & 0 & 0 & 0 \\
0 & 1 & 0 & 0 \\
0 & 0 & 1 & 0 \\
0 & 0 & 0 & e^{i\phi}
\end{bmatrix}.
.. note:: The first wire provided corresponds to the **control qubit**.
**Details:**
* Number of wires: 2
* Number of parameters: 1
* Gradient recipe: :math:`\frac{d}{d\phi}f(CR_\phi(\phi)) = \frac{1}{2}\left[f(CR_\phi(\phi+\pi/2)) - f(CR_\phi(\phi-\pi/2))\right]`
where :math:`f` is an expectation value depending on :math:`CR_{\phi}(\phi)`.
Args:
phi (float): rotation angle :math:`\phi`
wires (Sequence[int]): the wire the operation acts on
"""
num_params = 1
num_wires = 2
par_domain = "R"
is_composable_rotation = True
basis = "Z"
grad_method = "A"
generator = [np.array([[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 1]]), 1]
@classmethod
def _matrix(cls, *params):
phi = params[0]
return np.array([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, cmath.exp(1j * phi)]])
@classmethod
def _eigvals(cls, *params):
phi = params[0]
return np.array([1, 1, 1, cmath.exp(1j * phi)])
@staticmethod
def decomposition(phi, wires):
decomp_ops = [
qml.PhaseShift(phi / 2, wires=wires[0]),
qml.CNOT(wires=wires),
qml.PhaseShift(-phi / 2, wires=wires[1]),
qml.CNOT(wires=wires),
qml.PhaseShift(phi / 2, wires=wires[1]),
]
return decomp_ops
def adjoint(self):
return ControlledPhaseShift(-self.data[0], wires=self.wires)
@property
def control_wires(self):
return Wires(self.wires[0])
CPhase = ControlledPhaseShift
class Rot(Operation):
r"""Rot(phi, theta, omega, wires)
Arbitrary single qubit rotation
.. math::
R(\phi,\theta,\omega) = RZ(\omega)RY(\theta)RZ(\phi)= \begin{bmatrix}
e^{-i(\phi+\omega)/2}\cos(\theta/2) & -e^{i(\phi-\omega)/2}\sin(\theta/2) \\
e^{-i(\phi-\omega)/2}\sin(\theta/2) & e^{i(\phi+\omega)/2}\cos(\theta/2)
\end{bmatrix}.
**Details:**
* Number of wires: 1
* Number of parameters: 3
* Gradient recipe: :math:`\frac{d}{d\phi}f(R(\phi, \theta, \omega)) = \frac{1}{2}\left[f(R(\phi+\pi/2, \theta, \omega)) - f(R(\phi-\pi/2, \theta, \omega))\right]`
where :math:`f` is an expectation value depending on :math:`R(\phi, \theta, \omega)`.
This gradient recipe applies for each angle argument :math:`\{\phi, \theta, \omega\}`.
.. note::
If the ``Rot`` gate is not supported on the targeted device, PennyLane
will attempt to decompose the gate into :class:`~.RZ` and :class:`~.RY` gates.
Args:
phi (float): rotation angle :math:`\phi`
theta (float): rotation angle :math:`\theta`
omega (float): rotation angle :math:`\omega`
wires (Sequence[int] or int): the wire the operation acts on
"""
num_params = 3
num_wires = 1
par_domain = "R"
is_composable_rotation = True
grad_method = "A"
@classmethod
def _matrix(cls, *params):
phi, theta, omega = params
c = math.cos(theta / 2)
s = math.sin(theta / 2)
return np.array(
[
[
cmath.exp(-0.5j * (phi + omega)) * c,
-cmath.exp(0.5j * (phi - omega)) * s,
],
[
cmath.exp(-0.5j * (phi - omega)) * s,
cmath.exp(0.5j * (phi + omega)) * c,
],
]
)
@staticmethod
def decomposition(phi, theta, omega, wires):
decomp_ops = [
RZ(phi, wires=wires),
RY(theta, wires=wires),
RZ(omega, wires=wires),
]
return decomp_ops
def adjoint(self):
phi, theta, omega = self.parameters
return Rot(-omega, -theta, -phi, wires=self.wires)
def _controlled(self, wire):
CRot(*self.parameters, wires=wire + self.wires)
def single_qubit_rot_angles(self):
return self.data
class MultiRZ(DiagonalOperation):
r"""MultiRZ(theta, wires)
Arbitrary multi Z rotation.
.. math::
MultiRZ(\theta) = \exp(-i \frac{\theta}{2} Z^{\otimes n})
**Details:**
* Number of wires: Any
* Number of parameters: 1
* Gradient recipe: :math:`\frac{d}{d\theta}f(MultiRZ(\theta)) = \frac{1}{2}\left[f(MultiRZ(\theta +\pi/2)) - f(MultiRZ(\theta-\pi/2))\right]`
where :math:`f` is an expectation value depending on :math:`MultiRZ(\theta)`.
.. note::
If the ``MultiRZ`` gate is not supported on the targeted device, PennyLane
will decompose the gate using :class:`~.RZ` and :class:`~.CNOT` gates.
Args:
theta (float): rotation angle :math:`\theta`
wires (Sequence[int] or int): the wires the operation acts on
"""
num_params = 1
num_wires = AnyWires
par_domain = "R"
grad_method = "A"
@classmethod
def _matrix(cls, theta, n):
"""Matrix representation of a MultiRZ gate.
Args:
theta (float): Rotation angle.
n (int): Number of wires the rotation acts on. This has
to be given explicitly in the static method as the
wires object is not available.
Returns:
array[complex]: The matrix representation
"""
multi_Z_rot_eigs = MultiRZ._eigvals(theta, n)
multi_Z_rot_matrix = np.diag(multi_Z_rot_eigs)
return multi_Z_rot_matrix
_generator = None
@property
def generator(self):
if self._generator is None:
self._generator = [np.diag(pauli_eigs(len(self.wires))), -1 / 2]
return self._generator
@property
def matrix(self):
# Redefine the property here to pass additionally the number of wires to the ``_matrix`` method
if self.inverse:
# The matrix is diagonal, so there is no need to transpose
return self._matrix(*self.parameters, len(self.wires)).conj()
return self._matrix(*self.parameters, len(self.wires))
@classmethod
def _eigvals(cls, theta, n):
return np.exp(-1j * theta / 2 * pauli_eigs(n))
@property
def eigvals(self):
# Redefine the property here to pass additionally the number of wires to the ``_eigvals`` method
if self.inverse:
return self._eigvals(*self.parameters, len(self.wires)).conj()
return self._eigvals(*self.parameters, len(self.wires))
@staticmethod
@template
def decomposition(theta, wires):
for i in range(len(wires) - 1, 0, -1):
qml.CNOT(wires=[wires[i], wires[i - 1]])
RZ(theta, wires=wires[0])
for i in range(len(wires) - 1):
qml.CNOT(wires=[wires[i + 1], wires[i]])
def adjoint(self):
return MultiRZ(-self.parameters[0], wires=self.wires)
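# Illustrative sketch (added for clarity, not part of the library): the MultiRZ
# matrix is diagonal with phases exp(-i * theta / 2 * eigvals(Z^{otimes n})).
def _example_multirz_matrix(theta=0.3, n=2):
    mat = MultiRZ._matrix(theta, n)
    expected = np.exp(-1j * theta / 2 * pauli_eigs(n))
    return np.allclose(np.diag(mat), expected)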
class PauliRot(Operation):
r"""PauliRot(theta, pauli_word, wires)
Arbitrary Pauli word rotation.
.. math::
RP(\theta, P) = \exp(-i \frac{\theta}{2} P)
**Details:**
* Number of wires: Any
* Number of parameters: 2 (1 differentiable parameter)
* Gradient recipe: :math:`\frac{d}{d\theta}f(RP(\theta)) = \frac{1}{2}\left[f(RP(\theta +\pi/2)) - f(RP(\theta-\pi/2))\right]`
where :math:`f` is an expectation value depending on :math:`RP(\theta)`.
.. note::
If the ``PauliRot`` gate is not supported on the targeted device, PennyLane
will decompose the gate using :class:`~.RX`, :class:`~.Hadamard`, :class:`~.RZ`
and :class:`~.CNOT` gates.
Args:
theta (float): rotation angle :math:`\theta`
pauli_word (string): the Pauli word defining the rotation
wires (Sequence[int] or int): the wire the operation acts on
"""
num_params = 2
num_wires = AnyWires
do_check_domain = False
par_domain = "R"
grad_method = "A"
_ALLOWED_CHARACTERS = "IXYZ"
_PAULI_CONJUGATION_MATRICES = {
"X": Hadamard._matrix(),
"Y": RX._matrix(np.pi / 2),
"Z": np.array([[1, 0], [0, 1]]),
}
def __init__(self, *params, wires=None, do_queue=True):
super().__init__(*params, wires=wires, do_queue=do_queue)
pauli_word = params[1]
if not PauliRot._check_pauli_word(pauli_word):
raise ValueError(
'The given Pauli word "{}" contains characters that are not allowed.'
" Allowed characters are I, X, Y and Z".format(pauli_word)
)
num_wires = 1 if isinstance(wires, int) else len(wires)
if not len(pauli_word) == num_wires:
raise ValueError(
"The given Pauli word has length {}, length {} was expected for wires {}".format(
len(pauli_word), num_wires, wires
)
)
@staticmethod
def _check_pauli_word(pauli_word):
"""Check that the given Pauli word has correct structure.
Args:
pauli_word (str): Pauli word to be checked
Returns:
bool: Whether the Pauli word has correct structure.
"""
return all(pauli in PauliRot._ALLOWED_CHARACTERS for pauli in pauli_word)
@classmethod
def _matrix(cls, *params):
theta = params[0]
pauli_word = params[1]
if not PauliRot._check_pauli_word(pauli_word):
raise ValueError(
'The given Pauli word "{}" contains characters that are not allowed.'
" Allowed characters are I, X, Y and Z".format(pauli_word)
)
# Simplest case is if the Pauli is the identity matrix
if pauli_word == "I" * len(pauli_word):
return np.exp(-1j * theta / 2) * np.eye(2 ** len(pauli_word))
# We first generate the matrix excluding the identity parts and expand it afterwards.
# To this end, we have to store on which wires the non-identity parts act
non_identity_wires, non_identity_gates = zip(
*[(wire, gate) for wire, gate in enumerate(pauli_word) if gate != "I"]
)
multi_Z_rot_matrix = MultiRZ._matrix(theta, len(non_identity_gates))
# now we conjugate with Hadamard and RX to create the Pauli string
conjugation_matrix = functools.reduce(
np.kron,
[PauliRot._PAULI_CONJUGATION_MATRICES[gate] for gate in non_identity_gates],
)
return expand(
conjugation_matrix.T.conj() @ multi_Z_rot_matrix @ conjugation_matrix,
non_identity_wires,
list(range(len(pauli_word))),
)
_generator = None
@property
def generator(self):
if self._generator is None:
pauli_word = self.parameters[1]
# Simplest case is if the Pauli is the identity matrix
if pauli_word == "I" * len(pauli_word):
self._generator = [np.eye(2 ** len(pauli_word)), -1 / 2]
return self._generator
# We first generate the matrix excluding the identity parts and expand it afterwards.
# To this end, we have to store on which wires the non-identity parts act
non_identity_wires, non_identity_gates = zip(
*[(wire, gate) for wire, gate in enumerate(pauli_word) if gate != "I"]
)
# get MultiRZ's generator
multi_Z_rot_generator = np.diag(pauli_eigs(len(non_identity_gates)))
# now we conjugate with Hadamard and RX to create the Pauli string
conjugation_matrix = functools.reduce(
np.kron,
[PauliRot._PAULI_CONJUGATION_MATRICES[gate] for gate in non_identity_gates],
)
self._generator = [
expand(
conjugation_matrix.T.conj() @ multi_Z_rot_generator @ conjugation_matrix,
non_identity_wires,
list(range(len(pauli_word))),
),
-1 / 2,
]
return self._generator
@classmethod
def _eigvals(cls, theta, pauli_word):
# Identity must be treated specially because its eigenvalues are all the same
if pauli_word == "I" * len(pauli_word):
return np.exp(-1j * theta / 2) * np.ones(2 ** len(pauli_word))
return MultiRZ._eigvals(theta, len(pauli_word))
@staticmethod
@template
def decomposition(theta, pauli_word, wires):
# Catch cases when the wire is passed as a single int.
if isinstance(wires, int):
wires = [wires]
# Check for identity and do nothing
if pauli_word == "I" * len(wires):
return
active_wires, active_gates = zip(
*[(wire, gate) for wire, gate in zip(wires, pauli_word) if gate != "I"]
)
for wire, gate in zip(active_wires, active_gates):
if gate == "X":
Hadamard(wires=[wire])
elif gate == "Y":
RX(np.pi / 2, wires=[wire])
MultiRZ(theta, wires=list(active_wires))
for wire, gate in zip(active_wires, active_gates):
if gate == "X":
Hadamard(wires=[wire])
elif gate == "Y":
RX(-np.pi / 2, wires=[wire])
def adjoint(self):
return PauliRot(-self.parameters[0], self.parameters[1], wires=self.wires)
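# Editor's note: illustrative checks (not in the original source); a PauliRot
# over a "Z"-only word reduces to MultiRZ, and a single-"X" word reduces to RX:
#
#     >>> np.allclose(PauliRot._matrix(0.3, "ZZ"), MultiRZ._matrix(0.3, 2))
#     True
#     >>> np.allclose(PauliRot._matrix(0.3, "X"), RX._matrix(0.3))
#     True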
# Four term gradient recipe for controlled rotations
c1 = INV_SQRT2 * (np.sqrt(2) + 1) / 4
c2 = INV_SQRT2 * (np.sqrt(2) - 1) / 4
a = np.pi / 2
b = 3 * np.pi / 2
four_term_grad_recipe = ([[c1, 1, a], [-c1, 1, -a], [-c2, 1, b], [c2, 1, -b]],)
class CRX(Operation):
r"""CRX(phi, wires)
The controlled-RX operator
.. math::
\begin{align}
CR_x(\phi) &=
\begin{bmatrix}
            1 & 0 & 0 & 0 \\
            0 & 1 & 0 & 0\\
            0 & 0 & \cos(\phi/2) & -i\sin(\phi/2)\\
            0 & 0 & -i\sin(\phi/2) & \cos(\phi/2)
\end{bmatrix}.
\end{align}
**Details:**
* Number of wires: 2
* Number of parameters: 1
* Gradient recipe: The controlled-RX operator satisfies a four-term parameter-shift rule
(see Appendix F, https://arxiv.org/abs/2104.05695):
.. math::
\frac{d}{d\phi}f(CR_x(\phi)) = c_+ \left[f(CR_x(\phi+a)) - f(CR_x(\phi-a))\right] - c_- \left[f(CR_x(\phi+b)) - f(CR_x(\phi-b))\right]
where :math:`f` is an expectation value depending on :math:`CR_x(\phi)`, and
- :math:`a = \pi/2`
- :math:`b = 3\pi/2`
- :math:`c_{\pm} = (\sqrt{2} \pm 1)/{4\sqrt{2}}`
Args:
phi (float): rotation angle :math:`\phi`
        wires (Sequence[int]): the wires the operation acts on
"""
num_params = 1
num_wires = 2
par_domain = "R"
is_composable_rotation = True
basis = "X"
grad_method = "A"
grad_recipe = four_term_grad_recipe
generator = [
np.array([[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 1], [0, 0, 1, 0]]),
-1 / 2,
]
@classmethod
def _matrix(cls, *params):
theta = params[0]
c = math.cos(theta / 2)
js = 1j * math.sin(-theta / 2)
return np.array([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, c, js], [0, 0, js, c]])
@staticmethod
def decomposition(theta, wires):
decomp_ops = [
RZ(np.pi / 2, wires=wires[1]),
RY(theta / 2, wires=wires[1]),
qml.CNOT(wires=wires),
RY(-theta / 2, wires=wires[1]),
qml.CNOT(wires=wires),
RZ(-np.pi / 2, wires=wires[1]),
]
return decomp_ops
def adjoint(self):
return CRX(-self.data[0], wires=self.wires)
@property
def control_wires(self):
return Wires(self.wires[0])
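# Editor's note: sketch (not in the original source) — the lower-right block
# of the CRX matrix is the single-qubit RX matrix:
#
#     >>> np.allclose(CRX._matrix(0.4)[2:, 2:], RX._matrix(0.4))
#     True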
class CRY(Operation):
r"""CRY(phi, wires)
The controlled-RY operator
.. math::
\begin{align}
CR_y(\phi) &=
\begin{bmatrix}
1 & 0 & 0 & 0 \\
0 & 1 & 0 & 0\\
0 & 0 & \cos(\phi/2) & -\sin(\phi/2)\\
0 & 0 & \sin(\phi/2) & \cos(\phi/2)
\end{bmatrix}.
\end{align}
**Details:**
* Number of wires: 2
* Number of parameters: 1
* Gradient recipe: The controlled-RY operator satisfies a four-term parameter-shift rule
(see Appendix F, https://arxiv.org/abs/2104.05695):
.. math::
\frac{d}{d\phi}f(CR_y(\phi)) = c_+ \left[f(CR_y(\phi+a)) - f(CR_y(\phi-a))\right] - c_- \left[f(CR_y(\phi+b)) - f(CR_y(\phi-b))\right]
where :math:`f` is an expectation value depending on :math:`CR_y(\phi)`, and
- :math:`a = \pi/2`
- :math:`b = 3\pi/2`
- :math:`c_{\pm} = (\sqrt{2} \pm 1)/{4\sqrt{2}}`
Args:
phi (float): rotation angle :math:`\phi`
        wires (Sequence[int]): the wires the operation acts on
"""
num_params = 1
num_wires = 2
par_domain = "R"
is_composable_rotation = True
basis = "Y"
grad_method = "A"
grad_recipe = four_term_grad_recipe
generator = [
np.array([[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, -1j], [0, 0, 1j, 0]]),
-1 / 2,
]
@classmethod
def _matrix(cls, *params):
theta = params[0]
c = math.cos(theta / 2)
s = math.sin(theta / 2)
return np.array([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, c, -s], [0, 0, s, c]])
@staticmethod
def decomposition(theta, wires):
decomp_ops = [
RY(theta / 2, wires=wires[1]),
qml.CNOT(wires=wires),
RY(-theta / 2, wires=wires[1]),
qml.CNOT(wires=wires),
]
return decomp_ops
def adjoint(self):
return CRY(-self.data[0], wires=self.wires)
@property
def control_wires(self):
return Wires(self.wires[0])
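# Editor's note: the analogous sketch (not in the original source) for CRY —
# its target block is the plain RY matrix:
#
#     >>> np.allclose(CRY._matrix(0.4)[2:, 2:], RY._matrix(0.4))
#     True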
class CRZ(DiagonalOperation):
r"""CRZ(phi, wires)
The controlled-RZ operator
.. math::
\begin{align}
CR_z(\phi) &=
\begin{bmatrix}
1 & 0 & 0 & 0 \\
0 & 1 & 0 & 0\\
0 & 0 & e^{-i\phi/2} & 0\\
0 & 0 & 0 & e^{i\phi/2}
\end{bmatrix}.
\end{align}
    .. note:: The first wire provided corresponds to the **control qubit**.
**Details:**
* Number of wires: 2
* Number of parameters: 1
* Gradient recipe: The controlled-RZ operator satisfies a four-term parameter-shift rule
(see Appendix F, https://arxiv.org/abs/2104.05695):
.. math::
\frac{d}{d\phi}f(CR_z(\phi)) = c_+ \left[f(CR_z(\phi+a)) - f(CR_z(\phi-a))\right] - c_- \left[f(CR_z(\phi+b)) - f(CR_z(\phi-b))\right]
where :math:`f` is an expectation value depending on :math:`CR_z(\phi)`, and
- :math:`a = \pi/2`
- :math:`b = 3\pi/2`
- :math:`c_{\pm} = (\sqrt{2} \pm 1)/{4\sqrt{2}}`
Args:
phi (float): rotation angle :math:`\phi`
        wires (Sequence[int]): the wires the operation acts on
"""
num_params = 1
num_wires = 2
par_domain = "R"
is_composable_rotation = True
basis = "Z"
grad_method = "A"
grad_recipe = four_term_grad_recipe
generator = [
np.array([[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 1, 0], [0, 0, 0, -1]]),
-1 / 2,
]
@classmethod
def _matrix(cls, *params):
theta = params[0]
return np.array(
[
[1, 0, 0, 0],
[0, 1, 0, 0],
[0, 0, cmath.exp(-0.5j * theta), 0],
[0, 0, 0, cmath.exp(0.5j * theta)],
]
)
@classmethod
def _eigvals(cls, *params):
theta = params[0]
return np.array(
[
1,
1,
cmath.exp(-0.5j * theta),
cmath.exp(0.5j * theta),
]
)
@staticmethod
def decomposition(lam, wires):
decomp_ops = [
PhaseShift(lam / 2, wires=wires[1]),
qml.CNOT(wires=wires),
PhaseShift(-lam / 2, wires=wires[1]),
qml.CNOT(wires=wires),
]
return decomp_ops
def adjoint(self):
return CRZ(-self.data[0], wires=self.wires)
@property
def control_wires(self):
return Wires(self.wires[0])
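# Editor's note: sketch (not in the original source) — CRZ is diagonal, so
# its eigenvalues are exactly the diagonal of its matrix:
#
#     >>> np.allclose(np.diag(CRZ._matrix(0.3)), CRZ._eigvals(0.3))
#     True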
class CRot(Operation):
r"""CRot(phi, theta, omega, wires)
The controlled-Rot operator
.. math:: CR(\phi, \theta, \omega) = \begin{bmatrix}
1 & 0 & 0 & 0 \\
0 & 1 & 0 & 0\\
0 & 0 & e^{-i(\phi+\omega)/2}\cos(\theta/2) & -e^{i(\phi-\omega)/2}\sin(\theta/2)\\
0 & 0 & e^{-i(\phi-\omega)/2}\sin(\theta/2) & e^{i(\phi+\omega)/2}\cos(\theta/2)
\end{bmatrix}.
.. note:: The first wire provided corresponds to the **control qubit**.
**Details:**
* Number of wires: 2
* Number of parameters: 3
* Gradient recipe: The controlled-Rot operator satisfies a four-term parameter-shift rule
(see Appendix F, https://arxiv.org/abs/2104.05695):
.. math::
\frac{d}{d\mathbf{x}_i}f(CR(\mathbf{x}_i)) = c_+ \left[f(CR(\mathbf{x}_i+a)) - f(CR(\mathbf{x}_i-a))\right] - c_- \left[f(CR(\mathbf{x}_i+b)) - f(CR(\mathbf{x}_i-b))\right]
where :math:`f` is an expectation value depending on :math:`CR(\mathbf{x}_i)`, and
- :math:`\mathbf{x} = (\phi, \theta, \omega)` and `i` is an index to :math:`\mathbf{x}`
- :math:`a = \pi/2`
- :math:`b = 3\pi/2`
- :math:`c_{\pm} = (\sqrt{2} \pm 1)/{4\sqrt{2}}`
Args:
phi (float): rotation angle :math:`\phi`
theta (float): rotation angle :math:`\theta`
omega (float): rotation angle :math:`\omega`
        wires (Sequence[int]): the wires the operation acts on
"""
num_params = 3
num_wires = 2
par_domain = "R"
grad_method = "A"
grad_recipe = four_term_grad_recipe * 3
@classmethod
def _matrix(cls, *params):
phi, theta, omega = params
c = math.cos(theta / 2)
s = math.sin(theta / 2)
return np.array(
[
[1, 0, 0, 0],
[0, 1, 0, 0],
[
0,
0,
cmath.exp(-0.5j * (phi + omega)) * c,
-cmath.exp(0.5j * (phi - omega)) * s,
],
[
0,
0,
cmath.exp(-0.5j * (phi - omega)) * s,
cmath.exp(0.5j * (phi + omega)) * c,
],
]
)
@staticmethod
def decomposition(phi, theta, omega, wires):
decomp_ops = [
RZ((phi - omega) / 2, wires=wires[1]),
qml.CNOT(wires=wires),
RZ(-(phi + omega) / 2, wires=wires[1]),
RY(-theta / 2, wires=wires[1]),
qml.CNOT(wires=wires),
RY(theta / 2, wires=wires[1]),
RZ(omega, wires=wires[1]),
]
return decomp_ops
def adjoint(self):
phi, theta, omega = self.parameters
return CRot(-omega, -theta, -phi, wires=self.wires)
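# Editor's note: sketch (not in the original source) — the target block of
# CRot is the single-qubit Rot matrix with the same Euler angles:
#
#     >>> np.allclose(CRot._matrix(0.1, 0.2, 0.3)[2:, 2:],
#     ...             Rot._matrix(0.1, 0.2, 0.3))
#     True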
class U1(Operation):
r"""U1(phi)
U1 gate.
.. math:: U_1(\phi) = e^{i\phi/2}R_z(\phi) = \begin{bmatrix}
1 & 0 \\
0 & e^{i\phi}
\end{bmatrix}.
.. note::
The ``U1`` gate is an alias for the phase shift operation :class:`~.PhaseShift`.
**Details:**
* Number of wires: 1
* Number of parameters: 1
* Gradient recipe: :math:`\frac{d}{d\phi}f(U_1(\phi)) = \frac{1}{2}\left[f(U_1(\phi+\pi/2)) - f(U_1(\phi-\pi/2))\right]`
where :math:`f` is an expectation value depending on :math:`U_1(\phi)`.
Args:
phi (float): rotation angle :math:`\phi`
wires (Sequence[int] or int): the wire the operation acts on
"""
num_params = 1
num_wires = 1
par_domain = "R"
grad_method = "A"
generator = [np.array([[0, 0], [0, 1]]), 1]
@classmethod
def _matrix(cls, *params):
phi = params[0]
return np.array([[1, 0], [0, cmath.exp(1j * phi)]])
@staticmethod
def decomposition(phi, wires):
return [PhaseShift(phi, wires=wires)]
def adjoint(self):
return U1(-self.data[0], wires=self.wires)
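# Editor's note: sketch (not in the original source) — U1 is a pure alias of
# PhaseShift, matrix included:
#
#     >>> np.allclose(U1._matrix(0.3), PhaseShift._matrix(0.3))
#     True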
class U2(Operation):
r"""U2(phi, lambda, wires)
U2 gate.
.. math::
U_2(\phi, \lambda) = \frac{1}{\sqrt{2}}\begin{bmatrix} 1 & -\exp(i \lambda)
\\ \exp(i \phi) & \exp(i (\phi + \lambda)) \end{bmatrix}
The :math:`U_2` gate is related to the single-qubit rotation :math:`R` (:class:`Rot`) and the
:math:`R_\phi` (:class:`PhaseShift`) gates via the following relation:
.. math::
U_2(\phi, \lambda) = R_\phi(\phi+\lambda) R(\lambda,\pi/2,-\lambda)
.. note::
If the ``U2`` gate is not supported on the targeted device, PennyLane
will attempt to decompose the gate into :class:`~.Rot` and :class:`~.PhaseShift` gates.
**Details:**
* Number of wires: 1
* Number of parameters: 2
* Gradient recipe: :math:`\frac{d}{d\phi}f(U_2(\phi, \lambda)) = \frac{1}{2}\left[f(U_2(\phi+\pi/2, \lambda)) - f(U_2(\phi-\pi/2, \lambda))\right]`
where :math:`f` is an expectation value depending on :math:`U_2(\phi, \lambda)`.
This gradient recipe applies for each angle argument :math:`\{\phi, \lambda\}`.
Args:
phi (float): azimuthal angle :math:`\phi`
lambda (float): quantum phase :math:`\lambda`
wires (Sequence[int] or int): the subsystem the gate acts on
"""
num_params = 2
num_wires = 1
par_domain = "R"
grad_method = "A"
@classmethod
def _matrix(cls, *params):
phi, lam = params
return INV_SQRT2 * np.array(
[
[1, -cmath.exp(1j * lam)],
[cmath.exp(1j * phi), cmath.exp(1j * (phi + lam))],
]
)
@staticmethod
def decomposition(phi, lam, wires):
decomp_ops = [
Rot(lam, np.pi / 2, -lam, wires=wires),
PhaseShift(lam, wires=wires),
PhaseShift(phi, wires=wires),
]
return decomp_ops
def adjoint(self):
phi, lam = self.parameters
new_lam = (np.pi - phi) % (2 * np.pi)
new_phi = (np.pi - lam) % (2 * np.pi)
return U2(new_phi, new_lam, wires=self.wires)
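# Editor's note: numerical check (not in the original source) of the relation
# quoted in the U2 docstring, U2(phi, lam) = R_phi(phi + lam) R(lam, pi/2, -lam):
#
#     >>> phi, lam = 0.3, 0.7
#     >>> lhs = U2._matrix(phi, lam)
#     >>> rhs = PhaseShift._matrix(phi + lam) @ Rot._matrix(lam, np.pi / 2, -lam)
#     >>> np.allclose(lhs, rhs)
#     True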
class U3(Operation):
r"""U3(theta, phi, lambda, wires)
Arbitrary single qubit unitary.
.. math::
U_3(\theta, \phi, \lambda) = \begin{bmatrix} \cos(\theta/2) & -\exp(i \lambda)\sin(\theta/2) \\
\exp(i \phi)\sin(\theta/2) & \exp(i (\phi + \lambda))\cos(\theta/2) \end{bmatrix}
The :math:`U_3` gate is related to the single-qubit rotation :math:`R` (:class:`Rot`) and the
:math:`R_\phi` (:class:`PhaseShift`) gates via the following relation:
.. math::
U_3(\theta, \phi, \lambda) = R_\phi(\phi+\lambda) R(\lambda,\theta,-\lambda)
.. note::
If the ``U3`` gate is not supported on the targeted device, PennyLane
will attempt to decompose the gate into :class:`~.PhaseShift` and :class:`~.Rot` gates.
**Details:**
* Number of wires: 1
* Number of parameters: 3
* Gradient recipe: :math:`\frac{d}{d\phi}f(U_3(\theta, \phi, \lambda)) = \frac{1}{2}\left[f(U_3(\theta+\pi/2, \phi, \lambda)) - f(U_3(\theta-\pi/2, \phi, \lambda))\right]`
where :math:`f` is an expectation value depending on :math:`U_3(\theta, \phi, \lambda)`.
This gradient recipe applies for each angle argument :math:`\{\theta, \phi, \lambda\}`.
Args:
theta (float): polar angle :math:`\theta`
phi (float): azimuthal angle :math:`\phi`
lambda (float): quantum phase :math:`\lambda`
wires (Sequence[int] or int): the subsystem the gate acts on
"""
num_params = 3
num_wires = 1
par_domain = "R"
grad_method = "A"
@classmethod
def _matrix(cls, *params):
theta, phi, lam = params
c = math.cos(theta / 2)
s = math.sin(theta / 2)
return np.array(
[
[c, -s * cmath.exp(1j * lam)],
[s * cmath.exp(1j * phi), c * cmath.exp(1j * (phi + lam))],
]
)
@staticmethod
def decomposition(theta, phi, lam, wires):
decomp_ops = [
Rot(lam, theta, -lam, wires=wires),
PhaseShift(lam, wires=wires),
PhaseShift(phi, wires=wires),
]
return decomp_ops
def adjoint(self):
theta, phi, lam = self.parameters
new_lam = (np.pi - phi) % (2 * np.pi)
new_phi = (np.pi - lam) % (2 * np.pi)
return U3(theta, new_phi, new_lam, wires=self.wires)
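# Editor's note: the analogous check (not in the original source) for U3,
# U3(theta, phi, lam) = R_phi(phi + lam) R(lam, theta, -lam):
#
#     >>> theta, phi, lam = 0.5, 0.3, 0.7
#     >>> np.allclose(U3._matrix(theta, phi, lam),
#     ...             PhaseShift._matrix(phi + lam) @ Rot._matrix(lam, theta, -lam))
#     True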
class IsingXX(Operation):
r"""IsingXX(phi, wires)
Ising XX coupling gate
.. math:: XX(\phi) = \begin{bmatrix}
\cos(\phi / 2) & 0 & 0 & -i \sin(\phi / 2) \\
0 & \cos(\phi / 2) & -i \sin(\phi / 2) & 0 \\
0 & -i \sin(\phi / 2) & \cos(\phi / 2) & 0 \\
-i \sin(\phi / 2) & 0 & 0 & \cos(\phi / 2)
\end{bmatrix}.
**Details:**
* Number of wires: 2
* Number of parameters: 1
* Gradient recipe: :math:`\frac{d}{d\phi}f(XX(\phi)) = \frac{1}{2}\left[f(XX(\phi +\pi/2)) - f(XX(\phi-\pi/2))\right]`
where :math:`f` is an expectation value depending on :math:`XX(\phi)`.
Args:
phi (float): the phase angle
        wires (Sequence[int]): the subsystems the gate acts on
"""
num_params = 1
num_wires = 2
par_domain = "R"
grad_method = "A"
generator = [
np.array([[0, 0, 0, 1], [0, 0, 1, 0], [0, 1, 0, 0], [1, 0, 0, 0]]),
-1 / 2,
]
@classmethod
def _matrix(cls, *params):
phi = params[0]
c = math.cos(phi / 2)
s = math.sin(phi / 2)
return np.array(
[
[c, 0, 0, -1j * s],
[0, c, -1j * s, 0],
[0, -1j * s, c, 0],
[-1j * s, 0, 0, c],
]
)
@staticmethod
def decomposition(phi, wires):
decomp_ops = [
qml.CNOT(wires=wires),
RX(phi, wires=[wires[0]]),
qml.CNOT(wires=wires),
]
return decomp_ops
def adjoint(self):
(phi,) = self.parameters
return IsingXX(-phi, wires=self.wires)
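# Editor's note: sketch (not in the original source) — IsingXX agrees with a
# two-qubit PauliRot over the word "XX":
#
#     >>> np.allclose(IsingXX._matrix(0.5), PauliRot._matrix(0.5, "XX"))
#     True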
class IsingYY(Operation):
r"""IsingYY(phi, wires)
Ising YY coupling gate
    .. math:: YY(\phi) = \begin{bmatrix}
\cos(\phi / 2) & 0 & 0 & i \sin(\phi / 2) \\
0 & \cos(\phi / 2) & -i \sin(\phi / 2) & 0 \\
0 & -i \sin(\phi / 2) & \cos(\phi / 2) & 0 \\
i \sin(\phi / 2) & 0 & 0 & \cos(\phi / 2)
\end{bmatrix}.
**Details:**
* Number of wires: 2
* Number of parameters: 1
* Gradient recipe: :math:`\frac{d}{d\phi}f(YY(\phi)) = \frac{1}{2}\left[f(YY(\phi +\pi/2)) - f(YY(\phi-\pi/2))\right]`
where :math:`f` is an expectation value depending on :math:`YY(\phi)`.
Args:
phi (float): the phase angle
        wires (Sequence[int]): the subsystems the gate acts on
"""
num_params = 1
num_wires = 2
par_domain = "R"
grad_method = "A"
generator = [
np.array([[0, 0, 0, -1], [0, 0, 1, 0], [0, 1, 0, 0], [-1, 0, 0, 0]]),
-1 / 2,
]
@staticmethod
def decomposition(phi, wires):
return [
qml.CY(wires=wires),
qml.RY(phi, wires=[wires[0]]),
qml.CY(wires=wires),
]
@classmethod
def _matrix(cls, *params):
phi = params[0]
cos = np.cos(phi / 2)
isin = 1.0j * np.sin(phi / 2)
return np.array(
[
[cos, 0.0, 0.0, isin],
[0.0, cos, -isin, 0.0],
[0.0, -isin, cos, 0.0],
[isin, 0.0, 0.0, cos],
],
dtype=complex,
)
def adjoint(self):
(phi,) = self.parameters
return IsingYY(-phi, wires=self.wires)
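# Editor's note: the same cross-check (not in the original source) for YY:
#
#     >>> np.allclose(IsingYY._matrix(0.5), PauliRot._matrix(0.5, "YY"))
#     True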
class IsingZZ(Operation):
r""" IsingZZ(phi, wires)
Ising ZZ coupling gate
.. math:: ZZ(\phi) = \begin{bmatrix}
e^{-i \phi / 2} & 0 & 0 & 0 \\
0 & e^{i \phi / 2} & 0 & 0 \\
0 & 0 & e^{i \phi / 2} & 0 \\
0 & 0 & 0 & e^{-i \phi / 2}
\end{bmatrix}.
**Details:**
* Number of wires: 2
* Number of parameters: 1
* Gradient recipe: :math:`\frac{d}{d\phi}f(ZZ(\phi)) = \frac{1}{2}\left[f(ZZ(\phi +\pi/2)) - f(ZZ(\phi-\pi/2))\right]`
    where :math:`f` is an expectation value depending on :math:`ZZ(\phi)`.
Args:
phi (float): the phase angle
        wires (Sequence[int]): the subsystems the gate acts on
"""
num_params = 1
num_wires = 2
par_domain = "R"
grad_method = "A"
generator = [
np.array([[1, 0, 0, 0], [0, -1, 0, 0], [0, 0, -1, 0], [0, 0, 0, 1]]),
-1 / 2,
]
@staticmethod
def decomposition(phi, wires):
return [
qml.CNOT(wires=wires),
qml.RZ(phi, wires=[wires[1]]),
qml.CNOT(wires=wires),
]
@classmethod
def _matrix(cls, *params):
phi = params[0]
pos_phase = np.exp(1.0j * phi / 2)
neg_phase = np.exp(-1.0j * phi / 2)
return np.diag([neg_phase, pos_phase, pos_phase, neg_phase])
def adjoint(self):
(phi,) = self.parameters
return IsingZZ(-phi, wires=self.wires)
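# Editor's note: sketch (not in the original source) — IsingZZ coincides with
# the two-wire MultiRZ, consistent with its CNOT/RZ/CNOT decomposition above:
#
#     >>> np.allclose(IsingZZ._matrix(0.5), MultiRZ._matrix(0.5, 2))
#     True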
| [
"[email protected]"
] | |
a28380fa7c747fd2980e91a59c59853a625e2940 | 3cf1535bd23bfbfe078464eb6ba9043e30b2d67d | /RLBotPack/Skybot/bot_functions.py | 39e65467fbe9a1ecf5442a3283ad0c06a4459d71 | [
"MIT"
] | permissive | RLBot/RLBotPack | 11bab3be9dea24521853a5ba3f0ba5716c9922d2 | d4756871449a6e587186e4f5d8830fc73a85c33a | refs/heads/master | 2023-07-09T08:37:05.350458 | 2023-05-17T21:51:07 | 2023-05-17T21:51:07 | 188,609,141 | 27 | 132 | MIT | 2023-09-12T00:02:43 | 2019-05-25T20:24:44 | Python | UTF-8 | Python | false | false | 10,014 | py | import math
def ball_path_predict(data_loc_speed):  # [times, [x, y, z] locations, [x, y, z] velocities, [x, y, z] angular velocities]
start_on=0
#print(data_loc_speed)
loc_x=data_loc_speed[1][0][start_on]
loc_y=data_loc_speed[1][1][start_on]
loc_z=data_loc_speed[1][2][start_on]
speed_x=data_loc_speed[2][0][start_on]
speed_y=data_loc_speed[2][1][start_on]
speed_z=data_loc_speed[2][2][start_on]
ang_speed_x=data_loc_speed[3][0][start_on]
ang_speed_y=data_loc_speed[3][1][start_on]
ang_speed_z=data_loc_speed[3][2][start_on]
time_i=data_loc_speed[0][start_on]
timer=0
time_l=[]
ground_t=[]
predic_loc_z_t=[]
predic_loc_x_t=[]
predic_loc_y_t=[]
ground_loc_x=[]
ground_loc_y=[]
ground_loc_z=[]
predicted_loc=[[],[[],[],[]],[],[[],[],[]],[],[[],[],[]]]
bounce_t=0
goal=False
ground_next=False
ball_rolling=False
air_friction=0.013
gravity=-650
ball_radius=93
step=1/120
perpendicular_restitution=0.60
    # TODO: change depending on entry angle
paralel_restitution=0.713
spin_inertia=0.4
while timer < 5:
time=timer-bounce_t
#z
loc_z_t=loc_z+((speed_z*(1-air_friction*time))*time)+(0.5*(gravity)*(time**2))
#x
loc_x_t=loc_x+((speed_x*(1-air_friction*time))*time)
#y
loc_y_t=loc_y+((speed_y*(1-air_friction*time))*time)
if loc_z_t<ball_radius:
speed_z=(speed_z*(1-air_friction*time-step)+gravity*(time-step))
speed_z=abs(speed_z)*perpendicular_restitution
loc_z=ball_radius
loc_x=loc_x+((speed_x*(1-air_friction*time))*time)
loc_y=loc_y+((speed_y*(1-air_friction*time))*time)
bounce_t=timer
ground_next=True
if speed_z < 0.01:
speed_z=0
ground_next=False
ball_rolling=True
speed_x=speed_x*(1-air_friction*time)#*paralel_restitution
speed_y=speed_y*(1-air_friction*time)#*paralel_restitution
if True:
entry_angle = abs(math.atan2(speed_z,math.sqrt(speed_x**2+speed_y**2)))/math.pi*180
# some more magic numbers
custom_friction = (paralel_restitution-1)/(28)*entry_angle +1
                # limiting custom_friction to the range [paralel_restitution, 1]
if custom_friction<paralel_restitution: custom_friction=paralel_restitution
speed_x = (speed_x + ang_speed_y * ball_radius * spin_inertia) * paralel_restitution
speed_y = (speed_y - ang_speed_x * ball_radius * spin_inertia) * paralel_restitution
ang_speed_x = -speed_y/ball_radius
ang_speed_y = speed_x/ball_radius
ang_speed_z = speed_z/ball_radius
# limiting ball spin
total_ang_speed = math.sqrt(ang_speed_x**2+ang_speed_y**2+ang_speed_z**2)
if total_ang_speed > 6:
ang_speed_x,ang_speed_y,ang_speed_z = 6*ang_speed_x/total_ang_speed, 6*ang_speed_y/total_ang_speed, 6*ang_speed_z/total_ang_speed
elif abs(loc_z_t)>2044-ball_radius:
loc_z=2044-ball_radius
speed_z=-(speed_z*(1-air_friction*time-step)+gravity*(time-step))
#speed_z=speed_z*(1-air_friction*(time))
speed_z=speed_z*perpendicular_restitution
loc_y=loc_y+((speed_y*(1-air_friction*(time)))*(time))
speed_y=speed_y*(1-air_friction*(time)) #*paralel_restitution
loc_x=loc_x+((speed_x*(1-air_friction*(time)))*(time))
speed_x=speed_x*(1-air_friction*(time)) #*paralel_restitution
if True:
entry_angle = abs(math.atan2(speed_z,math.sqrt(speed_x**2+speed_y**2)))/math.pi*180
# some more magic numbers
custom_friction = (paralel_restitution-1)/(28)*entry_angle +1
                # limiting custom_friction to the range [paralel_restitution, 1]
if custom_friction<paralel_restitution: custom_friction=paralel_restitution
speed_x = (speed_x + ang_speed_z * ball_radius * spin_inertia) * custom_friction
speed_y = (speed_y + ang_speed_z * ball_radius * spin_inertia) * paralel_restitution
#speed_z = (speed_x - ang_speed_y * ball_radius * spin_inertia) * paralel_restitution
ang_speed_x = speed_z/ball_radius
ang_speed_y = speed_z/ball_radius
#ang_speed_z = speed_y/ball_radius
# limiting ball spin
total_ang_speed = math.sqrt(ang_speed_x**2+ang_speed_y**2+ang_speed_z**2)
if total_ang_speed > 6:
ang_speed_x,ang_speed_y,ang_speed_z = 6*ang_speed_x/total_ang_speed, 6*ang_speed_y/total_ang_speed, 6*ang_speed_z/total_ang_speed
bounce_t=timer
elif abs(loc_x_t)>4096-ball_radius:
if loc_x>0:
loc_x=4096-ball_radius
else:
loc_x=-4096+ball_radius
speed_x=speed_x*(1-air_friction*(time))
speed_x=-speed_x*perpendicular_restitution
loc_y=loc_y+((speed_y*(1-air_friction*(time)))*(time))
speed_y=speed_y*(1-air_friction*(time))#*paralel_restitution
loc_z=loc_z+((speed_z*(1-air_friction*(time)))*(time))+(0.5*(gravity)*((time)**2))
speed_z=(speed_z*(1-air_friction*(time-step))+gravity*((time-step)))#*paralel_restitution #implement spin
if True:
entry_angle = abs(math.atan2(speed_z,math.sqrt(speed_x**2+speed_y**2)))/math.pi*180
# some more magic numbers
custom_friction = (paralel_restitution-1)/(28)*entry_angle +1
                # limiting custom_friction to the range [paralel_restitution, 1]
if custom_friction<paralel_restitution: custom_friction=paralel_restitution
#speed_x = (speed_x + ang_speed_z * ball_radius * spin_inertia) * custom_friction
speed_y = (speed_y + ang_speed_z * ball_radius * spin_inertia) * paralel_restitution
speed_z = (speed_z - ang_speed_y * ball_radius * spin_inertia) * paralel_restitution
#ang_speed_x = -speed_z/ball_radius
ang_speed_y = -speed_z/ball_radius
ang_speed_z = speed_y/ball_radius
# limiting ball spin
total_ang_speed = math.sqrt(ang_speed_x**2+ang_speed_y**2+ang_speed_z**2)
if total_ang_speed > 6:
ang_speed_x,ang_speed_y,ang_speed_z = 6*ang_speed_x/total_ang_speed, 6*ang_speed_y/total_ang_speed, 6*ang_speed_z/total_ang_speed
bounce_t=timer
elif abs(loc_y_t)>5120-ball_radius:
if abs(loc_x_t)<892.755-ball_radius and abs(loc_z_t)<642.775-ball_radius:
goal=True
break
if loc_y>0:
loc_y=5120-ball_radius
else:
loc_y=-5120+ball_radius
speed_y=speed_y*(1-air_friction*time)#*paralel_restitution
speed_y=-speed_y*perpendicular_restitution
loc_z=loc_z+((speed_z*(1-air_friction*time))*time)+(0.5*(gravity)*(time**2))
loc_x=loc_x+((speed_x*(1-air_friction*time))*time)
speed_x=speed_x*(1-air_friction*time)#*paralel_restitution
speed_z=(speed_z*(1-air_friction*(time-step))+gravity*((time-step)))#*paralel_restitution
if True:
entry_angle = abs(math.atan2(speed_z,math.sqrt(speed_x**2+speed_y**2)))/math.pi*180
# some more magic numbers
custom_friction = (paralel_restitution-1)/(28)*entry_angle +1
                # limiting custom_friction to the range [paralel_restitution, 1]
if custom_friction<paralel_restitution: custom_friction=paralel_restitution
speed_x = (speed_x - ang_speed_z * ball_radius * spin_inertia) * paralel_restitution
#speed_y = (speed_y + ang_speed_z * ball_radius * spin_inertia) * custom_friction
speed_z = (speed_z + ang_speed_x * ball_radius * spin_inertia) * custom_friction
ang_speed_x = -speed_z/ball_radius
#ang_speed_y = -speed_z/ball_radius
ang_speed_z = speed_y/ball_radius
# limiting ball spin
total_ang_speed = math.sqrt(ang_speed_x**2+ang_speed_y**2+ang_speed_z**2)
if total_ang_speed > 6:
ang_speed_x,ang_speed_y,ang_speed_z = 6*ang_speed_x/total_ang_speed, 6*ang_speed_y/total_ang_speed, 6*ang_speed_z/total_ang_speed
bounce_t=timer
else:
predic_loc_z_t+=[loc_z_t]
predic_loc_x_t+=[loc_x_t]
predic_loc_y_t+=[loc_y_t]
tick_time=[timer+time_i]
time_l+=tick_time
timer+=step
if ground_next:
ground_t+=tick_time
ground_loc_x+=[loc_x_t]
ground_loc_y+=[loc_y_t]
ground_loc_z+=[loc_z_t]
ground_next=False
if ball_rolling:
ground_loc_x+=[loc_x_t]
ground_loc_y+=[loc_y_t]
ground_loc_z+=[loc_z_t]
predicted_loc[0]=time_l
predicted_loc[1][0]=predic_loc_x_t
predicted_loc[1][1]=predic_loc_y_t
predicted_loc[1][2]=predic_loc_z_t
predicted_loc[2]=ground_t
predicted_loc[3][0]=ground_loc_x
predicted_loc[3][1]=ground_loc_y
predicted_loc[3][2]=ground_loc_z
if goal:
predicted_loc[4]=timer
predicted_loc[5][0]=loc_x
predicted_loc[5][1]=loc_y
predicted_loc[5][2]=loc_z
else:
predicted_loc[4]=0
predicted_loc[5][0]=0
predicted_loc[5][1]=0
predicted_loc[5][2]=0
return predicted_loc
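# Editor's note: a usage sketch (not part of the original file; the numbers
# are illustrative, not taken from a real game tick). The argument packs the
# latest samples as [times, locations, velocities, angular velocities], each
# axis as its own list, matching the indexing at the top of the function.
#
#     sample = [
#         [0.0],                         # game time (s)
#         [[0.0], [0.0], [1000.0]],      # ball location x/y/z (uu)
#         [[500.0], [0.0], [0.0]],       # ball velocity x/y/z (uu/s)
#         [[0.0], [0.0], [0.0]],         # ball angular velocity x/y/z (rad/s)
#     ]
#     prediction = ball_path_predict(sample)
#     times, locations = prediction[0], prediction[1]   # 5 s of flight at 120 Hz
#     ground_times, ground_locations = prediction[2], prediction[3]
#     goal_time, goal_location = prediction[4], prediction[5]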
| [
"[email protected]"
] | |
83ebf96ed9d709453f2542d0921655ff7857ce40 | caf135d264c4c1fdd320b42bf0d019e350938b2d | /04_Algorithms/Leetcode/L24_Swap Nodes in Pairs.py | eba7c0bc0a8f2006110eb82a2b8a1604aa56fe07 | [] | no_license | coolxv/DL-Prep | 4243c51103bdc38972b8a7cbe3db4efa93851342 | 3e6565527ee8479e178852fffc4ccd0e44166e48 | refs/heads/master | 2022-12-31T22:42:20.806208 | 2020-10-23T10:19:19 | 2020-10-23T10:19:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 807 | py | # Definition for singly-linked list.
# class ListNode:
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution:
def swapPairs(self, head: ListNode) -> ListNode:
if not head or not head.next:
return head
else:
first = head
second = first.next
afternode = second.next
head = second
head.next = first
first.next = afternode
while afternode and afternode.next:
prevnode = first
first,second = afternode,afternode.next
afternode = second.next
prevnode.next = second
second.next = first
first.next = afternode
return head
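# Editor's note: a quick self-check (not part of the original solution). The
# ListNode class above is commented out on LeetCode, so it is redefined here
# for the sketch:
#
#     class ListNode:
#         def __init__(self, x):
#             self.val = x
#             self.next = None
#
#     a, b, c, d = ListNode(1), ListNode(2), ListNode(3), ListNode(4)
#     a.next, b.next, c.next = b, c, d
#     head = Solution().swapPairs(a)      # expect 2 -> 1 -> 4 -> 3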
| [
"[email protected]"
] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.