blob_id (stringlengths 40..40) | directory_id (stringlengths 40..40) | path (stringlengths 3..616) | content_id (stringlengths 40..40) | detected_licenses (listlengths 0..112) | license_type (stringclasses 2 values) | repo_name (stringlengths 5..115) | snapshot_id (stringlengths 40..40) | revision_id (stringlengths 40..40) | branch_name (stringclasses 777 values) | visit_date (timestamp[us] 2015-08-06 10:31:46 .. 2023-09-06 10:44:38) | revision_date (timestamp[us] 1970-01-01 02:38:32 .. 2037-05-03 13:00:00) | committer_date (timestamp[us] 1970-01-01 02:38:32 .. 2023-09-06 01:08:06) | github_id (int64 4.92k..681M, nullable ⌀) | star_events_count (int64 0..209k) | fork_events_count (int64 0..110k) | gha_license_id (stringclasses 22 values) | gha_event_created_at (timestamp[us] 2012-06-04 01:52:49 .. 2023-09-14 21:59:50, nullable ⌀) | gha_created_at (timestamp[us] 2008-05-22 07:58:19 .. 2023-08-21 12:35:19, nullable ⌀) | gha_language (stringclasses 149 values) | src_encoding (stringclasses 26 values) | language (stringclasses 1 value) | is_vendor (bool, 2 classes) | is_generated (bool, 2 classes) | length_bytes (int64 3..10.2M) | extension (stringclasses 188 values) | content (stringlengths 3..10.2M) | authors (listlengths 1..1) | author_id (stringlengths 1..132) |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
1332cc18df8bec9ad15a0c2f869d24c6ca979ac3 | fbbe424559f64e9a94116a07eaaa555a01b0a7bb | /LightGBM_sklearn_scipy_numpy/source/scipy/optimize/nonlin.py | a43bb62e19e9f80bda88799dbc3c8aa46227e215 | [
"MIT",
"GPL-3.0-or-later",
"Qhull",
"GPL-3.0-only",
"BSD-3-Clause",
"Apache-2.0",
"BSD-3-Clause-Open-MPI",
"BSD-2-Clause",
"LicenseRef-scancode-unknown-license-reference",
"Python-2.0",
"GCC-exception-3.1"
]
| permissive | ryfeus/lambda-packs | 6544adb4dec19b8e71d75c24d8ed789b785b0369 | cabf6e4f1970dc14302f87414f170de19944bac2 | refs/heads/master | 2022-12-07T16:18:52.475504 | 2022-11-29T13:35:35 | 2022-11-29T13:35:35 | 71,386,735 | 1,283 | 263 | MIT | 2022-11-26T05:02:14 | 2016-10-19T18:22:39 | Python | UTF-8 | Python | false | false | 46,761 | py | r"""
Nonlinear solvers
-----------------
.. currentmodule:: scipy.optimize
This is a collection of general-purpose nonlinear multidimensional
solvers. These solvers find *x* for which *F(x) = 0*. Both *x*
and *F* can be multidimensional.
Routines
~~~~~~~~
Large-scale nonlinear solvers:
.. autosummary::
newton_krylov
anderson
General nonlinear solvers:
.. autosummary::
broyden1
broyden2
Simple iterations:
.. autosummary::
excitingmixing
linearmixing
diagbroyden
Examples
~~~~~~~~
**Small problem**
>>> import numpy as np
>>> def F(x):
... return np.cos(x) + x[::-1] - [1, 2, 3, 4]
>>> import scipy.optimize
>>> x = scipy.optimize.broyden1(F, [1,1,1,1], f_tol=1e-14)
>>> x
array([ 4.04674914, 3.91158389, 2.71791677, 1.61756251])
>>> np.cos(x) + x[::-1]
array([ 1., 2., 3., 4.])
**Large problem**
Suppose that we needed to solve the following integrodifferential
equation on the square :math:`[0,1]\times[0,1]`:
.. math::
\nabla^2 P = 10 \left(\int_0^1\int_0^1\cosh(P)\,dx\,dy\right)^2
with :math:`P(x,1) = 1` and :math:`P=0` elsewhere on the boundary of
the square.
The solution can be found using the `newton_krylov` solver:
.. plot::
import numpy as np
from scipy.optimize import newton_krylov
from numpy import cosh, zeros_like, mgrid, zeros
# parameters
nx, ny = 75, 75
hx, hy = 1./(nx-1), 1./(ny-1)
P_left, P_right = 0, 0
P_top, P_bottom = 1, 0
def residual(P):
d2x = zeros_like(P)
d2y = zeros_like(P)
d2x[1:-1] = (P[2:] - 2*P[1:-1] + P[:-2]) / hx/hx
d2x[0] = (P[1] - 2*P[0] + P_left)/hx/hx
d2x[-1] = (P_right - 2*P[-1] + P[-2])/hx/hx
d2y[:,1:-1] = (P[:,2:] - 2*P[:,1:-1] + P[:,:-2])/hy/hy
d2y[:,0] = (P[:,1] - 2*P[:,0] + P_bottom)/hy/hy
d2y[:,-1] = (P_top - 2*P[:,-1] + P[:,-2])/hy/hy
return d2x + d2y - 10*cosh(P).mean()**2
# solve
guess = zeros((nx, ny), float)
sol = newton_krylov(residual, guess, method='lgmres', verbose=1)
print('Residual: %g' % abs(residual(sol)).max())
# visualize
import matplotlib.pyplot as plt
x, y = mgrid[0:1:(nx*1j), 0:1:(ny*1j)]
plt.pcolor(x, y, sol)
plt.colorbar()
plt.show()
"""
# Copyright (C) 2009, Pauli Virtanen <[email protected]>
# Distributed under the same license as Scipy.
from __future__ import division, print_function, absolute_import
import sys
import numpy as np
from scipy._lib.six import callable, exec_, xrange
from scipy.linalg import norm, solve, inv, qr, svd, LinAlgError
from numpy import asarray, dot, vdot
import scipy.sparse.linalg
import scipy.sparse
from scipy.linalg import get_blas_funcs
import inspect
from scipy._lib._util import getargspec_no_self as _getargspec
from .linesearch import scalar_search_wolfe1, scalar_search_armijo
__all__ = [
'broyden1', 'broyden2', 'anderson', 'linearmixing',
'diagbroyden', 'excitingmixing', 'newton_krylov']
#------------------------------------------------------------------------------
# Utility functions
#------------------------------------------------------------------------------
class NoConvergence(Exception):
pass
def maxnorm(x):
return np.absolute(x).max()
def _as_inexact(x):
"""Return `x` as an array, of either floats or complex floats"""
x = asarray(x)
if not np.issubdtype(x.dtype, np.inexact):
return asarray(x, dtype=np.float_)
return x
def _array_like(x, x0):
"""Return ndarray `x` as same array subclass and shape as `x0`"""
x = np.reshape(x, np.shape(x0))
wrap = getattr(x0, '__array_wrap__', x.__array_wrap__)
return wrap(x)
def _safe_norm(v):
if not np.isfinite(v).all():
return np.array(np.inf)
return norm(v)
#------------------------------------------------------------------------------
# Generic nonlinear solver machinery
#------------------------------------------------------------------------------
_doc_parts = dict(
params_basic="""
F : function(x) -> f
Function whose root to find; should take and return an array-like
object.
xin : array_like
Initial guess for the solution
""".strip(),
params_extra="""
iter : int, optional
Number of iterations to make. If omitted (default), make as many
as required to meet tolerances.
verbose : bool, optional
Print status to stdout on every iteration.
maxiter : int, optional
Maximum number of iterations to make. If more are needed to
meet convergence, `NoConvergence` is raised.
f_tol : float, optional
Absolute tolerance (in max-norm) for the residual.
If omitted, default is 6e-6.
f_rtol : float, optional
Relative tolerance for the residual. If omitted, not used.
x_tol : float, optional
Absolute minimum step size, as determined from the Jacobian
approximation. If the step size is smaller than this, optimization
is terminated as successful. If omitted, not used.
x_rtol : float, optional
Relative minimum step size. If omitted, not used.
tol_norm : function(vector) -> scalar, optional
Norm to use in convergence check. Default is the maximum norm.
line_search : {None, 'armijo' (default), 'wolfe'}, optional
Which type of a line search to use to determine the step size in the
direction given by the Jacobian approximation. Defaults to 'armijo'.
callback : function, optional
Optional callback function. It is called on every iteration as
``callback(x, f)`` where `x` is the current solution and `f`
the corresponding residual.
Returns
-------
sol : ndarray
An array (of similar array type as `x0`) containing the final solution.
Raises
------
NoConvergence
When a solution was not found.
""".strip()
)
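# _set_doc interpolates the shared fragments in _doc_parts into a solver's
# docstring via old-style "%(name)s" formatting.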
def _set_doc(obj):
if obj.__doc__:
obj.__doc__ = obj.__doc__ % _doc_parts
def nonlin_solve(F, x0, jacobian='krylov', iter=None, verbose=False,
maxiter=None, f_tol=None, f_rtol=None, x_tol=None, x_rtol=None,
tol_norm=None, line_search='armijo', callback=None,
full_output=False, raise_exception=True):
"""
Find a root of a function, in a way suitable for large-scale problems.
Parameters
----------
%(params_basic)s
jacobian : Jacobian
A Jacobian approximation: `Jacobian` object or something that
`asjacobian` can transform to one. Alternatively, a string specifying
which of the builtin Jacobian approximations to use:
krylov, broyden1, broyden2, anderson
diagbroyden, linearmixing, excitingmixing
%(params_extra)s
full_output : bool
If true, returns a dictionary `info` containing convergence
information.
raise_exception : bool
If True, a `NoConvergence` exception is raise if no solution is found.
See Also
--------
asjacobian, Jacobian
Notes
-----
This algorithm implements the inexact Newton method, with
backtracking or full line searches. Several Jacobian
approximations are available, including Krylov and Quasi-Newton
methods.
References
----------
.. [KIM] C. T. Kelley, \"Iterative Methods for Linear and Nonlinear
Equations\". Society for Industrial and Applied Mathematics. (1995)
http://www.siam.org/books/kelley/fr16/index.php
"""
condition = TerminationCondition(f_tol=f_tol, f_rtol=f_rtol,
x_tol=x_tol, x_rtol=x_rtol,
iter=iter, norm=tol_norm)
x0 = _as_inexact(x0)
func = lambda z: _as_inexact(F(_array_like(z, x0))).flatten()
x = x0.flatten()
dx = np.inf
Fx = func(x)
Fx_norm = norm(Fx)
jacobian = asjacobian(jacobian)
jacobian.setup(x.copy(), Fx, func)
if maxiter is None:
if iter is not None:
maxiter = iter + 1
else:
maxiter = 100*(x.size+1)
if line_search is True:
line_search = 'armijo'
elif line_search is False:
line_search = None
if line_search not in (None, 'armijo', 'wolfe'):
raise ValueError("Invalid line search")
# Solver tolerance selection
gamma = 0.9
eta_max = 0.9999
eta_threshold = 0.1
eta = 1e-3
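# Forcing-term parameters in the style of Eisenstat-Walker: eta bounds the
# relative tolerance requested from the inner linear solve, and is tightened
# further below as the outer Newton iteration converges.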
for n in xrange(maxiter):
status = condition.check(Fx, x, dx)
if status:
break
# The tolerance, as computed for scipy.sparse.linalg.* routines
tol = min(eta, eta*Fx_norm)
dx = -jacobian.solve(Fx, tol=tol)
if norm(dx) == 0:
raise ValueError("Jacobian inversion yielded zero vector. "
"This indicates a bug in the Jacobian "
"approximation.")
# Line search, or Newton step
if line_search:
s, x, Fx, Fx_norm_new = _nonlin_line_search(func, x, Fx, dx,
line_search)
else:
s = 1.0
x = x + dx
Fx = func(x)
Fx_norm_new = norm(Fx)
jacobian.update(x.copy(), Fx)
if callback:
callback(x, Fx)
# Adjust forcing parameters for inexact methods
eta_A = gamma * Fx_norm_new**2 / Fx_norm**2
if gamma * eta**2 < eta_threshold:
eta = min(eta_max, eta_A)
else:
eta = min(eta_max, max(eta_A, gamma*eta**2))
Fx_norm = Fx_norm_new
# Print status
if verbose:
sys.stdout.write("%d: |F(x)| = %g; step %g; tol %g\n" % (
n, norm(Fx), s, eta))
sys.stdout.flush()
else:
if raise_exception:
raise NoConvergence(_array_like(x, x0))
else:
status = 2
if full_output:
info = {'nit': condition.iteration,
'fun': Fx,
'status': status,
'success': status == 1,
'message': {1: 'A solution was found at the specified '
'tolerance.',
2: 'The maximum number of iterations allowed '
'has been reached.'
}[status]
}
return _array_like(x, x0), info
else:
return _array_like(x, x0)
_set_doc(nonlin_solve)
def _nonlin_line_search(func, x, Fx, dx, search_type='armijo', rdiff=1e-8,
smin=1e-2):
tmp_s = [0]
tmp_Fx = [Fx]
tmp_phi = [norm(Fx)**2]
s_norm = norm(x) / norm(dx)
def phi(s, store=True):
if s == tmp_s[0]:
return tmp_phi[0]
xt = x + s*dx
v = func(xt)
p = _safe_norm(v)**2
if store:
tmp_s[0] = s
tmp_phi[0] = p
tmp_Fx[0] = v
return p
def derphi(s):
ds = (abs(s) + s_norm + 1) * rdiff
return (phi(s+ds, store=False) - phi(s)) / ds
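# derphi approximates phi'(s) with a one-sided finite difference whose step
# is scaled by rdiff and the relative sizes of x and dx (via s_norm).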
if search_type == 'wolfe':
s, phi1, phi0 = scalar_search_wolfe1(phi, derphi, tmp_phi[0],
xtol=1e-2, amin=smin)
elif search_type == 'armijo':
s, phi1 = scalar_search_armijo(phi, tmp_phi[0], -tmp_phi[0],
amin=smin)
if s is None:
# XXX: No suitable step length found. Take the full Newton step,
# and hope for the best.
s = 1.0
x = x + s*dx
if s == tmp_s[0]:
Fx = tmp_Fx[0]
else:
Fx = func(x)
Fx_norm = norm(Fx)
return s, x, Fx, Fx_norm
class TerminationCondition(object):
"""
Termination condition for an iteration. It is terminated if
- |F| < f_rtol*|F_0|, AND
- |F| < f_tol
AND
- |dx| < x_rtol*|x|, AND
- |dx| < x_tol
"""
def __init__(self, f_tol=None, f_rtol=None, x_tol=None, x_rtol=None,
iter=None, norm=maxnorm):
if f_tol is None:
f_tol = np.finfo(np.float_).eps ** (1./3)
if f_rtol is None:
f_rtol = np.inf
if x_tol is None:
x_tol = np.inf
if x_rtol is None:
x_rtol = np.inf
self.x_tol = x_tol
self.x_rtol = x_rtol
self.f_tol = f_tol
self.f_rtol = f_rtol
if norm is None:
self.norm = maxnorm
else:
self.norm = norm
self.iter = iter
self.f0_norm = None
self.iteration = 0
def check(self, f, x, dx):
self.iteration += 1
f_norm = self.norm(f)
x_norm = self.norm(x)
dx_norm = self.norm(dx)
if self.f0_norm is None:
self.f0_norm = f_norm
if f_norm == 0:
return 1
if self.iter is not None:
# backwards compatibility with Scipy 0.6.0
return 2 * (self.iteration > self.iter)
# NB: condition must succeed for rtol=inf even if norm == 0
return int((f_norm <= self.f_tol
and f_norm/self.f_rtol <= self.f0_norm)
and (dx_norm <= self.x_tol
and dx_norm/self.x_rtol <= x_norm))
#------------------------------------------------------------------------------
# Generic Jacobian approximation
#------------------------------------------------------------------------------
class Jacobian(object):
"""
Common interface for Jacobians or Jacobian approximations.
The optional methods come in handy when implementing trust-region
or similar algorithms that often require evaluating transposes of
the Jacobian.
Methods
-------
solve
Returns J^-1 * v
update
Updates Jacobian to point `x` (where the function has residual `Fx`)
matvec : optional
Returns J * v
rmatvec : optional
Returns A^H * v
rsolve : optional
Returns A^-H * v
matmat : optional
Returns A * V, where V is a dense matrix with dimensions (N,K).
todense : optional
Form the dense Jacobian matrix. Necessary for dense trust region
algorithms, and useful for testing.
Attributes
----------
shape
Matrix dimensions (M, N)
dtype
Data type of the matrix.
func : callable, optional
Function the Jacobian corresponds to
"""
def __init__(self, **kw):
names = ["solve", "update", "matvec", "rmatvec", "rsolve",
"matmat", "todense", "shape", "dtype"]
for name, value in kw.items():
if name not in names:
raise ValueError("Unknown keyword argument %s" % name)
if value is not None:
setattr(self, name, kw[name])
if hasattr(self, 'todense'):
self.__array__ = lambda: self.todense()
def aspreconditioner(self):
return InverseJacobian(self)
def solve(self, v, tol=0):
raise NotImplementedError
def update(self, x, F):
pass
def setup(self, x, F, func):
self.func = func
self.shape = (F.size, x.size)
self.dtype = F.dtype
if self.__class__.setup is Jacobian.setup:
# Call on the first point unless overridden
self.update(x, F)
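# InverseJacobian exposes J^-1 through the same matvec interface: the wrapped
# Jacobian's solve() plays the role of matvec(), so an approximate inverse
# Jacobian can serve as a preconditioner (inner_M) for KrylovJacobian.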
class InverseJacobian(object):
def __init__(self, jacobian):
self.jacobian = jacobian
self.matvec = jacobian.solve
self.update = jacobian.update
if hasattr(jacobian, 'setup'):
self.setup = jacobian.setup
if hasattr(jacobian, 'rsolve'):
self.rmatvec = jacobian.rsolve
@property
def shape(self):
return self.jacobian.shape
@property
def dtype(self):
return self.jacobian.dtype
def asjacobian(J):
"""
Convert given object to one suitable for use as a Jacobian.
"""
spsolve = scipy.sparse.linalg.spsolve
if isinstance(J, Jacobian):
return J
elif inspect.isclass(J) and issubclass(J, Jacobian):
return J()
elif isinstance(J, np.ndarray):
if J.ndim > 2:
raise ValueError('array must have rank <= 2')
J = np.atleast_2d(np.asarray(J))
if J.shape[0] != J.shape[1]:
raise ValueError('array must be square')
return Jacobian(matvec=lambda v: dot(J, v),
rmatvec=lambda v: dot(J.conj().T, v),
solve=lambda v: solve(J, v),
rsolve=lambda v: solve(J.conj().T, v),
dtype=J.dtype, shape=J.shape)
elif scipy.sparse.isspmatrix(J):
if J.shape[0] != J.shape[1]:
raise ValueError('matrix must be square')
return Jacobian(matvec=lambda v: J*v,
rmatvec=lambda v: J.conj().T * v,
solve=lambda v: spsolve(J, v),
rsolve=lambda v: spsolve(J.conj().T, v),
dtype=J.dtype, shape=J.shape)
elif hasattr(J, 'shape') and hasattr(J, 'dtype') and hasattr(J, 'solve'):
return Jacobian(matvec=getattr(J, 'matvec'),
rmatvec=getattr(J, 'rmatvec'),
solve=J.solve,
rsolve=getattr(J, 'rsolve'),
update=getattr(J, 'update'),
setup=getattr(J, 'setup'),
dtype=J.dtype,
shape=J.shape)
elif callable(J):
# Assume it's a function J(x) that returns the Jacobian
class Jac(Jacobian):
def update(self, x, F):
self.x = x
def solve(self, v, tol=0):
m = J(self.x)
if isinstance(m, np.ndarray):
return solve(m, v)
elif scipy.sparse.isspmatrix(m):
return spsolve(m, v)
else:
raise ValueError("Unknown matrix type")
def matvec(self, v):
m = J(self.x)
if isinstance(m, np.ndarray):
return dot(m, v)
elif scipy.sparse.isspmatrix(m):
return m*v
else:
raise ValueError("Unknown matrix type")
def rsolve(self, v, tol=0):
m = J(self.x)
if isinstance(m, np.ndarray):
return solve(m.conj().T, v)
elif scipy.sparse.isspmatrix(m):
return spsolve(m.conj().T, v)
else:
raise ValueError("Unknown matrix type")
def rmatvec(self, v):
m = J(self.x)
if isinstance(m, np.ndarray):
return dot(m.conj().T, v)
elif scipy.sparse.isspmatrix(m):
return m.conj().T * v
else:
raise ValueError("Unknown matrix type")
return Jac()
elif isinstance(J, str):
return dict(broyden1=BroydenFirst,
broyden2=BroydenSecond,
anderson=Anderson,
diagbroyden=DiagBroyden,
linearmixing=LinearMixing,
excitingmixing=ExcitingMixing,
krylov=KrylovJacobian)[J]()
else:
raise TypeError('Cannot convert object to a Jacobian')
#------------------------------------------------------------------------------
# Broyden
#------------------------------------------------------------------------------
class GenericBroyden(Jacobian):
def setup(self, x0, f0, func):
Jacobian.setup(self, x0, f0, func)
self.last_f = f0
self.last_x = x0
if hasattr(self, 'alpha') and self.alpha is None:
# Autoscale the initial Jacobian parameter
# unless we have already guessed the solution.
normf0 = norm(f0)
if normf0:
self.alpha = 0.5*max(norm(x0), 1) / normf0
else:
self.alpha = 1.0
def _update(self, x, f, dx, df, dx_norm, df_norm):
raise NotImplementedError
def update(self, x, f):
df = f - self.last_f
dx = x - self.last_x
self._update(x, f, dx, df, norm(dx), norm(df))
self.last_f = f
self.last_x = x
class LowRankMatrix(object):
r"""
A matrix represented as
.. math:: \alpha I + \sum_{n=0}^{n=M} c_n d_n^\dagger
However, if the rank of the matrix reaches the dimension of the vectors,
full matrix representation will be used thereon.
"""
def __init__(self, alpha, n, dtype):
self.alpha = alpha
self.cs = []
self.ds = []
self.n = n
self.dtype = dtype
self.collapsed = None
@staticmethod
def _matvec(v, alpha, cs, ds):
axpy, scal, dotc = get_blas_funcs(['axpy', 'scal', 'dotc'],
cs[:1] + [v])
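# BLAS kernels: dotc(d, v) computes d^H v; axpy(c, w, n, a) computes w += a*c.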
w = alpha * v
for c, d in zip(cs, ds):
a = dotc(d, v)
w = axpy(c, w, w.size, a)
return w
@staticmethod
def _solve(v, alpha, cs, ds):
"""Evaluate w = M^-1 v"""
if len(cs) == 0:
return v/alpha
# (B + C D^H)^-1 = B^-1 - B^-1 C (I + D^H B^-1 C)^-1 D^H B^-1
axpy, dotc = get_blas_funcs(['axpy', 'dotc'], cs[:1] + [v])
c0 = cs[0]
A = alpha * np.identity(len(cs), dtype=c0.dtype)
for i, d in enumerate(ds):
for j, c in enumerate(cs):
A[i,j] += dotc(d, c)
q = np.zeros(len(cs), dtype=c0.dtype)
for j, d in enumerate(ds):
q[j] = dotc(d, v)
q /= alpha
q = solve(A, q)
w = v/alpha
for c, qc in zip(cs, q):
w = axpy(c, w, w.size, -qc)
return w
def matvec(self, v):
"""Evaluate w = M v"""
if self.collapsed is not None:
return np.dot(self.collapsed, v)
return LowRankMatrix._matvec(v, self.alpha, self.cs, self.ds)
def rmatvec(self, v):
"""Evaluate w = M^H v"""
if self.collapsed is not None:
return np.dot(self.collapsed.T.conj(), v)
return LowRankMatrix._matvec(v, np.conj(self.alpha), self.ds, self.cs)
def solve(self, v, tol=0):
"""Evaluate w = M^-1 v"""
if self.collapsed is not None:
return solve(self.collapsed, v)
return LowRankMatrix._solve(v, self.alpha, self.cs, self.ds)
def rsolve(self, v, tol=0):
"""Evaluate w = M^-H v"""
if self.collapsed is not None:
return solve(self.collapsed.T.conj(), v)
return LowRankMatrix._solve(v, np.conj(self.alpha), self.ds, self.cs)
def append(self, c, d):
if self.collapsed is not None:
self.collapsed += c[:,None] * d[None,:].conj()
return
self.cs.append(c)
self.ds.append(d)
if len(self.cs) > c.size:
self.collapse()
def __array__(self):
if self.collapsed is not None:
return self.collapsed
Gm = self.alpha*np.identity(self.n, dtype=self.dtype)
for c, d in zip(self.cs, self.ds):
Gm += c[:,None]*d[None,:].conj()
return Gm
def collapse(self):
"""Collapse the low-rank matrix to a full-rank one."""
self.collapsed = np.array(self)
self.cs = None
self.ds = None
self.alpha = None
def restart_reduce(self, rank):
"""
Reduce the rank of the matrix by dropping all vectors.
"""
if self.collapsed is not None:
return
assert rank > 0
if len(self.cs) > rank:
del self.cs[:]
del self.ds[:]
def simple_reduce(self, rank):
"""
Reduce the rank of the matrix by dropping oldest vectors.
"""
if self.collapsed is not None:
return
assert rank > 0
while len(self.cs) > rank:
del self.cs[0]
del self.ds[0]
def svd_reduce(self, max_rank, to_retain=None):
"""
Reduce the rank of the matrix by retaining some SVD components.
This corresponds to the \"Broyden Rank Reduction Inverse\"
algorithm described in [1]_.
Note that the SVD decomposition can be done by solving only a
problem whose size is the effective rank of this matrix, which
is viable even for large problems.
Parameters
----------
max_rank : int
Maximum rank of this matrix after reduction.
to_retain : int, optional
Number of SVD components to retain when reduction is done
(ie. rank > max_rank). Default is ``max_rank - 2``.
References
----------
.. [1] B.A. van der Rotten, PhD thesis,
\"A limited memory Broyden method to solve high-dimensional
systems of nonlinear equations\". Mathematisch Instituut,
Universiteit Leiden, The Netherlands (2003).
http://www.math.leidenuniv.nl/scripties/Rotten.pdf
"""
if self.collapsed is not None:
return
p = max_rank
if to_retain is not None:
q = to_retain
else:
q = p - 2
if self.cs:
p = min(p, len(self.cs[0]))
q = max(0, min(q, p-1))
m = len(self.cs)
if m < p:
# nothing to do
return
C = np.array(self.cs).T
D = np.array(self.ds).T
D, R = qr(D, mode='economic')
C = dot(C, R.T.conj())
U, S, WH = svd(C, full_matrices=False, compute_uv=True)
C = dot(C, inv(WH))
D = dot(D, WH.T.conj())
for k in xrange(q):
self.cs[k] = C[:,k].copy()
self.ds[k] = D[:,k].copy()
del self.cs[q:]
del self.ds[q:]
_doc_parts['broyden_params'] = """
alpha : float, optional
Initial guess for the Jacobian is ``(-1/alpha)``.
reduction_method : str or tuple, optional
Method used in ensuring that the rank of the Broyden matrix
stays low. Can either be a string giving the name of the method,
or a tuple of the form ``(method, param1, param2, ...)``
that gives the name of the method and values for additional parameters.
Methods available:
- ``restart``: drop all matrix columns. Has no extra parameters.
- ``simple``: drop oldest matrix column. Has no extra parameters.
- ``svd``: keep only the most significant SVD components.
Takes an extra parameter, ``to_retain``, which determines the
number of SVD components to retain when rank reduction is done.
Default is ``max_rank - 2``.
max_rank : int, optional
Maximum rank for the Broyden matrix.
Default is infinity (ie., no rank reduction).
""".strip()
class BroydenFirst(GenericBroyden):
r"""
Find a root of a function, using Broyden's first Jacobian approximation.
This method is also known as \"Broyden's good method\".
Parameters
----------
%(params_basic)s
%(broyden_params)s
%(params_extra)s
Notes
-----
This algorithm implements the inverse Jacobian Quasi-Newton update
.. math:: H_+ = H + (dx - H df) dx^\dagger H / ( dx^\dagger H df)
which corresponds to Broyden's first Jacobian update
.. math:: J_+ = J + (df - J dx) dx^\dagger / dx^\dagger dx
References
----------
.. [1] B.A. van der Rotten, PhD thesis,
\"A limited memory Broyden method to solve high-dimensional
systems of nonlinear equations\". Mathematisch Instituut,
Universiteit Leiden, The Netherlands (2003).
http://www.math.leidenuniv.nl/scripties/Rotten.pdf
"""
def __init__(self, alpha=None, reduction_method='restart', max_rank=None):
GenericBroyden.__init__(self)
self.alpha = alpha
self.Gm = None
if max_rank is None:
max_rank = np.inf
self.max_rank = max_rank
if isinstance(reduction_method, str):
reduce_params = ()
else:
reduce_params = reduction_method[1:]
reduction_method = reduction_method[0]
reduce_params = (max_rank - 1,) + reduce_params
if reduction_method == 'svd':
self._reduce = lambda: self.Gm.svd_reduce(*reduce_params)
elif reduction_method == 'simple':
self._reduce = lambda: self.Gm.simple_reduce(*reduce_params)
elif reduction_method == 'restart':
self._reduce = lambda: self.Gm.restart_reduce(*reduce_params)
else:
raise ValueError("Unknown rank reduction method '%s'" %
reduction_method)
def setup(self, x, F, func):
GenericBroyden.setup(self, x, F, func)
self.Gm = LowRankMatrix(-self.alpha, self.shape[0], self.dtype)
def todense(self):
return inv(self.Gm)
def solve(self, f, tol=0):
r = self.Gm.matvec(f)
if not np.isfinite(r).all():
# singular; reset the Jacobian approximation
self.setup(self.last_x, self.last_f, self.func)
return self.Gm.matvec(f)
def matvec(self, f):
return self.Gm.solve(f)
def rsolve(self, f, tol=0):
return self.Gm.rmatvec(f)
def rmatvec(self, f):
return self.Gm.rsolve(f)
def _update(self, x, f, dx, df, dx_norm, df_norm):
self._reduce() # reduce first to preserve secant condition
v = self.Gm.rmatvec(dx)
c = dx - self.Gm.matvec(df)
d = v / vdot(df, v)
self.Gm.append(c, d)
class BroydenSecond(BroydenFirst):
"""
Find a root of a function, using Broyden\'s second Jacobian approximation.
This method is also known as \"Broyden's bad method\".
Parameters
----------
%(params_basic)s
%(broyden_params)s
%(params_extra)s
Notes
-----
This algorithm implements the inverse Jacobian Quasi-Newton update
.. math:: H_+ = H + (dx - H df) df^\\dagger / ( df^\\dagger df)
corresponding to Broyden's second method.
References
----------
.. [1] B.A. van der Rotten, PhD thesis,
\"A limited memory Broyden method to solve high-dimensional
systems of nonlinear equations\". Mathematisch Instituut,
Universiteit Leiden, The Netherlands (2003).
http://www.math.leidenuniv.nl/scripties/Rotten.pdf
"""
def _update(self, x, f, dx, df, dx_norm, df_norm):
self._reduce() # reduce first to preserve secant condition
v = df
c = dx - self.Gm.matvec(df)
d = v / df_norm**2
self.Gm.append(c, d)
#------------------------------------------------------------------------------
# Broyden-like (restricted memory)
#------------------------------------------------------------------------------
class Anderson(GenericBroyden):
"""
Find a root of a function, using (extended) Anderson mixing.
The Jacobian is formed from a 'best' solution in the space
spanned by the last `M` vectors. As a result, only MxM matrix
inversions and MxN multiplications are required. [Ey]_
Parameters
----------
%(params_basic)s
alpha : float, optional
Initial guess for the Jacobian is (-1/alpha).
M : int, optional
Number of previous vectors to retain. Defaults to 5.
w0 : float, optional
Regularization parameter for numerical stability.
Good values are of the order of 0.01 (compared to unity).
%(params_extra)s
References
----------
.. [Ey] V. Eyert, J. Comp. Phys., 124, 271 (1996).
"""
# Note:
#
# Anderson method maintains a rank M approximation of the inverse Jacobian,
#
# J^-1 v ~ -v*alpha + (dX + alpha dF) A^-1 dF^H v
# A = W + dF^H dF
# W = w0^2 diag(dF^H dF)
#
# so that for w0 = 0 the secant condition applies for last M iterates, ie.,
#
# J^-1 df_j = dx_j
#
# for all j = 0 ... M-1.
#
# Moreover, (from Sherman-Morrison-Woodbury formula)
#
# J v ~ [ b I - b^2 C (I + b dF^H A^-1 C)^-1 dF^H ] v
# C = (dX + alpha dF) A^-1
# b = -1/alpha
#
# and after simplification
#
# J v ~ -v/alpha + (dX/alpha + dF) (dF^H dX - alpha W)^-1 dF^H v
#
def __init__(self, alpha=None, w0=0.01, M=5):
GenericBroyden.__init__(self)
self.alpha = alpha
self.M = M
self.dx = []
self.df = []
self.gamma = None
self.w0 = w0
def solve(self, f, tol=0):
dx = -self.alpha*f
n = len(self.dx)
if n == 0:
return dx
df_f = np.empty(n, dtype=f.dtype)
for k in xrange(n):
df_f[k] = vdot(self.df[k], f)
try:
gamma = solve(self.a, df_f)
except LinAlgError:
# singular; reset the Jacobian approximation
del self.dx[:]
del self.df[:]
return dx
for m in xrange(n):
dx += gamma[m]*(self.dx[m] + self.alpha*self.df[m])
return dx
def matvec(self, f):
dx = -f/self.alpha
n = len(self.dx)
if n == 0:
return dx
df_f = np.empty(n, dtype=f.dtype)
for k in xrange(n):
df_f[k] = vdot(self.df[k], f)
b = np.empty((n, n), dtype=f.dtype)
for i in xrange(n):
for j in xrange(n):
b[i,j] = vdot(self.df[i], self.dx[j])
if i == j and self.w0 != 0:
b[i,j] -= vdot(self.df[i], self.df[i])*self.w0**2*self.alpha
gamma = solve(b, df_f)
for m in xrange(n):
dx += gamma[m]*(self.df[m] + self.dx[m]/self.alpha)
return dx
def _update(self, x, f, dx, df, dx_norm, df_norm):
if self.M == 0:
return
self.dx.append(dx)
self.df.append(df)
while len(self.dx) > self.M:
self.dx.pop(0)
self.df.pop(0)
n = len(self.dx)
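# Rebuild the M x M Gram matrix a = W + dF^H dF used by solve(); only the
# upper triangle is computed explicitly and then mirrored below.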
a = np.zeros((n, n), dtype=f.dtype)
for i in xrange(n):
for j in xrange(i, n):
if i == j:
wd = self.w0**2
else:
wd = 0
a[i,j] = (1+wd)*vdot(self.df[i], self.df[j])
a += np.triu(a, 1).T.conj()
self.a = a
#------------------------------------------------------------------------------
# Simple iterations
#------------------------------------------------------------------------------
class DiagBroyden(GenericBroyden):
"""
Find a root of a function, using diagonal Broyden Jacobian approximation.
The Jacobian approximation is derived from previous iterations, by
retaining only the diagonal of Broyden matrices.
.. warning::
This algorithm may be useful for specific problems, but whether
it will work may depend strongly on the problem.
Parameters
----------
%(params_basic)s
alpha : float, optional
Initial guess for the Jacobian is (-1/alpha).
%(params_extra)s
"""
def __init__(self, alpha=None):
GenericBroyden.__init__(self)
self.alpha = alpha
def setup(self, x, F, func):
GenericBroyden.setup(self, x, F, func)
self.d = np.ones((self.shape[0],), dtype=self.dtype) / self.alpha
def solve(self, f, tol=0):
return -f / self.d
def matvec(self, f):
return -f * self.d
def rsolve(self, f, tol=0):
return -f / self.d.conj()
def rmatvec(self, f):
return -f * self.d.conj()
def todense(self):
return np.diag(-self.d)
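# Diagonal secant update: with J ~ -diag(d), the update below corrects d so
# that the residual of the secant condition J dx = df is removed along dx.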
def _update(self, x, f, dx, df, dx_norm, df_norm):
self.d -= (df + self.d*dx)*dx/dx_norm**2
class LinearMixing(GenericBroyden):
"""
Find a root of a function, using a scalar Jacobian approximation.
.. warning::
This algorithm may be useful for specific problems, but whether
it will work may depend strongly on the problem.
Parameters
----------
%(params_basic)s
alpha : float, optional
The Jacobian approximation is (-1/alpha).
%(params_extra)s
"""
def __init__(self, alpha=None):
GenericBroyden.__init__(self)
self.alpha = alpha
def solve(self, f, tol=0):
return -f*self.alpha
def matvec(self, f):
return -f/self.alpha
def rsolve(self, f, tol=0):
return -f*np.conj(self.alpha)
def rmatvec(self, f):
return -f/np.conj(self.alpha)
def todense(self):
return np.diag(-np.ones(self.shape[0])/self.alpha)
def _update(self, x, f, dx, df, dx_norm, df_norm):
pass
class ExcitingMixing(GenericBroyden):
"""
Find a root of a function, using a tuned diagonal Jacobian approximation.
The Jacobian matrix is diagonal and is tuned on each iteration.
.. warning::
This algorithm may be useful for specific problems, but whether
it will work may depend strongly on the problem.
Parameters
----------
%(params_basic)s
alpha : float, optional
Initial Jacobian approximation is (-1/alpha).
alphamax : float, optional
The entries of the diagonal Jacobian are kept in the range
``[alpha, alphamax]``.
%(params_extra)s
"""
def __init__(self, alpha=None, alphamax=1.0):
GenericBroyden.__init__(self)
self.alpha = alpha
self.alphamax = alphamax
self.beta = None
def setup(self, x, F, func):
GenericBroyden.setup(self, x, F, func)
self.beta = self.alpha * np.ones((self.shape[0],), dtype=self.dtype)
def solve(self, f, tol=0):
return -f*self.beta
def matvec(self, f):
return -f/self.beta
def rsolve(self, f, tol=0):
return -f*self.beta.conj()
def rmatvec(self, f):
return -f/self.beta.conj()
def todense(self):
return np.diag(-1/self.beta)
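# Tuning heuristic: where the residual keeps its sign between iterations the
# step was too small, so beta grows by alpha; elsewhere beta is reset to
# alpha, and all entries are clipped to [0, alphamax].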
def _update(self, x, f, dx, df, dx_norm, df_norm):
incr = f*self.last_f > 0
self.beta[incr] += self.alpha
self.beta[~incr] = self.alpha
np.clip(self.beta, 0, self.alphamax, out=self.beta)
#------------------------------------------------------------------------------
# Iterative/Krylov approximated Jacobians
#------------------------------------------------------------------------------
class KrylovJacobian(Jacobian):
r"""
Find a root of a function, using Krylov approximation for inverse Jacobian.
This method is suitable for solving large-scale problems.
Parameters
----------
%(params_basic)s
rdiff : float, optional
Relative step size to use in numerical differentiation.
method : {'lgmres', 'gmres', 'bicgstab', 'cgs', 'minres'} or function
Krylov method to use to approximate the Jacobian.
Can be a string, or a function implementing the same interface as
the iterative solvers in `scipy.sparse.linalg`.
The default is `scipy.sparse.linalg.lgmres`.
inner_M : LinearOperator or InverseJacobian
Preconditioner for the inner Krylov iteration.
Note that you can also use inverse Jacobians as (adaptive)
preconditioners. For example,
>>> from scipy.optimize.nonlin import BroydenFirst, KrylovJacobian
>>> from scipy.optimize.nonlin import InverseJacobian
>>> jac = BroydenFirst()
>>> kjac = KrylovJacobian(inner_M=InverseJacobian(jac))
If the preconditioner has a method named 'update', it will be called
as ``update(x, f)`` after each nonlinear step, with ``x`` giving
the current point, and ``f`` the current function value.
inner_tol, inner_maxiter, ...
Parameters to pass on to the \"inner\" Krylov solver.
See `scipy.sparse.linalg.gmres` for details.
outer_k : int, optional
Size of the subspace kept across LGMRES nonlinear iterations.
See `scipy.sparse.linalg.lgmres` for details.
%(params_extra)s
See Also
--------
scipy.sparse.linalg.gmres
scipy.sparse.linalg.lgmres
Notes
-----
This function implements a Newton-Krylov solver. The basic idea is
to compute the inverse of the Jacobian with an iterative Krylov
method. These methods require only evaluating the Jacobian-vector
products, which are conveniently approximated by a finite difference:
.. math:: J v \approx (f(x + \omega*v/|v|) - f(x)) / \omega
Due to the use of iterative matrix inverses, these methods can
deal with large nonlinear problems.
Scipy's `scipy.sparse.linalg` module offers a selection of Krylov
solvers to choose from. The default here is `lgmres`, which is a
variant of restarted GMRES iteration that reuses some of the
information obtained in the previous Newton steps to invert
Jacobians in subsequent steps.
For a review on Newton-Krylov methods, see for example [1]_,
and for the LGMRES sparse inverse method, see [2]_.
References
----------
.. [1] D.A. Knoll and D.E. Keyes, J. Comp. Phys. 193, 357 (2004).
:doi:`10.1016/j.jcp.2003.08.010`
.. [2] A.H. Baker and E.R. Jessup and T. Manteuffel,
SIAM J. Matrix Anal. Appl. 26, 962 (2005).
:doi:`10.1137/S0895479803422014`
"""
def __init__(self, rdiff=None, method='lgmres', inner_maxiter=20,
inner_M=None, outer_k=10, **kw):
self.preconditioner = inner_M
self.rdiff = rdiff
self.method = dict(
bicgstab=scipy.sparse.linalg.bicgstab,
gmres=scipy.sparse.linalg.gmres,
lgmres=scipy.sparse.linalg.lgmres,
cgs=scipy.sparse.linalg.cgs,
minres=scipy.sparse.linalg.minres,
).get(method, method)
self.method_kw = dict(maxiter=inner_maxiter, M=self.preconditioner)
if self.method is scipy.sparse.linalg.gmres:
# Replace GMRES's outer iteration with Newton steps
self.method_kw['restrt'] = inner_maxiter
self.method_kw['maxiter'] = 1
elif self.method is scipy.sparse.linalg.lgmres:
self.method_kw['outer_k'] = outer_k
# Replace LGMRES's outer iteration with Newton steps
self.method_kw['maxiter'] = 1
# Carry LGMRES's `outer_v` vectors across nonlinear iterations
self.method_kw.setdefault('outer_v', [])
self.method_kw.setdefault('prepend_outer_v', True)
# But don't carry the corresponding Jacobian*v products, in case
# the Jacobian changes a lot in the nonlinear step
#
# XXX: some trust-region inspired ideas might be more efficient...
# See eg. Brown & Saad. But needs to be implemented separately
# since it's not an inexact Newton method.
self.method_kw.setdefault('store_outer_Av', False)
for key, value in kw.items():
if not key.startswith('inner_'):
raise ValueError("Unknown parameter %s" % key)
self.method_kw[key[6:]] = value
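# The finite-difference step omega used in matvec() is rescaled after every
# update so the perturbation stays relative to the magnitudes of x0 and f0.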
def _update_diff_step(self):
mx = abs(self.x0).max()
mf = abs(self.f0).max()
self.omega = self.rdiff * max(1, mx) / max(1, mf)
def matvec(self, v):
nv = norm(v)
if nv == 0:
return 0*v
sc = self.omega / nv
r = (self.func(self.x0 + sc*v) - self.f0) / sc
if not np.all(np.isfinite(r)) and np.all(np.isfinite(v)):
raise ValueError('Function returned non-finite results')
return r
def solve(self, rhs, tol=0):
if 'tol' in self.method_kw:
sol, info = self.method(self.op, rhs, **self.method_kw)
else:
sol, info = self.method(self.op, rhs, tol=tol, **self.method_kw)
return sol
def update(self, x, f):
self.x0 = x
self.f0 = f
self._update_diff_step()
# Update also the preconditioner, if possible
if self.preconditioner is not None:
if hasattr(self.preconditioner, 'update'):
self.preconditioner.update(x, f)
def setup(self, x, f, func):
Jacobian.setup(self, x, f, func)
self.x0 = x
self.f0 = f
self.op = scipy.sparse.linalg.aslinearoperator(self)
if self.rdiff is None:
self.rdiff = np.finfo(x.dtype).eps ** (1./2)
self._update_diff_step()
# Setup also the preconditioner, if possible
if self.preconditioner is not None:
if hasattr(self.preconditioner, 'setup'):
self.preconditioner.setup(x, f, func)
#------------------------------------------------------------------------------
# Wrapper functions
#------------------------------------------------------------------------------
def _nonlin_wrapper(name, jac):
"""
Construct a solver wrapper with given name and jacobian approx.
It inspects the keyword arguments of ``jac.__init__`` and allows using
the same arguments in the wrapper function, in addition to the
keyword arguments of `nonlin_solve`.
"""
args, varargs, varkw, defaults = _getargspec(jac.__init__)
kwargs = list(zip(args[-len(defaults):], defaults))
kw_str = ", ".join(["%s=%r" % (k, v) for k, v in kwargs])
if kw_str:
kw_str = ", " + kw_str
kwkw_str = ", ".join(["%s=%s" % (k, k) for k, v in kwargs])
if kwkw_str:
kwkw_str = kwkw_str + ", "
# Construct the wrapper function so that its keyword arguments
# are visible in pydoc.help etc.
wrapper = """
def %(name)s(F, xin, iter=None %(kw)s, verbose=False, maxiter=None,
f_tol=None, f_rtol=None, x_tol=None, x_rtol=None,
tol_norm=None, line_search='armijo', callback=None, **kw):
jac = %(jac)s(%(kwkw)s **kw)
return nonlin_solve(F, xin, jac, iter, verbose, maxiter,
f_tol, f_rtol, x_tol, x_rtol, tol_norm, line_search,
callback)
"""
wrapper = wrapper % dict(name=name, kw=kw_str, jac=jac.__name__,
kwkw=kwkw_str)
ns = {}
ns.update(globals())
exec_(wrapper, ns)
func = ns[name]
func.__doc__ = jac.__doc__
_set_doc(func)
return func
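# For example, _nonlin_wrapper('broyden1', BroydenFirst) generates, roughly:
#
# def broyden1(F, xin, iter=None, alpha=None, reduction_method='restart',
# max_rank=None, verbose=False, ..., **kw):
# jac = BroydenFirst(alpha=alpha, reduction_method=reduction_method,
# max_rank=max_rank)
# return nonlin_solve(F, xin, jac, iter, verbose, ...)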
broyden1 = _nonlin_wrapper('broyden1', BroydenFirst)
broyden2 = _nonlin_wrapper('broyden2', BroydenSecond)
anderson = _nonlin_wrapper('anderson', Anderson)
linearmixing = _nonlin_wrapper('linearmixing', LinearMixing)
diagbroyden = _nonlin_wrapper('diagbroyden', DiagBroyden)
excitingmixing = _nonlin_wrapper('excitingmixing', ExcitingMixing)
newton_krylov = _nonlin_wrapper('newton_krylov', KrylovJacobian)
| [
"[email protected]"
]
| |
39d800e56c5d45069919499cce94f111c88d636d | 22eea10f9b62ad742be7a0ee9109a913e0ddb1bb | /StInt/EPI/Arrays/Merged-Intervals/merge_detect.py | 85969ec6b2e6d9edc6af203e1682077cb92d5dc9 | []
| no_license | mihirkelkar/languageprojects | f991610476fd64aabfec5a8cc60c263908085e17 | 4439fcf843a964fccf14a1c4dba6d054ca35d048 | refs/heads/master | 2021-01-01T20:16:22.322304 | 2020-12-20T19:03:40 | 2020-12-20T19:03:40 | 20,080,717 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 850 | py | class Event(object):
def __init__(self, start, end):
self.start = start
self.end = end
def print_times(self):
print(self.start, self.end)
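# Merge overlapping intervals: sort by start time, then sweep once, folding
# each interval into the next one when they overlap; merged-away entries are
# set to None and filtered out at the end (O(n log n) overall).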
def find_merge_intervals(list_of_intervals):
sorted_list = sorted(list_of_intervals, key = lambda x : x.start)
for ii in range(1, len(sorted_list)):
if sorted_list[ii - 1].end >= sorted_list[ii].start:
sorted_list[ii].start = sorted_list[ii - 1].start
if sorted_list[ii - 1].end > sorted_list[ii].end:
sorted_list[ii].end = sorted_list[ii - 1].end
sorted_list[ii - 1] = None
return [interval for interval in sorted_list if interval is not None]
def main():
a = Event(1, 3)
b = Event(2, 6)
c = Event(8, 10)
d = Event(15, 18)
temp_list = find_merge_intervals([a, b, c, d])
for ii in temp_list:
ii.print_times()
if __name__ == "__main__":
main()
| [
"[email protected]"
]
| |
37e28d11f1034c82a9f1482575f4e59e16a8dc35 | b68c855198c944a5dcc4b543f7609058f1d8db53 | /logparser/Drain.py | dd79f22e5ee1cba2f73baf42f30f6022c312340a | []
| no_license | forallx94/logbert | c4dab9d04386c5a716f946410ba10794d0cba379 | 77a5c37676731ade41495daf58ae038db37d6c63 | refs/heads/main | 2023-08-22T16:24:48.730343 | 2021-09-27T07:39:05 | 2021-09-27T07:39:05 | 407,034,672 | 0 | 0 | null | 2021-09-16T05:37:19 | 2021-09-16T05:37:19 | null | UTF-8 | Python | false | false | 12,717 | py | """
Description : This file implements the Drain algorithm for log parsing
Author : LogPAI team
License : MIT
"""
import re
import os
import numpy as np
import pandas as pd
import hashlib
from datetime import datetime
class Logcluster:
def __init__(self, logTemplate='', logIDL=None):
self.logTemplate = logTemplate
if logIDL is None:
logIDL = []
self.logIDL = logIDL
class Node:
def __init__(self, childD=None, depth=0, digitOrtoken=None):
if childD is None:
childD = dict()
self.childD = childD
self.depth = depth
self.digitOrtoken = digitOrtoken
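# Drain's parse tree: the root's first level keys on token count, the next
# (depth-2) levels key on leading tokens (with '<*>' as the wildcard child),
# and the leaves hold lists of Logcluster objects matched by similarity.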
class LogParser:
def __init__(self, log_format, indir='./', outdir='./result/', depth=4, st=0.4,
maxChild=100, rex=[], keep_para=True):
"""
Attributes
----------
rex : regular expressions used in preprocessing (step1)
path : the input path stores the input log file name
depth : depth of all leaf nodes
st : similarity threshold
maxChild : max number of children of an internal node
logName : the name of the input file containing raw log messages
savePath : the output path stores the file containing structured logs
"""
self.path = indir
self.depth = depth - 2
self.st = st
self.maxChild = maxChild
self.logName = None
self.savePath = outdir
self.df_log = None
self.log_format = log_format
self.rex = rex
self.keep_para = keep_para
def hasNumbers(self, s):
return any(char.isdigit() for char in s)
def treeSearch(self, rn, seq):
retLogClust = None
seqLen = len(seq)
if seqLen not in rn.childD:
return retLogClust
parentn = rn.childD[seqLen]
currentDepth = 1
for token in seq:
if currentDepth >= self.depth or currentDepth > seqLen:
break
if token in parentn.childD:
parentn = parentn.childD[token]
elif '<*>' in parentn.childD:
parentn = parentn.childD['<*>']
else:
return retLogClust
currentDepth += 1
logClustL = parentn.childD
retLogClust = self.fastMatch(logClustL, seq)
return retLogClust
def addSeqToPrefixTree(self, rn, logClust):
seqLen = len(logClust.logTemplate)
if seqLen not in rn.childD:
firstLayerNode = Node(depth=1, digitOrtoken=seqLen)
rn.childD[seqLen] = firstLayerNode
else:
firstLayerNode = rn.childD[seqLen]
parentn = firstLayerNode
currentDepth = 1
for token in logClust.logTemplate:
# Add current log cluster to the leaf node
if currentDepth >= self.depth or currentDepth > seqLen:
if len(parentn.childD) == 0:
parentn.childD = [logClust]
else:
parentn.childD.append(logClust)
break
# If token not matched in this layer of existing tree.
if token not in parentn.childD:
if not self.hasNumbers(token):
if '<*>' in parentn.childD:
if len(parentn.childD) < self.maxChild:
newNode = Node(depth=currentDepth + 1, digitOrtoken=token)
parentn.childD[token] = newNode
parentn = newNode
else:
parentn = parentn.childD['<*>']
else:
if len(parentn.childD) + 1 < self.maxChild:
newNode = Node(depth=currentDepth + 1, digitOrtoken=token)
parentn.childD[token] = newNode
parentn = newNode
elif len(parentn.childD) + 1 == self.maxChild:
newNode = Node(depth=currentDepth + 1, digitOrtoken='<*>')
parentn.childD['<*>'] = newNode
parentn = newNode
else:
parentn = parentn.childD['<*>']
else:
if '<*>' not in parentn.childD:
newNode = Node(depth=currentDepth + 1, digitOrtoken='<*>')
parentn.childD['<*>'] = newNode
parentn = newNode
else:
parentn = parentn.childD['<*>']
# If the token is matched
else:
parentn = parentn.childD[token]
currentDepth += 1
# seq1 is template
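# seqDist returns (similarity, numOfPar): similarity is the fraction of
# positions where the template token equals the message token; '<*>'
# positions are skipped and counted separately as parameters.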
def seqDist(self, seq1, seq2):
assert len(seq1) == len(seq2)
simTokens = 0
numOfPar = 0
for token1, token2 in zip(seq1, seq2):
if token1 == '<*>':
numOfPar += 1
continue #comment@haixuanguo: <*> == <*> are similar pairs
if token1 == token2:
simTokens += 1
retVal = float(simTokens) / len(seq1)
return retVal, numOfPar
def fastMatch(self, logClustL, seq):
retLogClust = None
maxSim = -1
maxNumOfPara = -1
maxClust = None
for logClust in logClustL:
curSim, curNumOfPara = self.seqDist(logClust.logTemplate, seq)
if curSim > maxSim or (curSim == maxSim and curNumOfPara > maxNumOfPara):
maxSim = curSim
maxNumOfPara = curNumOfPara
maxClust = logClust
if maxSim >= self.st:
retLogClust = maxClust
return retLogClust
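# getTemplate merges a new message into the matched template: positions
# whose tokens differ are generalized to the '<*>' wildcard.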
def getTemplate(self, seq1, seq2):
assert len(seq1) == len(seq2)
retVal = []
i = 0
for word in seq1:
if word == seq2[i]:
retVal.append(word)
else:
retVal.append('<*>')
i += 1
return retVal
def outputResult(self, logClustL):
log_templates = [0] * self.df_log.shape[0]
log_templateids = [0] * self.df_log.shape[0]
# Assign each line its template and template id. The event summary table is
# rebuilt below from value_counts, so no per-cluster rows are accumulated here.
for logClust in logClustL:
template_str = ' '.join(logClust.logTemplate)
template_id = hashlib.md5(template_str.encode('utf-8')).hexdigest()[0:8]
for logID in logClust.logIDL:
logID -= 1
log_templates[logID] = template_str
log_templateids[logID] = template_id
self.df_log['EventId'] = log_templateids
self.df_log['EventTemplate'] = log_templates
if self.keep_para:
self.df_log["ParameterList"] = self.df_log.apply(self.get_parameter_list, axis=1)
self.df_log.to_csv(os.path.join(self.savePath, self.logName + '_structured.csv'), index=False)
occ_dict = dict(self.df_log['EventTemplate'].value_counts())
df_event = pd.DataFrame()
df_event['EventTemplate'] = self.df_log['EventTemplate'].unique()
df_event['EventId'] = df_event['EventTemplate'].map(lambda x: hashlib.md5(str(x).encode('utf-8')).hexdigest()[0:8])
df_event['Occurrences'] = df_event['EventTemplate'].map(occ_dict)
df_event.to_csv(os.path.join(self.savePath, self.logName + '_templates.csv'), index=False,
columns=["EventId", "EventTemplate", "Occurrences"])
def printTree(self, node, dep):
pStr = ''
for i in range(dep):
pStr += '\t'
if node.depth == 0:
pStr += 'Root'
elif node.depth == 1:
pStr += '<' + str(node.digitOrtoken) + '>'
else:
pStr += node.digitOrtoken
print(pStr)
if node.depth == self.depth:
return 1
for child in node.childD:
self.printTree(node.childD[child], dep + 1)
def parse(self, logName):
print('Parsing file: ' + os.path.join(self.path, logName))
start_time = datetime.now()
self.logName = logName
rootNode = Node()
logCluL = []
self.load_data()
count = 0
for idx, line in self.df_log.iterrows():
logID = line['LineId']
logmessageL = self.preprocess(line['Content']).strip().split()
# logmessageL = filter(lambda x: x != '', re.split('[\s=:,]', self.preprocess(line['Content'])))
matchCluster = self.treeSearch(rootNode, logmessageL)
# Match no existing log cluster
if matchCluster is None:
newCluster = Logcluster(logTemplate=logmessageL, logIDL=[logID])
logCluL.append(newCluster)
self.addSeqToPrefixTree(rootNode, newCluster)
# Add the new log message to the existing cluster
else:
newTemplate = self.getTemplate(logmessageL, matchCluster.logTemplate)
matchCluster.logIDL.append(logID)
if ' '.join(newTemplate) != ' '.join(matchCluster.logTemplate):
matchCluster.logTemplate = newTemplate
count += 1
if count % 1000 == 0 or count == len(self.df_log):
print('Processed {0:.1f}% of log lines.'.format(count * 100.0 / len(self.df_log)), end='\r')
if not os.path.exists(self.savePath):
os.makedirs(self.savePath)
self.outputResult(logCluL)
print('Parsing done. [Time taken: {!s}]'.format(datetime.now() - start_time))
def load_data(self):
headers, regex = self.generate_logformat_regex(self.log_format)
self.df_log = self.log_to_dataframe(os.path.join(self.path, self.logName), regex, headers, self.log_format)
def preprocess(self, line):
for currentRex in self.rex:
line = re.sub(currentRex, '<*>', line)
return line
def log_to_dataframe(self, log_file, regex, headers, logformat):
""" Function to transform log file to dataframe
"""
log_messages = []
linecount = 0
cnt = 0
with open(log_file, 'r') as fin:
for line in fin.readlines():
cnt += 1
try:
match = regex.search(line.strip())
message = [match.group(header) for header in headers]
log_messages.append(message)
linecount += 1
except Exception as e:
# print("\n", line)
# print(e)
pass
print("Total size after encoding is", linecount, cnt)
logdf = pd.DataFrame(log_messages, columns=headers)
logdf.insert(0, 'LineId', None)
logdf['LineId'] = [i + 1 for i in range(linecount)]
return logdf
def generate_logformat_regex(self, logformat):
""" Function to generate regular expression to split log messages
"""
headers = []
splitters = re.split(r'(<[^<>]+>)', logformat)
regex = ''
for k in range(len(splitters)):
if k % 2 == 0:
splitter = re.sub(' +', '\\\s+', splitters[k])
regex += splitter
else:
header = splitters[k].strip('<').strip('>')
regex += '(?P<%s>.*?)' % header
headers.append(header)
regex = re.compile('^' + regex + '$')
return headers, regex
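# For a hypothetical format '<Date> <Time> <Content>' this yields headers
# ['Date', 'Time', 'Content'] and the compiled pattern
# '^(?P<Date>.*?)\s+(?P<Time>.*?)\s+(?P<Content>.*?)$'.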
def get_parameter_list(self, row):
template_regex = re.sub(r"<.{1,5}>", "<*>", str(row["EventTemplate"]))
if "<*>" not in template_regex: return []
template_regex = re.sub(r'([^A-Za-z0-9])', r'\\\1', template_regex)
template_regex = re.sub(r' +', r'\\s+', template_regex)
template_regex = "^" + template_regex.replace("\<\*\>", "(.*?)") + "$"
parameter_list = re.findall(template_regex, row["Content"])
parameter_list = parameter_list[0] if parameter_list else ()
parameter_list = list(parameter_list) if isinstance(parameter_list, tuple) else [parameter_list]
return parameter_list | [
"[email protected]"
]
| |
f1b87c3c3ede937152c85642a188029018b7f37e | 8575ccf9e7e6b2257ec7aee1539c91afa90d65a5 | /nlp/_02_textcluster/utils.py | 620fa8b64c54d929715f4fe7148b0dfb2c4d06a3 | []
| no_license | oaifaye/pyfirst | 86b8765751175f0be0fe3f95850ff018eacf51d3 | e8661b5adf53afd47fa5cb6f01cd76535d8fc8b9 | refs/heads/master | 2021-12-12T00:33:39.523597 | 2021-08-13T08:32:10 | 2021-08-13T08:32:10 | 160,138,715 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,902 | py | # -*- coding: utf-8 -*-
import pickle
import re
def readbunchobj(path):
file_obj = open(path,'rb')
bunch = pickle.load(file_obj)
file_obj.close()
return bunch
def writebunchobj(path,bunchobj):
file_obj = open(path,'wb')
pickle.dump(bunchobj,file_obj)
file_obj.close()
def readfile(savepath,encoding='UTF-8'):
fp = open(savepath,'r',encoding=encoding )
content = fp.read()
fp.close()
return content
def savefile(savepath,content,encoding='UTF-8'):
fp = open(savepath,'w',encoding=encoding)
fp.write(content)
fp.close()
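# removeHTML strips <head>/<style>/<script> blocks, any remaining tags,
# '%!...!%' template markers, HTML whitespace entities, and a set of ASCII/CJK
# punctuation, leaving plain text suitable for segmentation and clustering.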
def removeHTML(content):
content = re.sub('<\s*head[^>]*>[^<]*<\s*/\s*head\s*>','',content)
content = re.sub('<\s*style[^>]*>[^<]*<\s*/\s*style\s*>','',content)
content = re.sub('<\s*script[^>]*>[^<]*<\s*/\s*script\s*>','',content)
content = re.sub('<\s*HEAD[^>]*>[^<]*<\s*/\s*HEAD\s*>','',content)
content = re.sub('<\s*STYLE[^>]*>[^<]*<\s*/\s*STYLE\s*>','',content)
content = re.sub('<\s*SCRIPT[^>]*>[^<]*<\s*/\s*SCRIPT\s*>','',content)
content = re.sub('<[^>]+>','',content)
content = re.sub('%!.*!%','',content)
content = content.replace("\r\n","").strip()
content = content.replace("\n","").strip()
content = content.replace("\t","").strip()
content = content.replace(" ","").strip()
content = content.replace(" ","").strip()
content = content.replace(" ","").strip()
content = content.replace("“","").strip()
content = content.replace("•","").strip()
content = content.replace("”","").strip()
content = re.sub("[\s+\.\!\/_,$%^*(+\"\')]+|[+——()?【】“”!,。?、~@#¥%……&*():《》「」•●]+", "",content)
return content
# str = readfile("D:\\pythonCode\\First\\nlp\\_01_textclassify\\fastpredict\\content.txt")
# print(removeHTML(str)) | [
"[email protected]"
]
| |
0e5702ddf50c99377a738187f828539c6537451a | dfaf0169a799d81535c952a5c284d2ff6b8f2265 | /asgn_1/asgn_1_8.py | bd61dc0116ae78b066e394a8aa166f9998c72e61 | []
| no_license | RahulBantode/Python_Task_-OOPS- | 9f10cff655691518ed7147abe6503dee6013ff96 | ea7ad00e109349b999ec97588c59fb3b03a69bff | refs/heads/main | 2023-05-25T07:29:57.714579 | 2021-06-14T06:42:49 | 2021-06-14T06:42:49 | 346,242,290 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 331 | py | ''' statement : write a program which accept number from user and print that number of * on screen'''
def PrintStar(value):
i=1
while i <= value:
print("*\t",end=" ")
i = i+1
def main():
no = int(input("How many times you want print \"*\" on screen : "))
PrintStar(no)
if __name__ == '__main__':
main()
| [
"[email protected]"
]
| |
e51e5b3ae82fc2479ec96ebc8b9d4e43855b4b4f | fed6c6bdb6276d195bc565e527c3f19369d22b74 | /test/separation_angle_test/cal_sep_ang_astropy.py | c0b80cc508f5c2ebba633e20ab73f9197f6aba4f | []
| no_license | hekunlie/astrophy-research | edbe12d8dde83e0896e982f08b463fdcd3279bab | 7b2b7ada7e7421585e8993192f6111282c9cbb38 | refs/heads/master | 2021-11-15T05:08:51.271669 | 2021-11-13T08:53:33 | 2021-11-13T08:53:33 | 85,927,798 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 444 | py | from astropy.coordinates import SkyCoord
from astropy import units
from sys import argv
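# Usage (angles in degrees): python cal_sep_ang_astropy.py ra1 dec1 ra2 dec2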
ra1, dec1 = float(argv[1]), float(argv[2])
ra2, dec2 = float(argv[3]), float(argv[4])
c1 = SkyCoord(ra=ra1*units.deg, dec=dec1*units.deg,frame="fk5")
c2 = SkyCoord(ra=ra2*units.deg, dec=dec2*units.deg,frame="fk5")
sep = c1.separation(c2)
print("(%10.5f,%10.5f) <-- %10.5f rad (%10.5f deg) --> (%10.5f,%10.5f)"%(ra1, dec1,sep.radian, sep.deg,ra2, dec2))
| [
"[email protected]"
]
| |
b98b5daca8f6e76fde1e08f8c2ad2abf8451feeb | d7390fea6c7f712ee32be6d3478835d965d795e0 | /py26_24day/py26_api_test/testcases/test_add.py | fe1e0c2cdd31c1995b8b91963f0cff637e38ac2d | []
| no_license | luwenchun/Automated_Test | 2f424655d80127e3ed98657869021a775beca868 | 79b9937cfc0841b0a80d4fd45d8ff467654b5b55 | refs/heads/master | 2021-02-10T15:23:08.446463 | 2020-03-26T10:39:38 | 2020-03-26T10:39:38 | 244,393,626 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,037 | py | """
============================
Author:柠檬班-木森
Time:2020/2/28 21:21
E-mail:[email protected]
Company:湖南零檬信息技术有限公司
============================
"""
import os
import unittest
import jsonpath
from py26_24day.py26_api_test.common.readexcel import ReadExcel
from py26_24day.py26_api_test.common.handlepath import DATADIR
from py26_24day.py26_api_test.library.ddt import ddt, data
from py26_24day.py26_api_test.common.handleconfig import conf
from py26_24day.py26_api_test.common.handlerequests import SendRequest
from py26_24day.py26_api_test.common.handle_data import CaseDate, replace_data
from py26_24day.py26_api_test.common.handlelog import log
file_path = os.path.join(DATADIR, "apicases.xlsx")
@ddt
class TESTAdd(unittest.TestCase):
excel = ReadExcel(file_path, "add")
cases = excel.read_data()
request = SendRequest()
@classmethod
def setUpClass(cls):
"""管理员账户登录"""
url = conf.get("env", "url") + "/member/login"
data = {
"mobile_phone": conf.get("test_data", "admin_phone"),
"pwd": conf.get("test_data", "admin_pwd")
}
headers = eval(conf.get("env", "headers"))
response = cls.request.send(url=url, method="post", json=data, headers=headers)
res = response.json()
token = jsonpath.jsonpath(res, "$..token")[0]
token_type = jsonpath.jsonpath(res, "$..token_type")[0]
member_id = str(jsonpath.jsonpath(res, "$..id")[0])
# Save the extracted values as attributes on CaseDate
CaseDate.admin_token_value = token_type + " " + token
CaseDate.admin_member_id = member_id
@data(*cases)
def test_add(self, case):
# 第一步:准备数据
url = conf.get("env", "url") + case["url"]
headers = eval(conf.get("env", "headers"))
headers["Authorization"] = getattr(CaseDate, "admin_token_value")
data = eval(replace_data(case["data"]))
expected = eval(case["expected"])
method = case["method"]
row = case["case_id"] + 1
# Step 2: send the request and get the actual result
response = self.request.send(url=url, method=method, json=data, headers=headers)
res = response.json()
# Step 3: assert (compare the expected and actual results)
try:
self.assertEqual(expected["code"], res["code"])
self.assertEqual(expected["msg"], res["msg"])
# Database verification
except AssertionError as e:
print("预期结果:", expected)
print("实际结果:", res)
self.excel.write_data(row=row, column=8, value="Failed")
log.error("Case: {} did not pass".format(case["title"]))
log.exception(e)
raise e
else:
            self.excel.write_data(row=row, column=8, value="Passed")
            log.info("Test case '{}' passed".format(case["title"]))
| [
"[email protected]"
]
| |
3d058190a3b777ae763a5449f9e672a762accbc5 | 5a01774b1815a3d9a5b02b26ca4d6ba9ecf41662 | /Module 2/Chapter04/django-myproject-04/likes/views.py | 8ee413f495c99889e0eb88da77a52dc41aa3f015 | [
"MIT"
]
| permissive | PacktPublishing/Django-Web-Development-with-Python | bf08075ff0a85df41980cb5e272877e01177fd07 | 9f619f56553b5f0bca9b5ee2ae32953e142df1b2 | refs/heads/master | 2023-04-27T22:36:07.610076 | 2023-01-30T08:35:11 | 2023-01-30T08:35:11 | 66,646,080 | 39 | 41 | MIT | 2023-04-17T10:45:45 | 2016-08-26T12:30:45 | Python | UTF-8 | Python | false | false | 1,718 | py | # -*- coding: UTF-8 -*-
from __future__ import unicode_literals
import json
from django.http import HttpResponse
from django.views.decorators.cache import never_cache
from django.contrib.contenttypes.models import ContentType
from django.shortcuts import render
from django.views.decorators.csrf import csrf_exempt
from django.contrib.auth.decorators import login_required
from django.core.urlresolvers import reverse_lazy
from .models import Like
from .templatetags.likes_tags import get_likes_count
@never_cache
@csrf_exempt
def json_set_like(request, content_type_id, object_id):
"""
Sets the object as a favorite for the current user
"""
result = {
"success": False,
}
if request.user.is_authenticated() and request.method == "POST":
content_type = ContentType.objects.get(id=content_type_id)
obj = content_type.get_object_for_this_type(pk=object_id)
like, is_created = Like.objects.get_or_create(
content_type=ContentType.objects.get_for_model(obj),
object_id=obj.pk,
user=request.user,
)
if not is_created:
like.delete()
result = {
"success": True,
"obj": unicode(obj),
"action": is_created and "added" or "removed",
"count": get_likes_count(obj),
}
json_str = json.dumps(result, ensure_ascii=False, encoding="utf8")
return HttpResponse(json_str, content_type="application/json; charset=utf-8")
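# Toggle semantics (descriptive sketch): POSTing twice for the same object
# first creates the Like ("action": "added") and then deletes it
# ("action": "removed"); the JSON response carries the refreshed count
# either way.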
@login_required(login_url=reverse_lazy("admin:login"))
def liked_object_list(request):
likes = Like.objects.filter(user=request.user)
return render(request, "likes/liked_object_list.html", {"object_list": likes}) | [
"[email protected]"
]
| |
6d53af45c900b528d3d52855a977da0f57432d5a | eee87bac78475dbb5b88f91bc1bc68ed63f28e5f | /virtual/bin/django-admin.py | 512950e949faf86b67ae5f3628fd36916aa91064 | [
"MIT"
]
| permissive | markmurimi/neighbour-hood | 8c595ee732ead18a33be8a6ab49078d9fc27c4d6 | dc5a3aaefd77958ca4990e651e0ba8f0b22d7cf8 | refs/heads/master | 2020-03-18T14:06:58.002371 | 2018-05-30T12:09:19 | 2018-05-30T12:09:19 | 134,830,368 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 167 | py | #!/home/mark/Documents/neighbour-watch/virtual/bin/python3.6
from django.core import management
if __name__ == "__main__":
management.execute_from_command_line()
| [
"[email protected]"
]
| |
afa4af83ece4b4704127d44f4e5527dced662658 | dd204762b0a9cdd93634e15ec981b5f868ec4315 | /apps/documents/migrations/0004_invoice.py | 6d174b1c1c2357bab34e08fcb0a6a92b8b9b6634 | []
| no_license | pannkotsky/sales_outlet | 49713b39759d8c3dbedfce96953ba9c47db3d521 | 15753582f8413a98ad7259bb6a3d62e32415f632 | refs/heads/master | 2021-08-30T11:05:23.589421 | 2017-12-17T16:19:56 | 2017-12-17T16:19:56 | 112,077,373 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,425 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.7 on 2017-11-29 20:33
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('products', '0004_auto_20171129_2118'),
('documents', '0003_auto_20171129_2210'),
]
operations = [
migrations.CreateModel(
name='Invoice',
fields=[
('number', models.CharField(max_length=15, primary_key=True, serialize=False, verbose_name='Number')),
('date', models.DateField(verbose_name='Date')),
('product_quantity', models.IntegerField(verbose_name='Product quantity')),
('contract', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='invoices', to='documents.Contract', verbose_name='Contract')),
('packaging', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='invoices', to='products.Packaging', verbose_name='Packaging')),
('product', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='invoices', to='products.Product', verbose_name='Product')),
],
options={
'verbose_name': 'Invoice',
'verbose_name_plural': 'Invoices',
},
),
]
| [
"[email protected]"
]
| |
23e5189244ab17f8795a0d2d136873de29c91f73 | 74482894c61156c13902044b4d39917df8ed9551 | /cryptoapis/model/address_tokens_transaction_unconfirmed_ethereumerc721token.py | 78e6e8cccb380fb74e25368b6c6d05792ff6fb74 | [
"MIT"
]
| permissive | xan187/Crypto_APIs_2.0_SDK_Python | bb8898556ba014cc7a4dd31b10e24bec23b74a19 | a56c75df54ef037b39be1315ed6e54de35bed55b | refs/heads/main | 2023-06-22T15:45:08.273635 | 2021-07-21T03:41:05 | 2021-07-21T03:41:05 | 387,982,780 | 1 | 0 | NOASSERTION | 2021-07-21T03:35:29 | 2021-07-21T03:35:29 | null | UTF-8 | Python | false | false | 7,724 | py | """
CryptoAPIs
Crypto APIs 2.0 is a complex and innovative infrastructure layer that radically simplifies the development of any Blockchain and Crypto related applications. Organized around REST, Crypto APIs 2.0 can assist both novice Bitcoin/Ethereum enthusiasts and crypto experts with the development of their blockchain applications. Crypto APIs 2.0 provides unified endpoints and data, raw data, automatic tokens and coins forwardings, callback functionalities, and much more. # noqa: E501
The version of the OpenAPI document: 2.0.0
Contact: [email protected]
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from cryptoapis.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
)
class AddressTokensTransactionUnconfirmedEthereumerc721token(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
}
validations = {
}
additional_properties_type = None
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
return {
'name': (str,), # noqa: E501
'symbol': (str,), # noqa: E501
'token_id': (str,), # noqa: E501
'contract_address': (str,), # noqa: E501
}
@cached_property
def discriminator():
return None
attribute_map = {
'name': 'name', # noqa: E501
'symbol': 'symbol', # noqa: E501
'token_id': 'tokenId', # noqa: E501
'contract_address': 'contractAddress', # noqa: E501
}
_composed_schemas = {}
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, name, symbol, token_id, contract_address, *args, **kwargs): # noqa: E501
"""AddressTokensTransactionUnconfirmedEthereumerc721token - a model defined in OpenAPI
Args:
name (str): Specifies the name of the token.
symbol (str): Specifies an identifier of the token, where up to five alphanumeric characters can be used for it.
token_id (str): Specifies the unique ID of the token.
contract_address (str): Specifies the address of the contract.
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
self.name = name
self.symbol = symbol
self.token_id = token_id
self.contract_address = contract_address
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
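# Construction sketch (illustrative values, not from the original module):
#   AddressTokensTransactionUnconfirmedEthereumerc721token(
#       name="SomeToken", symbol="STK", token_id="1",
#       contract_address="0x0000000000000000000000000000000000000000")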
| [
"[email protected]"
]
| |
edb68d6f607d6ffce8ccd371008cdda62d9db733 | afebbb07b2b4eada17a5853c1ce63b4075d280df | /marketsim/gen/_out/math/_source.py | fe0ac6dcc7f72a71b5423b066a055fd7a079c5e3 | []
| no_license | peter1000/marketsimulator | 8c0a55fc6408b880311d3ad49defc55e9af57824 | 1b677200a9d5323f2970c83f076c2b83d39d4fe6 | refs/heads/master | 2021-01-18T01:39:04.869755 | 2015-03-29T17:47:24 | 2015-03-29T17:47:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,812 | py | # generated with class generator.python.accessor$Import
from marketsim import registry
from marketsim.gen._out.math._moving import Moving
@registry.expose(["-", "Source"])
class Source_mathMoving(object):
"""
"""
def __init__(self, x = None):
from marketsim.gen._out.math._moving import Moving_IObservableFloatFloat as _math_Moving_IObservableFloatFloat
from marketsim import deref_opt
self.x = x if x is not None else deref_opt(_math_Moving_IObservableFloatFloat())
@property
def label(self):
return repr(self)
_properties = {
'x' : Moving
}
def __repr__(self):
return "Moving_{%(timeframe)s}(%(source)s)" % dict([ (name, getattr(self, name)) for name in self._properties.iterkeys() ])
def bind_ex(self, ctx):
if self.__dict__.get('_bound_ex', False): return
self.__dict__['_bound_ex'] = True
if self.__dict__.get('_processing_ex', False):
raise Exception('cycle detected')
self.__dict__['_processing_ex'] = True
self.__dict__['_ctx_ex'] = ctx.updatedFrom(self)
self.x.bind_ex(self._ctx_ex)
if hasattr(self, '_subscriptions'):
for s in self._subscriptions: s.bind_ex(self.__dict__['_ctx_ex'])
self.__dict__['_processing_ex'] = False
def reset_ex(self, generation):
if self.__dict__.get('_reset_generation_ex', -1) == generation: return
self.__dict__['_reset_generation_ex'] = generation
if self.__dict__.get('_processing_ex', False):
raise Exception('cycle detected')
self.__dict__['_processing_ex'] = True
self.x.reset_ex(generation)
if hasattr(self, '_subscriptions'):
for s in self._subscriptions: s.reset_ex(generation)
self.__dict__['_processing_ex'] = False
def typecheck(self):
from marketsim import rtti
from marketsim.gen._out.math._moving import Moving
rtti.typecheck(Moving, self.x)
def registerIn(self, registry):
if self.__dict__.get('_id', False): return
self.__dict__['_id'] = True
if self.__dict__.get('_processing_ex', False):
raise Exception('cycle detected')
self.__dict__['_processing_ex'] = True
registry.insert(self)
self.x.registerIn(registry)
if hasattr(self, '_subscriptions'):
for s in self._subscriptions: s.registerIn(registry)
self.__dict__['_processing_ex'] = False
@property
def dereference(self):
return self.x.source
# generated with class generator.python.accessor$Import
from marketsim import registry
from marketsim.gen._out.math._ew import EW
@registry.expose(["-", "Source"])
class Source_mathEW(object):
"""
"""
def __init__(self, x = None):
from marketsim.gen._out.math._ew import EW_IObservableFloatFloat as _math_EW_IObservableFloatFloat
from marketsim import deref_opt
self.x = x if x is not None else deref_opt(_math_EW_IObservableFloatFloat())
@property
def label(self):
return repr(self)
_properties = {
'x' : EW
}
def __repr__(self):
return "EW_{%(alpha)s}(%(source)s)" % dict([ (name, getattr(self, name)) for name in self._properties.iterkeys() ])
def bind_ex(self, ctx):
if self.__dict__.get('_bound_ex', False): return
self.__dict__['_bound_ex'] = True
if self.__dict__.get('_processing_ex', False):
raise Exception('cycle detected')
self.__dict__['_processing_ex'] = True
self.__dict__['_ctx_ex'] = ctx.updatedFrom(self)
self.x.bind_ex(self._ctx_ex)
if hasattr(self, '_subscriptions'):
for s in self._subscriptions: s.bind_ex(self.__dict__['_ctx_ex'])
self.__dict__['_processing_ex'] = False
def reset_ex(self, generation):
if self.__dict__.get('_reset_generation_ex', -1) == generation: return
self.__dict__['_reset_generation_ex'] = generation
if self.__dict__.get('_processing_ex', False):
raise Exception('cycle detected')
self.__dict__['_processing_ex'] = True
self.x.reset_ex(generation)
if hasattr(self, '_subscriptions'):
for s in self._subscriptions: s.reset_ex(generation)
self.__dict__['_processing_ex'] = False
def typecheck(self):
from marketsim import rtti
from marketsim.gen._out.math._ew import EW
rtti.typecheck(EW, self.x)
def registerIn(self, registry):
if self.__dict__.get('_id', False): return
self.__dict__['_id'] = True
if self.__dict__.get('_processing_ex', False):
raise Exception('cycle detected')
self.__dict__['_processing_ex'] = True
registry.insert(self)
self.x.registerIn(registry)
if hasattr(self, '_subscriptions'):
for s in self._subscriptions: s.registerIn(registry)
self.__dict__['_processing_ex'] = False
@property
def dereference(self):
return self.x.source
# generated with class generator.python.accessor$Import
from marketsim import registry
from marketsim.gen._out.math._macd import macd
@registry.expose(["-", "Source"])
class Source_mathmacd(object):
"""
"""
def __init__(self, x = None):
from marketsim.gen._out.math._macd import macd_IObservableFloatFloatFloat as _math_macd_IObservableFloatFloatFloat
from marketsim import deref_opt
self.x = x if x is not None else deref_opt(_math_macd_IObservableFloatFloatFloat())
@property
def label(self):
return repr(self)
_properties = {
'x' : macd
}
def __repr__(self):
return "MACD_{%(fast)s}^{%(slow)s}(%(source)s)" % dict([ (name, getattr(self, name)) for name in self._properties.iterkeys() ])
def bind_ex(self, ctx):
if self.__dict__.get('_bound_ex', False): return
self.__dict__['_bound_ex'] = True
if self.__dict__.get('_processing_ex', False):
raise Exception('cycle detected')
self.__dict__['_processing_ex'] = True
self.__dict__['_ctx_ex'] = ctx.updatedFrom(self)
self.x.bind_ex(self._ctx_ex)
if hasattr(self, '_subscriptions'):
for s in self._subscriptions: s.bind_ex(self.__dict__['_ctx_ex'])
self.__dict__['_processing_ex'] = False
def reset_ex(self, generation):
if self.__dict__.get('_reset_generation_ex', -1) == generation: return
self.__dict__['_reset_generation_ex'] = generation
if self.__dict__.get('_processing_ex', False):
raise Exception('cycle detected')
self.__dict__['_processing_ex'] = True
self.x.reset_ex(generation)
if hasattr(self, '_subscriptions'):
for s in self._subscriptions: s.reset_ex(generation)
self.__dict__['_processing_ex'] = False
def typecheck(self):
from marketsim import rtti
from marketsim.gen._out.math._macd import macd
rtti.typecheck(macd, self.x)
def registerIn(self, registry):
if self.__dict__.get('_id', False): return
self.__dict__['_id'] = True
if self.__dict__.get('_processing_ex', False):
raise Exception('cycle detected')
self.__dict__['_processing_ex'] = True
registry.insert(self)
self.x.registerIn(registry)
if hasattr(self, '_subscriptions'):
for s in self._subscriptions: s.registerIn(registry)
self.__dict__['_processing_ex'] = False
@property
def dereference(self):
return self.x.source
# generated with class generator.python.accessor$Import
from marketsim import registry
from marketsim.gen._out.math._rsi import RSI
@registry.expose(["-", "Source"])
class Source_mathRSI(object):
"""
"""
def __init__(self, x = None):
from marketsim.gen._out.math._rsi import RSI_IObservableFloatFloatFloat as _math_RSI_IObservableFloatFloatFloat
from marketsim import deref_opt
self.x = x if x is not None else deref_opt(_math_RSI_IObservableFloatFloatFloat())
@property
def label(self):
return repr(self)
_properties = {
'x' : RSI
}
def __repr__(self):
return "RSIRaw_{%(timeframe)s}^{%(alpha)s}(%(source)s)" % dict([ (name, getattr(self, name)) for name in self._properties.iterkeys() ])
def bind_ex(self, ctx):
if self.__dict__.get('_bound_ex', False): return
self.__dict__['_bound_ex'] = True
if self.__dict__.get('_processing_ex', False):
raise Exception('cycle detected')
self.__dict__['_processing_ex'] = True
self.__dict__['_ctx_ex'] = ctx.updatedFrom(self)
self.x.bind_ex(self._ctx_ex)
if hasattr(self, '_subscriptions'):
for s in self._subscriptions: s.bind_ex(self.__dict__['_ctx_ex'])
self.__dict__['_processing_ex'] = False
def reset_ex(self, generation):
if self.__dict__.get('_reset_generation_ex', -1) == generation: return
self.__dict__['_reset_generation_ex'] = generation
if self.__dict__.get('_processing_ex', False):
raise Exception('cycle detected')
self.__dict__['_processing_ex'] = True
self.x.reset_ex(generation)
if hasattr(self, '_subscriptions'):
for s in self._subscriptions: s.reset_ex(generation)
self.__dict__['_processing_ex'] = False
def typecheck(self):
from marketsim import rtti
from marketsim.gen._out.math._rsi import RSI
rtti.typecheck(RSI, self.x)
def registerIn(self, registry):
if self.__dict__.get('_id', False): return
self.__dict__['_id'] = True
if self.__dict__.get('_processing_ex', False):
raise Exception('cycle detected')
self.__dict__['_processing_ex'] = True
registry.insert(self)
self.x.registerIn(registry)
if hasattr(self, '_subscriptions'):
for s in self._subscriptions: s.registerIn(registry)
self.__dict__['_processing_ex'] = False
@property
def dereference(self):
return self.x.source
# generated with class generator.python.accessor$Import
from marketsim import registry
from marketsim.gen._out.math._cumulative import Cumulative
@registry.expose(["-", "Source"])
class Source_mathCumulative(object):
"""
"""
def __init__(self, x = None):
from marketsim.gen._out.math._cumulative import Cumulative_IObservableFloat as _math_Cumulative_IObservableFloat
from marketsim import deref_opt
self.x = x if x is not None else deref_opt(_math_Cumulative_IObservableFloat())
@property
def label(self):
return repr(self)
_properties = {
'x' : Cumulative
}
def __repr__(self):
return "Source(%(x)s)" % dict([ (name, getattr(self, name)) for name in self._properties.iterkeys() ])
def bind_ex(self, ctx):
if self.__dict__.get('_bound_ex', False): return
self.__dict__['_bound_ex'] = True
if self.__dict__.get('_processing_ex', False):
raise Exception('cycle detected')
self.__dict__['_processing_ex'] = True
self.__dict__['_ctx_ex'] = ctx.updatedFrom(self)
self.x.bind_ex(self._ctx_ex)
if hasattr(self, '_subscriptions'):
for s in self._subscriptions: s.bind_ex(self.__dict__['_ctx_ex'])
self.__dict__['_processing_ex'] = False
def reset_ex(self, generation):
if self.__dict__.get('_reset_generation_ex', -1) == generation: return
self.__dict__['_reset_generation_ex'] = generation
if self.__dict__.get('_processing_ex', False):
raise Exception('cycle detected')
self.__dict__['_processing_ex'] = True
self.x.reset_ex(generation)
if hasattr(self, '_subscriptions'):
for s in self._subscriptions: s.reset_ex(generation)
self.__dict__['_processing_ex'] = False
def typecheck(self):
from marketsim import rtti
from marketsim.gen._out.math._cumulative import Cumulative
rtti.typecheck(Cumulative, self.x)
def registerIn(self, registry):
if self.__dict__.get('_id', False): return
self.__dict__['_id'] = True
if self.__dict__.get('_processing_ex', False):
raise Exception('cycle detected')
self.__dict__['_processing_ex'] = True
registry.insert(self)
self.x.registerIn(registry)
if hasattr(self, '_subscriptions'):
for s in self._subscriptions: s.registerIn(registry)
self.__dict__['_processing_ex'] = False
@property
def dereference(self):
return self.x.source
def Source(x = None):
from marketsim import rtti
from marketsim.gen._out.math._macd import macd
from marketsim.gen._out.math._moving import Moving
from marketsim.gen._out.math._rsi import RSI
from marketsim.gen._out.math._cumulative import Cumulative
from marketsim.gen._out.math._ew import EW
if x is None or rtti.can_be_casted(x, Moving):
return Source_mathMoving(x)
if x is None or rtti.can_be_casted(x, EW):
return Source_mathEW(x)
if x is None or rtti.can_be_casted(x, macd):
return Source_mathmacd(x)
if x is None or rtti.can_be_casted(x, RSI):
return Source_mathRSI(x)
if x is None or rtti.can_be_casted(x, Cumulative):
return Source_mathCumulative(x)
raise Exception('Cannot find suitable overload for Source('+str(x) +':'+ str(type(x))+')')
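# Dispatch sketch (illustrative): Source(mv) for a Moving instance returns a
# Source_mathMoving wrapper whose .dereference property yields mv.source;
# Source(None) also resolves to the Moving overload, since that branch is
# checked first.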
| [
"[email protected]"
]
| |
3ebe10cba243e3e9beff7ce90b952a15a1e05a57 | 7bcec8a9c6a240ec0888bec4179f536046464005 | /moviesys/moviesys/.history/library/views_20210324175438.py | b6544870318d275779993f680f8eb2a059283e1a | []
| no_license | yifanzhang13/MovieManagementSystem_group5 | c64e5810914c3d33ae6cd94e8eed5dc5a3962181 | 4cca1a4299311681d69b2347ca8d7b02e0846ebc | refs/heads/main | 2023-03-29T08:30:26.655108 | 2021-04-01T15:42:52 | 2021-04-01T15:42:52 | 344,417,874 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,616 | py | from django.shortcuts import render
from .models import Movies, Users, Ratings, Links, Tags
from django.db import connection
from django.views import generic
from django.shortcuts import get_object_or_404
from django.http import HttpResponseRedirect
from django.urls import reverse
from library.forms import SearchMovieForm
# Create your views here.
def index(request):
cursor = connection.cursor()
try:
num_movies = cursor.execute('SELECT * FROM library_movies')
rating_5 = cursor.execute('SELECT * FROM library_ratings WHERE RatingScore = 5')
rating_4 = cursor.execute('SELECT * FROM library_ratings WHERE RatingScore = 4')
rating_3 = cursor.execute('SELECT * FROM library_ratings WHERE RatingScore = 3')
rating_2 = cursor.execute('SELECT * FROM library_ratings WHERE RatingScore = 2')
rating_1 = cursor.execute('SELECT * FROM library_ratings WHERE RatingScore = 1')
finally:
cursor.close()
context = {
'num_movies':num_movies,
'rating_5':rating_5,
'rating_4':rating_4,
'rating_3':rating_3,
'rating_2':rating_2,
'rating_1':rating_1,
}
return render(request, 'index.html', context=context)
def MoviesView(request):
cursor = connection.cursor()
try:
movies = cursor.execute('SELECT * FROM library_movies')
results = cursor.fetchall()
finally:
cursor.close()
all = []
for row in results:
dic = {
'MovieID':row[0],
'MovieTitle':row[1],
'MovieGenres':row[2],
}
all.append(dic)
context = {
'movies':all,
}
return render(request, 'Movies.html', context=context)
class MovieDetailView(generic.DetailView):
model = Movies
def MovieDetail(request):
    # form = SearchMovieForm()
    # if request.method == 'POST':
    #     form = SearchMovieForm(request.POST)
    #     if form.is_valid():
    #         return HttpResponseRedirect('http://127.0.0.1:8000/library/movies/'+str(2))
    # context = {
    #     'form': form,
    # }
    # return render(request, 'library/movies_list.html', context)
    pass  # the body above is commented out; pass keeps the module importable
class MoviesListView(generic.ListView):
# The generic view will query the database to get all records for the specified model
# (Movies) then render a template located
# at /locallibrary/catalog/templates/catalog/Movies_list.html (which we will create below).
    # Within the template you can access the list of movies with the
    # template variable named object_list OR movies_list (i.e. generically "the_model_name_list").
model = Movies | [
"[email protected]"
]
| |
0d3b8f95ece7e037e2f572adc7c258d76b25e936 | 6418c60849119c2e956bf534c4118ec4858de648 | /ax/modelbridge/transforms/one_hot.py | f16d151519ced8ad0ce026918093c5e33cc43449 | [
"MIT"
]
| permissive | MalkeshDalia/Ax | c1595871871bd18183ad03692487f33df760bfaa | f458275d96c858cddc835dfefd34114de34d8b28 | refs/heads/master | 2023-04-16T00:34:19.726437 | 2019-05-01T05:16:22 | 2019-05-01T05:16:23 | 184,374,837 | 1 | 0 | MIT | 2023-03-21T21:47:24 | 2019-05-01T05:51:10 | Jupyter Notebook | UTF-8 | Python | false | false | 6,387 | py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
from typing import Dict, List, Optional, TypeVar
import numpy as np
from ax.core.observation import ObservationData, ObservationFeatures
from ax.core.parameter import ChoiceParameter, Parameter, ParameterType, RangeParameter
from ax.core.search_space import SearchSpace
from ax.core.types import TConfig, TParameterization
from ax.modelbridge.transforms.base import Transform
from ax.modelbridge.transforms.rounding import (
randomized_onehot_round,
strict_onehot_round,
)
from sklearn.preprocessing import LabelBinarizer, LabelEncoder
OH_PARAM_INFIX = "_OH_PARAM_"
T = TypeVar("T")
class OneHotEncoder:
"""Joins the two encoders needed for OneHot transform."""
int_encoder: LabelEncoder
label_binarizer: LabelBinarizer
def __init__(self, values: List[T]) -> None:
self.int_encoder = LabelEncoder().fit(values)
self.label_binarizer = LabelBinarizer().fit(self.int_encoder.transform(values))
def transform(self, labels: List[T]) -> np.ndarray:
"""One hot encode a list of labels."""
return self.label_binarizer.transform(self.int_encoder.transform(labels))
def inverse_transform(self, encoded_labels: List[T]) -> List[T]:
"""Inverse transorm a list of one hot encoded labels."""
return self.int_encoder.inverse_transform(
self.label_binarizer.inverse_transform(encoded_labels)
)
@property
def classes(self) -> np.ndarray:
"""Return number of classes discovered while fitting transform."""
return (
self.label_binarizer.classes_ # pyre-ignore[16]: missing attribute classes_
)
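# Minimal round-trip sketch for the encoder (illustrative, not part of the
# original module):
#   enc = OneHotEncoder(["a", "b", "c"])
#   enc.transform(["b"])                          # -> array([[0, 1, 0]])
#   enc.inverse_transform(np.array([[0, 1, 0]]))  # -> array(['b'], ...)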
class OneHot(Transform):
"""Convert categorical parameters (unordered ChoiceParameters) to
one-hot-encoded parameters.
Does not convert task parameters.
Parameters will be one-hot-encoded, yielding a set of RangeParameters,
of type float, on [0, 1]. If there are two values, one single RangeParameter
will be yielded, otherwise there will be a new RangeParameter for each
ChoiceParameter value.
In the reverse transform, floats can be converted to a one-hot encoded vector
using one of two methods:
Strict rounding: Choose the maximum value. With levels ['a', 'b', 'c'] and
float values [0.2, 0.4, 0.3], the restored parameter would be set to 'b'.
    Ties are broken randomly, so with values [0.2, 0.4, 0.4] the parameter is
    randomly set to 'b' or 'c'.
Randomized rounding: Sample from the distribution. Float values
[0.2, 0.4, 0.3] are transformed to 'a' w.p.
0.2/0.9, 'b' w.p. 0.4/0.9, or 'c' w.p. 0.3/0.9.
Type of rounding can be set using transform_config['rounding'] to either
'strict' or 'randomized'. Defaults to strict.
Transform is done in-place.
"""
def __init__(
self,
search_space: SearchSpace,
observation_features: List[ObservationFeatures],
observation_data: List[ObservationData],
config: Optional[TConfig] = None,
) -> None:
# Identify parameters that should be transformed
self.rounding = "strict"
if config is not None:
self.rounding = config.get("rounding", "strict")
self.encoder: Dict[str, OneHotEncoder] = {}
self.encoded_parameters: Dict[str, List[str]] = {}
for p in search_space.parameters.values():
if isinstance(p, ChoiceParameter) and not p.is_ordered and not p.is_task:
self.encoder[p.name] = OneHotEncoder(p.values)
nc = len(self.encoder[p.name].classes)
if nc == 2:
# Two levels handled in one parameter
self.encoded_parameters[p.name] = [p.name + OH_PARAM_INFIX]
else:
self.encoded_parameters[p.name] = [
"{}{}_{}".format(p.name, OH_PARAM_INFIX, i) for i in range(nc)
]
def transform_observation_features(
self, observation_features: List[ObservationFeatures]
) -> List[ObservationFeatures]:
for obsf in observation_features:
for p_name, encoder in self.encoder.items():
if p_name in obsf.parameters:
vals = encoder.transform(labels=[obsf.parameters.pop(p_name)])[0]
updated_parameters: TParameterization = {
self.encoded_parameters[p_name][i]: v
for i, v in enumerate(vals)
}
obsf.parameters.update(updated_parameters)
return observation_features
def transform_search_space(self, search_space: SearchSpace) -> SearchSpace:
transformed_parameters: Dict[str, Parameter] = {}
for p in search_space.parameters.values():
if p.name in self.encoded_parameters:
for new_p_name in self.encoded_parameters[p.name]:
transformed_parameters[new_p_name] = RangeParameter(
name=new_p_name,
parameter_type=ParameterType.FLOAT,
lower=0,
upper=1,
)
else:
transformed_parameters[p.name] = p
return SearchSpace(
parameters=list(transformed_parameters.values()),
parameter_constraints=[
pc.clone() for pc in search_space.parameter_constraints
],
)
def untransform_observation_features(
self, observation_features: List[ObservationFeatures]
) -> List[ObservationFeatures]:
for obsf in observation_features:
for p_name in self.encoder.keys():
x = np.array(
[obsf.parameters.pop(p) for p in self.encoded_parameters[p_name]]
)
if self.rounding == "strict":
x = strict_onehot_round(x)
else:
x = randomized_onehot_round(x)
val = self.encoder[p_name].inverse_transform(encoded_labels=x[None, :])[
0
]
if isinstance(val, np.bool_):
val = bool(val) # Numpy bools don't serialize
obsf.parameters[p_name] = val
return observation_features
| [
"[email protected]"
]
| |
e843d418d54ba8d491c42c79498e47d2b6f448d8 | 9b422078f4ae22fe16610f2ebc54b8c7d905ccad | /xlsxwriter/test/comparison/test_chart_axis15.py | 6fa48ec645b2b45ff9c7624ac1320000ad10b66f | [
"BSD-2-Clause-Views"
]
| permissive | projectsmahendra/XlsxWriter | 73d8c73ea648a911deea63cb46b9069fb4116b60 | 9b9d6fb283c89af8b6c89ad20f72b8208c2aeb45 | refs/heads/master | 2023-07-21T19:40:41.103336 | 2023-07-08T16:54:37 | 2023-07-08T16:54:37 | 353,636,960 | 0 | 0 | NOASSERTION | 2021-04-01T08:57:21 | 2021-04-01T08:57:20 | null | UTF-8 | Python | false | false | 1,360 | py | ###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013-2021, John McNamara, [email protected]
#
from ..excel_comparison_test import ExcelComparisonTest
from ...workbook import Workbook
class TestCompareXLSXFiles(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename('chart_axis15.xlsx')
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
chart = workbook.add_chart({'type': 'column'})
chart.axis_ids = [45705856, 54518528]
data = [
[1, 2, 3, 4, 5],
[2, 4, 6, 8, 10],
[3, 6, 9, 12, 15],
]
worksheet.write_column('A1', data[0])
worksheet.write_column('B1', data[1])
worksheet.write_column('C1', data[2])
chart.add_series({'values': '=Sheet1!$A$1:$A$5'})
chart.add_series({'values': '=Sheet1!$B$1:$B$5'})
chart.add_series({'values': '=Sheet1!$C$1:$C$5'})
chart.set_y_axis({'minor_unit': 0.4, 'major_unit': 2})
worksheet.insert_chart('E9', chart)
workbook.close()
self.assertExcelEqual()
| [
"[email protected]"
]
| |
1abd899a1740721cefc860f28ccdc3395587f893 | eea01a7a4625b0ffada7f5ea9909537f159a706e | /api/tests/opentrons/protocol_engine/execution/test_gantry_mover.py | ac621cfee6533e3ca72547639e2cd5e30353cf50 | [
"LicenseRef-scancode-warranty-disclaimer",
"Apache-2.0"
]
| permissive | XifanRD/opentrons | b9fe114232fba26d789a9c4cf24b6ec6338134dc | d02dc6f77c40d85daa1c37f2073e4672e5f5f445 | refs/heads/edge | 2023-07-09T06:43:13.526571 | 2023-06-30T01:12:18 | 2023-06-30T01:12:18 | 207,443,994 | 0 | 0 | Apache-2.0 | 2019-12-19T09:41:37 | 2019-09-10T02:05:43 | null | UTF-8 | Python | false | false | 15,231 | py | """Test gantry movement handler with hardware API."""
from __future__ import annotations
import pytest
from decoy import Decoy
from typing import TYPE_CHECKING
from opentrons.types import Mount, MountType, Point
from opentrons.hardware_control import API as HardwareAPI
from opentrons.hardware_control.types import (
CriticalPoint,
Axis as HardwareAxis,
)
from opentrons.hardware_control.errors import MustHomeError as HardwareMustHomeError
from opentrons.motion_planning import Waypoint
from opentrons.protocol_engine.state import StateView, PipetteLocationData
from opentrons.protocol_engine.types import MotorAxis, DeckPoint, CurrentWell
from opentrons.protocol_engine.errors import MustHomeError, InvalidAxisForRobotType
from opentrons.protocol_engine.execution.gantry_mover import (
HardwareGantryMover,
VirtualGantryMover,
create_gantry_mover,
VIRTUAL_MAX_OT3_HEIGHT,
)
if TYPE_CHECKING:
from opentrons.hardware_control.ot3api import OT3API
@pytest.fixture
def mock_hardware_api(decoy: Decoy) -> HardwareAPI:
"""Get a mock in the shape of a HardwareAPI."""
return decoy.mock(cls=HardwareAPI)
@pytest.fixture
def mock_state_view(decoy: Decoy) -> StateView:
"""Get a mock in the shape of a StateView."""
return decoy.mock(cls=StateView)
@pytest.fixture
def hardware_subject(
mock_hardware_api: HardwareAPI,
mock_state_view: StateView,
) -> HardwareGantryMover:
"""Create a GantryMover with its dependencies mocked out."""
return HardwareGantryMover(
hardware_api=mock_hardware_api,
state_view=mock_state_view,
)
@pytest.fixture
def virtual_subject(
mock_state_view: StateView,
) -> VirtualGantryMover:
"""Create a GantryMover with its dependencies mocked out."""
return VirtualGantryMover(state_view=mock_state_view)
async def test_create_gantry_movement_handler(
decoy: Decoy,
mock_state_view: StateView,
mock_hardware_api: HardwareAPI,
) -> None:
"""It should return virtual or real gantry movement handlers depending on config."""
decoy.when(mock_state_view.config.use_virtual_pipettes).then_return(False)
assert isinstance(
create_gantry_mover(
state_view=mock_state_view,
hardware_api=mock_hardware_api,
),
HardwareGantryMover,
)
decoy.when(mock_state_view.config.use_virtual_pipettes).then_return(True)
assert isinstance(
create_gantry_mover(
state_view=mock_state_view,
hardware_api=mock_hardware_api,
),
VirtualGantryMover,
)
async def test_get_position(
decoy: Decoy,
mock_hardware_api: HardwareAPI,
mock_state_view: StateView,
hardware_subject: HardwareGantryMover,
) -> None:
"""It should get the position of the pipette with the hardware API."""
current_well = CurrentWell(
pipette_id="pipette-id",
labware_id="labware-id",
well_name="B2",
)
decoy.when(
mock_state_view.motion.get_pipette_location("pipette-id", current_well)
).then_return(
PipetteLocationData(
mount=MountType.RIGHT,
critical_point=CriticalPoint.XY_CENTER,
)
)
decoy.when(
await mock_hardware_api.gantry_position(
mount=Mount.RIGHT,
critical_point=CriticalPoint.XY_CENTER,
fail_on_not_homed=True,
)
).then_return(Point(1, 2, 3))
result = await hardware_subject.get_position(
"pipette-id", current_well=current_well, fail_on_not_homed=True
)
assert result == Point(1, 2, 3)
async def test_get_position_raises(
decoy: Decoy,
mock_hardware_api: HardwareAPI,
mock_state_view: StateView,
hardware_subject: HardwareGantryMover,
) -> None:
"""It should raise a MustHomeError."""
decoy.when(
mock_state_view.motion.get_pipette_location("pipette-id", None)
).then_return(
PipetteLocationData(
mount=MountType.LEFT,
critical_point=CriticalPoint.NOZZLE,
)
)
decoy.when(
await mock_hardware_api.gantry_position(
mount=Mount.LEFT,
critical_point=CriticalPoint.NOZZLE,
fail_on_not_homed=False,
)
).then_raise(HardwareMustHomeError("oh no"))
with pytest.raises(MustHomeError, match="oh no"):
await hardware_subject.get_position("pipette-id")
def test_get_max_travel_z(
decoy: Decoy,
mock_hardware_api: HardwareAPI,
mock_state_view: StateView,
hardware_subject: HardwareGantryMover,
) -> None:
"""It should get the max travel z height with the hardware API."""
decoy.when(mock_state_view.pipettes.get_mount("pipette-id")).then_return(
MountType.RIGHT
)
decoy.when(
mock_hardware_api.get_instrument_max_height(mount=Mount.RIGHT)
).then_return(42.1)
assert hardware_subject.get_max_travel_z("pipette-id") == 42.1
async def test_move_to(
decoy: Decoy,
mock_hardware_api: HardwareAPI,
mock_state_view: StateView,
hardware_subject: HardwareGantryMover,
) -> None:
"""It should move the gantry with the hardware API."""
decoy.when(mock_state_view.pipettes.get_mount("abc123")).then_return(
MountType.RIGHT
)
result = await hardware_subject.move_to(
pipette_id="abc123",
waypoints=[
Waypoint(position=Point(1, 2, 3), critical_point=CriticalPoint.TIP),
Waypoint(position=Point(4, 5, 6), critical_point=CriticalPoint.XY_CENTER),
],
speed=9001,
)
assert result == Point(4, 5, 6)
decoy.verify(
await mock_hardware_api.move_to(
mount=Mount.RIGHT,
abs_position=Point(1, 2, 3),
critical_point=CriticalPoint.TIP,
speed=9001,
),
await mock_hardware_api.move_to(
mount=Mount.RIGHT,
abs_position=Point(4, 5, 6),
critical_point=CriticalPoint.XY_CENTER,
speed=9001,
),
)
async def test_move_relative(
decoy: Decoy,
mock_hardware_api: HardwareAPI,
mock_state_view: StateView,
hardware_subject: HardwareGantryMover,
) -> None:
"""It should move the gantry by the delta with the hardware API."""
decoy.when(mock_state_view.motion.get_pipette_location("pipette-id")).then_return(
PipetteLocationData(
mount=MountType.RIGHT,
critical_point=CriticalPoint.XY_CENTER,
)
)
decoy.when(
await mock_hardware_api.gantry_position(
mount=Mount.RIGHT,
critical_point=CriticalPoint.XY_CENTER,
fail_on_not_homed=True,
)
).then_return(Point(4, 5, 6))
result = await hardware_subject.move_relative(
pipette_id="pipette-id",
delta=Point(1, 2, 3),
speed=9001,
)
assert result == Point(4, 5, 6)
# TODO(mc, 2022-05-13): the order of these calls is difficult to manage
# and test for. Ideally, `hardware.move_rel` would return the resulting position
decoy.verify(
await mock_hardware_api.move_rel(
mount=Mount.RIGHT,
delta=Point(1, 2, 3),
fail_on_not_homed=True,
speed=9001,
),
times=1,
)
async def test_move_relative_must_home(
decoy: Decoy,
mock_hardware_api: HardwareAPI,
mock_state_view: StateView,
hardware_subject: HardwareGantryMover,
) -> None:
"""It should raise a MustHomeError."""
decoy.when(mock_state_view.motion.get_pipette_location("pipette-id")).then_return(
PipetteLocationData(
mount=MountType.LEFT,
critical_point=CriticalPoint.XY_CENTER,
)
)
decoy.when(
await mock_hardware_api.move_rel(
mount=Mount.LEFT,
delta=Point(x=1, y=2, z=3),
fail_on_not_homed=True,
speed=456.7,
)
).then_raise(HardwareMustHomeError("oh no"))
with pytest.raises(MustHomeError, match="oh no"):
await hardware_subject.move_relative(
pipette_id="pipette-id",
delta=Point(x=1, y=2, z=3),
speed=456.7,
)
async def test_home(
decoy: Decoy,
mock_hardware_api: HardwareAPI,
hardware_subject: HardwareGantryMover,
mock_state_view: StateView,
) -> None:
"""It should home a set of axes."""
decoy.when(mock_state_view.config.robot_type).then_return("OT-2 Standard")
await hardware_subject.home(
axes=[
MotorAxis.X,
MotorAxis.Y,
MotorAxis.LEFT_Z,
MotorAxis.RIGHT_Z,
MotorAxis.LEFT_PLUNGER,
MotorAxis.RIGHT_PLUNGER,
]
)
decoy.verify(
await mock_hardware_api.home(
axes=[
HardwareAxis.X,
HardwareAxis.Y,
HardwareAxis.Z,
HardwareAxis.A,
HardwareAxis.B,
HardwareAxis.C,
]
),
times=1,
)
decoy.reset()
await hardware_subject.home(axes=None)
decoy.verify(await mock_hardware_api.home(), times=1)
decoy.reset()
await hardware_subject.home(axes=[])
decoy.verify(await mock_hardware_api.home(axes=[]), times=1)
async def test_ot2_home_fails_with_ot3_axes(
decoy: Decoy,
mock_hardware_api: HardwareAPI,
hardware_subject: HardwareGantryMover,
mock_state_view: StateView,
) -> None:
"""It should raise an error when homing axes that don't exist on OT2."""
decoy.when(mock_state_view.config.robot_type).then_return("OT-2 Standard")
with pytest.raises(InvalidAxisForRobotType):
await hardware_subject.home(
axes=[
MotorAxis.LEFT_PLUNGER,
MotorAxis.RIGHT_PLUNGER,
MotorAxis.EXTENSION_Z,
MotorAxis.EXTENSION_JAW,
]
)
@pytest.mark.ot3_only
async def test_home_on_ot3(
decoy: Decoy,
ot3_hardware_api: OT3API,
mock_state_view: StateView,
) -> None:
"""Test homing all OT3 axes."""
subject = HardwareGantryMover(
state_view=mock_state_view, hardware_api=ot3_hardware_api
)
decoy.when(mock_state_view.config.robot_type).then_return("OT-3 Standard")
await subject.home(
axes=[
MotorAxis.X,
MotorAxis.Y,
MotorAxis.LEFT_Z,
MotorAxis.RIGHT_Z,
MotorAxis.LEFT_PLUNGER,
MotorAxis.RIGHT_PLUNGER,
MotorAxis.EXTENSION_JAW,
MotorAxis.EXTENSION_Z,
]
)
decoy.verify(
await ot3_hardware_api.home(
axes=[
HardwareAxis.X,
HardwareAxis.Y,
HardwareAxis.Z,
HardwareAxis.A,
HardwareAxis.B,
HardwareAxis.C,
HardwareAxis.G,
HardwareAxis.Z_G,
]
),
)
# TODO(mc, 2022-12-01): this is overly complicated
# https://opentrons.atlassian.net/browse/RET-1287
async def test_home_z(
decoy: Decoy,
mock_hardware_api: HardwareAPI,
hardware_subject: HardwareGantryMover,
) -> None:
"""It should home a single Z axis and plunger."""
await hardware_subject.home(axes=[MotorAxis.LEFT_Z, MotorAxis.LEFT_PLUNGER])
decoy.verify(
await mock_hardware_api.home_z(Mount.LEFT),
await mock_hardware_api.home_plunger(Mount.LEFT),
)
decoy.reset()
await hardware_subject.home(axes=[MotorAxis.RIGHT_Z, MotorAxis.RIGHT_PLUNGER])
decoy.verify(
await mock_hardware_api.home_z(Mount.RIGHT),
await mock_hardware_api.home_plunger(Mount.RIGHT),
)
decoy.reset()
await hardware_subject.home(axes=[MotorAxis.LEFT_PLUNGER])
decoy.verify(
await mock_hardware_api.home_plunger(Mount.LEFT),
times=1,
)
decoy.reset()
await hardware_subject.home(axes=[MotorAxis.RIGHT_PLUNGER])
decoy.verify(
await mock_hardware_api.home_plunger(Mount.RIGHT),
times=1,
)
decoy.reset()
await hardware_subject.home(axes=[MotorAxis.RIGHT_Z, MotorAxis.LEFT_PLUNGER])
decoy.verify(
await mock_hardware_api.home([HardwareAxis.A, HardwareAxis.B]),
times=1,
)
async def test_virtual_get_position(
decoy: Decoy,
mock_state_view: StateView,
virtual_subject: VirtualGantryMover,
) -> None:
"""It should get the position of the pipette with the state store."""
decoy.when(mock_state_view.pipettes.get_deck_point("pipette-id")).then_return(
DeckPoint(x=1, y=2, z=3)
)
result = await virtual_subject.get_position("pipette-id")
assert result == Point(x=1, y=2, z=3)
async def test_virtual_get_position_default(
decoy: Decoy,
mock_state_view: StateView,
virtual_subject: VirtualGantryMover,
) -> None:
"""It should get a default Point if no stored deck point can be found in the state store."""
decoy.when(mock_state_view.pipettes.get_deck_point("pipette-id")).then_return(None)
result = await virtual_subject.get_position("pipette-id")
assert result == Point(x=0, y=0, z=0)
def test_virtual_get_max_travel_z_ot2(
decoy: Decoy,
mock_state_view: StateView,
virtual_subject: VirtualGantryMover,
) -> None:
"""It should get the max travel z height with the state store for an OT-2."""
decoy.when(mock_state_view.config.robot_type).then_return("OT-2 Standard")
decoy.when(
mock_state_view.pipettes.get_instrument_max_height_ot2("pipette-id")
).then_return(42)
decoy.when(mock_state_view.tips.get_tip_length("pipette-id")).then_return(20)
result = virtual_subject.get_max_travel_z("pipette-id")
assert result == 22.0
def test_virtual_get_max_travel_z_ot3(
decoy: Decoy,
mock_state_view: StateView,
virtual_subject: VirtualGantryMover,
) -> None:
"""It should get the max travel z height with the state store."""
decoy.when(mock_state_view.config.robot_type).then_return("OT-3 Standard")
decoy.when(mock_state_view.tips.get_tip_length("pipette-id")).then_return(48)
result = virtual_subject.get_max_travel_z("pipette-id")
assert result == VIRTUAL_MAX_OT3_HEIGHT - 48.0
async def test_virtual_move_relative(
decoy: Decoy,
mock_state_view: StateView,
virtual_subject: VirtualGantryMover,
) -> None:
"""It should simulate moving the gantry by the delta with the state store."""
decoy.when(mock_state_view.pipettes.get_deck_point("pipette-id")).then_return(
DeckPoint(x=1, y=2, z=3)
)
result = await virtual_subject.move_relative(
"pipette-id",
delta=Point(3, 2, 1),
speed=123,
)
assert result == Point(x=4, y=4, z=4)
async def test_virtual_move_to(
decoy: Decoy, virtual_subject: VirtualGantryMover
) -> None:
"""It should no-op on move to, returning the last waypoint."""
result = await virtual_subject.move_to(
pipette_id="abc123",
waypoints=[
Waypoint(position=Point(1, 2, 3), critical_point=CriticalPoint.TIP),
Waypoint(position=Point(4, 5, 6), critical_point=CriticalPoint.XY_CENTER),
],
speed=None,
)
assert result == Point(4, 5, 6)
| [
"[email protected]"
]
| |
2a0285638dadac1bca6388b2784153e889ab8ee8 | f9c7f734e13fa3c61347fe475306a6759940b860 | /python3_cron_scripts/create_tpd_graphs.py | 6662152a75dbe8d6b85b7000c5f2ae04c43bd22b | [
"Apache-2.0"
]
| permissive | DalavanCloud/Marinus | 48936f54395bae7c3e39dcffed77bb6fae3b473c | a9f3c4a54f6bf5c044121ac6d8d3d18a7a0e09d0 | refs/heads/master | 2020-04-29T12:32:17.645410 | 2019-03-16T00:37:11 | 2019-03-16T00:37:11 | 176,140,449 | 1 | 0 | null | 2019-03-17T18:07:18 | 2019-03-17T18:07:18 | null | UTF-8 | Python | false | false | 9,765 | py | #!/usr/bin/python3
# Copyright 2018 Adobe. All rights reserved.
# This file is licensed to you under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License. You may obtain a copy
# of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under
# the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR REPRESENTATIONS
# OF ANY KIND, either express or implied. See the License for the specific language
# governing permissions and limitations under the License.
"""
This script assumes that the following scripts have already been run:
- Core scripts (zones, infoblox, sonar)
- extract_ssl_names
- extract_vt_names
- get_external_cnames
ERRATA:
"TPD" in this case means root domain ("example.org") and not the traditional usage of TPD
which refers to ".net", ".com", ".co.uk", etc.
"""
import json
import math
import re
import time
from datetime import datetime, timedelta
import networkx as nx
from netaddr import IPAddress, IPNetwork
from networkx.readwrite import json_graph
from libs3 import MongoConnector, JobsManager
from libs3.ZoneManager import ZoneManager
REPLACE_CHAR = "!"
def add_to_list(str_to_add, groups):
"""
This will add a string to the groups array if it does not exist.
It will then return the index of the string within the Array
"""
if str_to_add.replace(".", REPLACE_CHAR) not in groups:
groups.append(str_to_add.replace(".", REPLACE_CHAR))
return groups.index(str_to_add.replace(".", REPLACE_CHAR))
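# For example (illustrative): with groups == [], add_to_list("example.org",
# groups) appends "example!org" (dots swapped for REPLACE_CHAR) and returns 0.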
def find_zones_by_tld(graph, tpd, groups, mongo_connector):
"""
Technically, a "tld" is ".org" or ".com".
    However, the tld library that I use considers TLDs to be "example.org".
This code just rolls with that.
For the provided third-party-domain, find the zones that are associated with that tpd.
"""
tpds_collection = mongo_connector.get_tpds_connection()
tpds_results = tpds_collection.find({'tld': tpd})
for result in tpds_results:
for zone in result['zones']:
zone_g_index = add_to_list(zone['zone'], groups)
# A space is added because sometimes the tpd is the same as the target
graph.add_node(zone['zone'], data_type="zone", type=zone_g_index,
depends=[tpd + " "], dependedOnBy=[], docs="")
graph.add_edge(tpd + " ", zone['zone'], value=2)
for entry in zone['records']:
graph.add_node(entry['host'], data_type="domain", type=zone_g_index,
depends=[zone['zone']], dependedOnBy=[entry['target']], docs="")
graph.add_node(entry['target'], data_type="domain", type=zone_g_index,
depends=[entry['host']], dependedOnBy=[], docs="")
graph.add_edge(zone['zone'], entry['host'], value=1)
graph.add_edge(entry['host'], entry['target'], value=1)
def build_docs(node, zone, groups):
"""
Build the docs that are shown in the Graph UI when you click on a node
"""
html = "<h3>" + node['id'] + "</h3><br/>"
html += "<b>Type:</b> " + node['data_type'] + "<br/>"
html += "<b>Group:</b> " + groups[node['type']].replace(REPLACE_CHAR, ".") + "<br/>"
html += "<b>Depends:</b><br/>"
if node['depends'] == []:
html += "None<br/>"
else:
for dependency in node['depends']:
html += " " + dependency + ","
html = html[:-1] + "<br>"
html += "<b>Depended on by:</b><br>"
if node['dependedOnBy'] == []:
html += "None<br/>"
else:
for dependency in node['dependedOnBy']:
html += " " + dependency + ","
html = html[:-1] + "<br>"
if node['data_type'] == "tld":
# <a href=\"/zone?search=" + node['id'] +
# "\" target=\"_blank\">Link to full zone details</a>"
html += ""
else:
if groups[node['type']].replace(REPLACE_CHAR, ".") != zone:
html += ("<a href=\"/domain?search=" + node['id'] +
"\" target=\"_blank\">Link to full host details</a>")
else:
html += ("<a href=\"/zone?search=" + node['id'] +
"\" target=\"_blank\">Link to full host details</a>")
return html
def reformat_data(data, tpd, groups):
"""
    Add the name and group properties to each node for d3.js compliance
"""
for i in range(0, len(data['nodes'])):
data['nodes'][i]['name'] = data['nodes'][i]['id']
data['nodes'][i]['group'] = groups[data['nodes'][i]['type']]
def get_tpds(mongo_connector):
"""
Create the list of third-party domains
"""
tpds_collection = mongo_connector.get_tpds_connection()
tpd_results = tpds_collection.find({})
tpds = []
for rec in tpd_results:
tpds.append(rec['tld'])
return tpds
def main():
"""
The main thread for this program.
"""
now = datetime.now()
print("Starting: " + str(now))
mongo_connector = MongoConnector.MongoConnector()
jobs_manager = JobsManager.JobsManager(mongo_connector, 'create_tpd_graphs')
jobs_manager.record_job_start()
zones = ZoneManager.get_distinct_zones(mongo_connector)
tpds = get_tpds(mongo_connector)
# For third-party-domain in the list of third-party-domains
for tpd in tpds:
groups = []
graph = nx.DiGraph()
add_to_list(tpd, groups)
# A space is added because sometimes the tpd is the same as the end target node
graph.add_node(tpd + " ", data_type="tld", type=0, depends=[],
dependedOnBy=[], docs="<h1>Parent</h1>")
# Get the zones associated with the tpd
find_zones_by_tld(graph, tpd, groups, mongo_connector)
data = json_graph.node_link_data(graph)
reformat_data(data, tpd, groups)
new_data = {}
new_data['directed'] = data['directed']
new_data['graph'] = data['graph']
new_data['multigraph'] = data['multigraph']
new_data['errs'] = []
new_data['links'] = data['links']
new_data['data'] = {}
for i in range(0, len(data['nodes'])):
new_data['data'][data['nodes'][i]['id'].replace(".", REPLACE_CHAR)] = data['nodes'][i]
for entry in new_data['data']:
for dep in new_data['data'][entry]['depends']:
if new_data['data'][entry]['name'] not in new_data['data'][dep.replace(".", REPLACE_CHAR)]['dependedOnBy']:
new_data['data'][dep.replace(".", REPLACE_CHAR)]['dependedOnBy'].append(new_data['data'][entry]['name'])
for dep in new_data['data'][entry]['dependedOnBy']:
if new_data['data'][entry]['name'] not in new_data['data'][dep.replace(".", REPLACE_CHAR)]['depends']:
new_data['data'][dep.replace(".", REPLACE_CHAR)]['depends'].append(new_data['data'][entry]['name'])
for entry in new_data['data']:
new_data['data'][entry]['docs'] = build_docs(new_data['data'][entry], tpd, groups)
config = {}
config['title'] = tpd + " Network Map"
config['graph'] = {}
config['graph']['linkDistance'] = 150
config['graph']['charge'] = -400
config['graph']['height'] = 800
config['graph']['numColors'] = len(groups)
config['graph']['labelPadding'] = {"left": 3, "right": 3, "top": 2, "bottom": 2}
config['graph']['labelMargin'] = {"left": 3, "right": 3, "top": 2, "bottom": 2}
config['graph']['ticksWithoutCollisions'] = 50
config['graph_type'] = "tpd"
config['types'] = {}
regex_str = "^[0-9]+\\.[0-9]+\\.[0-9]+$"
regx = re.compile(regex_str)
for tgroup in groups:
data_type = "tpd"
group = tgroup.replace(REPLACE_CHAR, ".")
if group in zones:
data_type = "tracked_domain"
elif re.match(regx, group):
data_type = "cidr"
config['types'][tgroup] = {"short": group,
"long": "A group from the network: " + group,
"data_type": data_type}
config['constraints'] = []
tmp = int(math.ceil(math.sqrt(len(groups)))) + 1
x = []
y = []
for i in range(1, tmp):
val = round((i * 1.0)/tmp, 2)
x.append(str(val))
y.append(str(val))
x_pos = 0
y_pos = 0
for group in groups:
config['constraints'].append({"has": {"type": group},
"type": "position",
"x": x[x_pos],
"y": y[y_pos]})
x_pos = x_pos + 1
if x_pos >= len(x):
x_pos = 0
y_pos = y_pos + 1
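        # Layout sketch (illustrative): with four groups, tmp == 3, so
        # x == y == ["0.33", "0.67"] and the groups are pinned row-major at
        # (0.33, 0.33), (0.67, 0.33), (0.33, 0.67) and (0.67, 0.67).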
config['jsonUrl'] = "/api/v1.0/tpd_graphs/" + tpd
new_data['config'] = config
new_data['created'] = datetime.now()
new_data['zone'] = tpd
tpd_graphs_collection = mongo_connector.get_tpd_graphs_connection()
tpd_graphs_collection.remove({'zone': tpd})
try:
tpd_graphs_collection.insert_one(new_data)
except:
print("ERROR: Could not insert " + tpd)
time.sleep(1)
    # Remove entries older than one week
lastweek = datetime.now() - timedelta(days=7)
tpd_graphs_collection.remove({'created': {"$lt": lastweek}})
# Record status
jobs_manager.record_job_complete()
now = datetime.now()
print("Complete: " + str(now))
if __name__ == "__main__":
main()
| [
"[email protected]"
]
| |
5502baa5078bace5351800777e9e71092e41e9ec | 5779d964d5ee42b586697a640ff0f977e0fa1e55 | /test/test_access_approval_services_api.py | 1e9f8cd053340a2548c2d46ca89069e1132a2cae | []
| no_license | thomasyu888/synpy-sdk-client | 03db42c3c8411c8c1f8808e1145d7c2a8bcc3df1 | d1e19e26db5376c78c4ce0ff181ac3c4e0709cbb | refs/heads/main | 2023-02-28T09:33:12.386220 | 2021-02-02T15:09:59 | 2021-02-02T15:09:59 | 333,744,741 | 3 | 0 | null | 2021-01-30T12:10:50 | 2021-01-28T11:57:48 | Python | UTF-8 | Python | false | false | 1,014 | py | # coding: utf-8
"""
Platform Repository Service
Platform Repository Service - Sage Bionetworks Platform # noqa: E501
The version of the OpenAPI document: develop-SNAPSHOT
Contact: [email protected]
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import synclient
from synclient.api.access_approval_services_api import AccessApprovalServicesApi # noqa: E501
from synclient.rest import ApiException
class TestAccessApprovalServicesApi(unittest.TestCase):
"""AccessApprovalServicesApi unit test stubs"""
def setUp(self):
self.api = synclient.api.access_approval_services_api.AccessApprovalServicesApi() # noqa: E501
def tearDown(self):
pass
def test_get_team_access_approvals(self):
"""Test case for get_team_access_approvals
Retrieve the Access Approvals for the given Team. # noqa: E501
"""
pass
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
]
| |
ab0a941410dac6aa32f28e272e64bd732c1ef4af | 93dd86c8d0eceaee8276a5cafe8c0bfee2a315d3 | /python/paddle/fluid/tests/unittests/test_input_spec.py | e329a37488a2cb8234532cd0a9beb7a1a25e72a6 | [
"Apache-2.0"
]
| permissive | hutuxian/Paddle | f8b7693bccc6d56887164c1de0b6f6e91cffaae8 | a1b640bc66a5cc9583de503e7406aeba67565e8d | refs/heads/develop | 2023-08-29T19:36:45.382455 | 2020-09-09T09:19:07 | 2020-09-09T09:19:07 | 164,977,763 | 8 | 27 | Apache-2.0 | 2023-06-16T09:47:39 | 2019-01-10T02:50:31 | Python | UTF-8 | Python | false | false | 4,615 | py | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
import paddle.fluid as fluid
from paddle.static import InputSpec
from paddle.fluid.framework import core, convert_np_dtype_to_dtype_
class TestInputSpec(unittest.TestCase):
def test_default(self):
tensor_spec = InputSpec([3, 4])
self.assertEqual(tensor_spec.dtype,
convert_np_dtype_to_dtype_('float32'))
self.assertEqual(tensor_spec.name, None)
def test_from_tensor(self):
x_bool = fluid.layers.fill_constant(shape=[1], dtype='bool', value=True)
bool_spec = InputSpec.from_tensor(x_bool)
self.assertEqual(bool_spec.dtype, x_bool.dtype)
self.assertEqual(bool_spec.shape, x_bool.shape)
self.assertEqual(bool_spec.name, x_bool.name)
bool_spec2 = InputSpec.from_tensor(x_bool, name='bool_spec')
        self.assertEqual(bool_spec2.name, 'bool_spec')
def test_from_numpy(self):
x_numpy = np.ones([10, 12])
x_np_spec = InputSpec.from_numpy(x_numpy)
self.assertEqual(x_np_spec.dtype,
convert_np_dtype_to_dtype_(x_numpy.dtype))
self.assertEqual(x_np_spec.shape, x_numpy.shape)
self.assertEqual(x_np_spec.name, None)
x_numpy2 = np.array([1, 2, 3, 4]).astype('int64')
x_np_spec2 = InputSpec.from_numpy(x_numpy2, name='x_np_int64')
self.assertEqual(x_np_spec2.dtype,
convert_np_dtype_to_dtype_(x_numpy2.dtype))
self.assertEqual(x_np_spec2.shape, x_numpy2.shape)
self.assertEqual(x_np_spec2.name, 'x_np_int64')
def test_shape_with_none(self):
tensor_spec = InputSpec([None, 4, None], dtype='int8', name='x_spec')
self.assertEqual(tensor_spec.dtype, convert_np_dtype_to_dtype_('int8'))
self.assertEqual(tensor_spec.name, 'x_spec')
self.assertEqual(tensor_spec.shape, (-1, 4, -1))
def test_shape_raise_error(self):
# 1. shape should only contain int and None.
with self.assertRaises(ValueError):
tensor_spec = InputSpec(['None', 4, None], dtype='int8')
# 2. shape should be type `list` or `tuple`
with self.assertRaises(TypeError):
tensor_spec = InputSpec(4, dtype='int8')
# 3. len(shape) should be greater than 0.
with self.assertRaises(ValueError):
tensor_spec = InputSpec([], dtype='int8')
def test_batch_and_unbatch(self):
tensor_spec = InputSpec([10])
# insert batch_size
batch_tensor_spec = tensor_spec.batch(16)
self.assertEqual(batch_tensor_spec.shape, (16, 10))
# unbatch
unbatch_spec = batch_tensor_spec.unbatch()
self.assertEqual(unbatch_spec.shape, (10, ))
# 1. `unbatch` requires len(shape) > 1
with self.assertRaises(ValueError):
unbatch_spec.unbatch()
# 2. `batch` requires len(batch_size) == 1
with self.assertRaises(ValueError):
tensor_spec.batch([16, 12])
# 3. `batch` requires type(batch_size) == int
with self.assertRaises(TypeError):
tensor_spec.batch('16')
def test_eq_and_hash(self):
tensor_spec_1 = InputSpec([10, 16], dtype='float32')
tensor_spec_2 = InputSpec([10, 16], dtype='float32')
tensor_spec_3 = InputSpec([10, 16], dtype='float32', name='x')
tensor_spec_4 = InputSpec([16], dtype='float32', name='x')
# override ``__eq__`` according to [shape, dtype, name]
self.assertTrue(tensor_spec_1 == tensor_spec_2)
self.assertTrue(tensor_spec_1 != tensor_spec_3) # different name
self.assertTrue(tensor_spec_3 != tensor_spec_4) # different shape
# override ``__hash__`` according to [shape, dtype]
self.assertTrue(hash(tensor_spec_1) == hash(tensor_spec_2))
self.assertTrue(hash(tensor_spec_1) == hash(tensor_spec_3))
self.assertTrue(hash(tensor_spec_3) != hash(tensor_spec_4))
if __name__ == '__main__':
unittest.main()
# ---- next file: /hardware/opentrons_hardware/firmware_bindings/messages/payloads.py (repo: XifanRD/opentrons, Apache-2.0) ----
"""Payloads of can bus messages."""
# TODO (amit, 2022-01-26): Figure out why using annotations import ruins
# dataclass fields interpretation.
# from __future__ import annotations
from dataclasses import dataclass, field, asdict
from . import message_definitions
from typing import Iterator, List
from .fields import (
FirmwareShortSHADataField,
VersionFlagsField,
TaskNameDataField,
ToolField,
FirmwareUpdateDataField,
ErrorSeverityField,
ErrorCodeField,
SensorTypeField,
SensorIdField,
PipetteNameField,
SensorOutputBindingField,
EepromDataField,
SerialField,
SerialDataCodeField,
SensorThresholdModeField,
PipetteTipActionTypeField,
MotorPositionFlagsField,
MoveStopConditionField,
GearMotorIdField,
OptionalRevisionField,
MotorUsageTypeField,
)
from .. import utils
@dataclass(eq=False)
class EmptyPayload(utils.BinarySerializable):
"""An empty payload."""
def __eq__(self, other: object) -> bool:
"""Override __eq__ to ignore message_index."""
other_dict = vars(other)
self_dict = vars(self)
for key in self_dict:
if key != "message_index":
if not (key in other_dict and self_dict[key] == other_dict[key]):
return False
return True
# oh boy would it be great to have python 3.10 so we could use the kw_only thing here
    # we can't have it as a normal arg because we'd have to initialize it everywhere we make a message
    # and we can't just have it set as a default because we get a TypeError for initializing the non-default
# args of subclasses after this default arg.
# to work around this in binary_serializable.build() and can_comm.prompt_payload
# we ignore the message_index when constructing args and then set the value manually after
message_index: utils.UInt32Field = field(
init=False, default=utils.UInt32Field(None) # type: ignore[arg-type]
)
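# Illustrative note (not part of the original module): because message_index is
# excluded from __init__, callers construct a payload normally and then assign
# the index by hand, mirroring what FirmwareUpdateData.create() does below:
#
#     payload = GetStatusResponsePayload(
#         status=utils.UInt8Field(0), data=utils.UInt32Field(0)
#     )
#     payload.message_index = utils.UInt32Field(1)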
@dataclass(eq=False)
class ErrorMessagePayload(EmptyPayload):
"""Message sent from firmware in the event of an error."""
severity: ErrorSeverityField
error_code: ErrorCodeField
@dataclass(eq=False)
class _DeviceInfoResponsePayloadBase(EmptyPayload):
version: utils.UInt32Field
flags: VersionFlagsField
shortsha: FirmwareShortSHADataField
@dataclass(eq=False)
class DeviceInfoResponsePayload(_DeviceInfoResponsePayloadBase):
"""Device info response."""
@classmethod
def build(cls, data: bytes) -> "DeviceInfoResponsePayload":
"""Build a response payload from incoming bytes.
This override is required to handle optionally-present revision data.
"""
consumed_by_super = _DeviceInfoResponsePayloadBase.get_size()
superdict = asdict(_DeviceInfoResponsePayloadBase.build(data))
message_index = superdict.pop("message_index")
# we want to parse this by adding extra 0s that may not be necessary,
# which is annoying and complex, so let's wrap it in an iterator
def _data_for_optionals(consumed: int, buf: bytes) -> Iterator[bytes]:
extended = buf + b"\x00\x00\x00\x00"
yield extended[consumed:]
consumed += 4
extended = extended + b"\x00"
yield extended[consumed : consumed + 1]
optionals_yielder = _data_for_optionals(consumed_by_super, data)
inst = cls(
**superdict,
revision=OptionalRevisionField.build(next(optionals_yielder)),
subidentifier=utils.UInt8Field.build(
int.from_bytes(next(optionals_yielder), "big")
),
)
inst.message_index = message_index
return inst
revision: OptionalRevisionField
subidentifier: utils.UInt8Field
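# Note on the zero-padding in build() above: the response may omit the optional
# revision (4 bytes) and subidentifier (1 byte), so the buffer is extended with
# zero bytes before slicing; absent fields then parse as zeros instead of
# raising on a short read.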
@dataclass(eq=False)
class TaskInfoResponsePayload(EmptyPayload):
"""Task info response payload."""
name: TaskNameDataField
runtime_counter: utils.UInt32Field
stack_high_water_mark: utils.UInt32Field
state: utils.UInt16Field
priority: utils.UInt16Field
@dataclass(eq=False)
class GetStatusResponsePayload(EmptyPayload):
"""Get status response."""
status: utils.UInt8Field
data: utils.UInt32Field
@dataclass(eq=False)
class MoveRequestPayload(EmptyPayload):
"""Move request."""
steps: utils.UInt32Field
@dataclass(eq=False)
class GetSpeedResponsePayload(EmptyPayload):
"""Get speed response."""
mm_sec: utils.UInt32Field
@dataclass(eq=False)
class EEPromReadPayload(EmptyPayload):
"""Eeprom read request payload ."""
address: utils.UInt16Field
data_length: utils.UInt16Field
@dataclass(eq=False)
class EEPromDataPayload(EEPromReadPayload):
"""Eeprom payload with data."""
data: EepromDataField
@dataclass(eq=False)
class MoveGroupRequestPayload(EmptyPayload):
"""A payload with a group id."""
group_id: utils.UInt8Field
@dataclass(eq=False)
class MoveGroupResponsePayload(EmptyPayload):
"""A response payload with a group id."""
group_id: utils.UInt8Field
@dataclass(eq=False)
class AddToMoveGroupRequestPayload(MoveGroupRequestPayload):
"""Base of add to move group request to a message group."""
seq_id: utils.UInt8Field
duration: utils.UInt32Field
@dataclass(eq=False)
class AddLinearMoveRequestPayload(AddToMoveGroupRequestPayload):
"""Add a linear move request to a message group."""
acceleration_um: utils.Int32Field
velocity_mm: utils.Int32Field
request_stop_condition: MoveStopConditionField
@dataclass(eq=False)
class HomeRequestPayload(AddToMoveGroupRequestPayload):
"""Request to home."""
velocity_mm: utils.Int32Field
@dataclass(eq=False)
class GetMoveGroupResponsePayload(MoveGroupResponsePayload):
"""Response to request to get a move group."""
num_moves: utils.UInt8Field
total_duration: utils.UInt32Field
@dataclass(eq=False)
class ExecuteMoveGroupRequestPayload(MoveGroupRequestPayload):
"""Start executing a move group."""
start_trigger: utils.UInt8Field
cancel_trigger: utils.UInt8Field
@dataclass(eq=False)
class MoveCompletedPayload(MoveGroupResponsePayload):
"""Notification of a completed move group."""
seq_id: utils.UInt8Field
current_position_um: utils.UInt32Field
encoder_position_um: utils.Int32Field
position_flags: MotorPositionFlagsField
ack_id: utils.UInt8Field
@dataclass(eq=False)
class MotorPositionResponse(EmptyPayload):
"""Read Encoder Position."""
current_position: utils.UInt32Field
encoder_position: utils.Int32Field
position_flags: MotorPositionFlagsField
@dataclass(eq=False)
class MotionConstraintsPayload(EmptyPayload):
"""The min and max velocity and acceleration of a motion system."""
min_velocity: utils.Int32Field
max_velocity: utils.Int32Field
min_acceleration: utils.Int32Field
max_acceleration: utils.Int32Field
@dataclass(eq=False)
class MotorDriverRegisterPayload(EmptyPayload):
"""Read motor driver register request payload."""
reg_addr: utils.UInt8Field
@dataclass(eq=False)
class MotorDriverRegisterDataPayload(MotorDriverRegisterPayload):
"""Write motor driver register request payload."""
data: utils.UInt32Field
@dataclass(eq=False)
class ReadMotorDriverRegisterResponsePayload(EmptyPayload):
"""Read motor driver register response payload."""
reg_addr: utils.UInt8Field
data: utils.UInt32Field
@dataclass(eq=False)
class MotorCurrentPayload(EmptyPayload):
"""Read motor current register payload."""
# All values in milliAmps
hold_current: utils.UInt32Field
run_current: utils.UInt32Field
@dataclass(eq=False)
class ReadPresenceSensingVoltageResponsePayload(EmptyPayload):
"""Read head presence sensing voltage response payload."""
# All values in millivolts
z_motor: utils.UInt16Field
a_motor: utils.UInt16Field
gripper: utils.UInt16Field
@dataclass(eq=False)
class ToolsDetectedNotificationPayload(EmptyPayload):
"""Tool detection notification."""
# Tools are mapped to an enum
z_motor: ToolField
a_motor: ToolField
gripper: ToolField
@dataclass(eq=False)
class FirmwareUpdateWithAddress(EmptyPayload):
"""A FW update payload with an address."""
address: utils.UInt32Field
@dataclass(eq=False)
class FirmwareUpdateData(FirmwareUpdateWithAddress):
"""A FW update data payload."""
num_bytes: utils.UInt8Field
reserved: utils.UInt8Field
data: FirmwareUpdateDataField
checksum: utils.UInt16Field
def __post_init__(self) -> None:
"""Post init processing."""
data_length = len(self.data.value)
address = self.address.value
if address % 8 != 0:
raise ValueError(
f"Data address needs to be doubleword aligned."
f" {address} mod 8 equals {address % 8} and should be 0"
)
if data_length > FirmwareUpdateDataField.NUM_BYTES:
raise ValueError(
f"Data cannot be more than"
f" {FirmwareUpdateDataField.NUM_BYTES} bytes got {data_length}."
)
@classmethod
def create(
cls, address: int, data: bytes, message_index: int = None # type: ignore[assignment]
) -> "FirmwareUpdateData":
"""Create a firmware update data payload."""
        # this is a special case, we normally instantiate message_index
# when building a message, not a payload, but we need to compute
# the checksum so we do it here. you should not normally supply
# message index to this function, but i've added it for the unit
# tests so the object can have a predictable checksum
checksum = 0
obj = FirmwareUpdateData(
address=utils.UInt32Field(address),
num_bytes=utils.UInt8Field(len(data)),
reserved=utils.UInt8Field(0),
data=FirmwareUpdateDataField(data),
checksum=utils.UInt16Field(checksum),
)
if message_index is None:
index_generator = message_definitions.SingletonMessageIndexGenerator()
obj.message_index = utils.UInt32Field(index_generator.get_next_index())
else:
obj.message_index = utils.UInt32Field(message_index)
checksum = (1 + ~sum(obj.serialize())) & 0xFFFF
obj.checksum.value = checksum
return obj
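# Worked example of the checksum math above (illustrative): the checksum is the
# 16-bit two's complement of the byte sum, i.e. (1 + ~s) & 0xFFFF == (-s) & 0xFFFF.
# If sum(obj.serialize()) == 0x01FE (with the checksum field still zero), the
# stored checksum becomes 0xFE02, and (0x01FE + 0xFE02) & 0xFFFF == 0, so a
# receiver re-summing the serialized bytes can expect zero.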
@dataclass(eq=False)
class FirmwareUpdateDataAcknowledge(FirmwareUpdateWithAddress):
"""A FW update data acknowledge payload."""
error_code: ErrorCodeField
@dataclass(eq=False)
class FirmwareUpdateComplete(EmptyPayload):
"""All data messages have been transmitted."""
num_messages: utils.UInt32Field
crc32: utils.UInt32Field
@dataclass(eq=False)
class FirmwareUpdateAcknowledge(EmptyPayload):
"""A response to a firmware update message with an error code."""
error_code: ErrorCodeField
@dataclass(eq=False)
class FirmwareUpdateStatus(EmptyPayload):
"""A response to the FirmwareUpdateStatusRequest message."""
flags: utils.UInt32Field
@dataclass(eq=False)
class GetLimitSwitchResponse(EmptyPayload):
"""A response to the Limit Switch Status request payload."""
switch_status: utils.UInt8Field
@dataclass(eq=False)
class SensorPayload(EmptyPayload):
"""Take a single reading from a sensor request payload."""
sensor: SensorTypeField
sensor_id: SensorIdField
@dataclass(eq=False)
class ReadFromSensorRequestPayload(SensorPayload):
"""Take a single reading from a sensor request payload."""
offset_reading: utils.UInt8Field
@dataclass(eq=False)
class WriteToSensorRequestPayload(SensorPayload):
"""Write a piece of data to a sensor request payload."""
data: utils.UInt32Field
reg_address: utils.UInt8Field
@dataclass(eq=False)
class BaselineSensorRequestPayload(SensorPayload):
"""Provide a specified amount of readings to take the average of the current sensor."""
number_of_reads: utils.UInt16Field
@dataclass(eq=False)
class BaselineSensorResponsePayload(SensorPayload):
"""A response containing an averaged offset reading from a sensor."""
offset_average: utils.Int32Field
@dataclass(eq=False)
class ReadFromSensorResponsePayload(SensorPayload):
"""A response for either a single reading or an averaged reading of a sensor."""
sensor_data: utils.Int32Field
@dataclass(eq=False)
class SetSensorThresholdRequestPayload(SensorPayload):
"""A request to set the threshold value of a sensor."""
threshold: utils.Int32Field
mode: SensorThresholdModeField
@dataclass(eq=False)
class SensorThresholdResponsePayload(SensorPayload):
"""A response that sends back the current threshold value of the sensor."""
threshold: utils.Int32Field
mode: SensorThresholdModeField
@dataclass(eq=False)
class SensorDiagnosticRequestPayload(SensorPayload):
"""A response that sends back the current threshold value of the sensor."""
reg_address: utils.UInt8Field
@dataclass(eq=False)
class SensorDiagnosticResponsePayload(SensorPayload):
"""A response that sends back the current threshold value of the sensor."""
reg_address: utils.UInt8Field
data: utils.UInt32Field
@dataclass(eq=False)
class BindSensorOutputRequestPayload(SensorPayload):
"""A request to link a GPIO pin output to a sensor threshold."""
binding: SensorOutputBindingField
@dataclass(eq=False)
class BindSensorOutputResponsePayload(SensorPayload):
"""A response that sends back the current binding for a sensor."""
binding: SensorOutputBindingField
@dataclass(eq=False)
class PipetteInfoResponsePayload(EmptyPayload):
"""A response carrying data about an attached pipette."""
name: PipetteNameField
model: utils.UInt16Field
serial: SerialDataCodeField
@dataclass(eq=False)
class BrushedMotorVrefPayload(EmptyPayload):
"""A request to set the reference voltage of a brushed motor."""
v_ref: utils.UInt32Field
@dataclass(eq=False)
class BrushedMotorPwmPayload(EmptyPayload):
"""A request to set the pwm of a brushed motor."""
duty_cycle: utils.UInt32Field
@dataclass(eq=False)
class BrushedMotorConfPayload(EmptyPayload):
"""A response carrying data about a brushed motor driver."""
v_ref: utils.UInt32Field
duty_cycle: utils.UInt32Field
@dataclass(eq=False)
class GripperInfoResponsePayload(EmptyPayload):
"""A response carrying data about an attached gripper."""
model: utils.UInt16Field
serial: SerialDataCodeField
@dataclass(eq=False)
class GripperMoveRequestPayload(AddToMoveGroupRequestPayload):
"""A request to move gripper."""
duty_cycle: utils.UInt32Field
encoder_position_um: utils.Int32Field
@dataclass(eq=False)
class GripperErrorTolerancePayload(EmptyPayload):
"""A request to update the position error tolerance of the gripper."""
max_pos_error_mm: utils.UInt32Field
max_unwanted_movement_mm: utils.UInt32Field
@dataclass(eq=False)
class PushTipPresenceNotificationPayload(EmptyPayload):
"""A notification that the ejector flag status has changed."""
ejector_flag_status: utils.UInt8Field
@dataclass(eq=False)
class TipActionRequestPayload(AddToMoveGroupRequestPayload):
"""A request to perform a tip action."""
velocity: utils.Int32Field
action: PipetteTipActionTypeField
request_stop_condition: MoveStopConditionField
@dataclass(eq=False)
class TipActionResponsePayload(MoveCompletedPayload):
"""A response that sends back whether tip action was successful."""
action: PipetteTipActionTypeField
success: utils.UInt8Field
gear_motor_id: GearMotorIdField
@dataclass(eq=False)
class PeripheralStatusResponsePayload(SensorPayload):
"""A response that sends back the initialization status of a peripheral device."""
status: utils.UInt8Field
@dataclass(eq=False)
class SerialNumberPayload(EmptyPayload):
"""A payload with a serial number."""
serial: SerialField
@dataclass(eq=False)
class _GetMotorUsageResponsePayloadBase(EmptyPayload):
num_elements: utils.UInt8Field
@dataclass(eq=False)
class GetMotorUsageResponsePayload(_GetMotorUsageResponsePayloadBase):
"""A payload with motor lifetime usage."""
@classmethod
def build(cls, data: bytes) -> "GetMotorUsageResponsePayload":
"""Build a response payload from incoming bytes.
This override is required to handle responses with multiple values.
"""
consumed = _GetMotorUsageResponsePayloadBase.get_size()
superdict = asdict(_GetMotorUsageResponsePayloadBase.build(data))
num_elements = superdict["num_elements"]
message_index = superdict.pop("message_index")
usage_values: List[MotorUsageTypeField] = []
for i in range(num_elements.value):
usage_values.append(
MotorUsageTypeField.build(
data[consumed : consumed + MotorUsageTypeField.NUM_BYTES]
)
)
consumed = consumed + MotorUsageTypeField.NUM_BYTES
inst = cls(**superdict, usage_elements=usage_values)
inst.message_index = message_index
return inst
usage_elements: List[MotorUsageTypeField]
# ---- next file: /venv/lib/python3.10/site-packages/aiohttp/worker.py (repo: JawshyJ/Coding_Practice) -- 96-byte pip-cache symlink stub, no Python source ----
# ---- next file: /xai/brain/wordbase/nouns/_minion.py (repo: cash2one/xai, MIT) ----
class _MINION():
def __init__(self,):
self.name = "MINION"
self.definitions = [u'a person who is not important and who has to do what another person of higher rank orders them to do: ']
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.specie = 'nouns'
def run(self, obj1 = [], obj2 = []):
return self.jsondata
| [
"[email protected]"
]
| |
01546daeac217a5c225258efff378b7d9a7da2b1 | e41651d8f9b5d260b800136672c70cb85c3b80ff | /Notification_System/temboo/Library/Yelp/SearchForBusiness.py | 1c3fc99c24d7c29ab3d39126b3e66458540807c7 | []
| no_license | shriswissfed/GPS-tracking-system | 43e667fe3d00aa8e65e86d50a4f776fcb06e8c5c | 1c5e90a483386bd2e5c5f48f7c5b306cd5f17965 | refs/heads/master | 2020-05-23T03:06:46.484473 | 2018-10-03T08:50:00 | 2018-10-03T08:50:00 | 55,578,217 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,654 | py | # -*- coding: utf-8 -*-
###############################################################################
#
# SearchForBusiness
# Retrieves information for a given business id or name.
#
# Python versions 2.6, 2.7, 3.x
#
# Copyright 2014, Temboo Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
#
#
###############################################################################
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
import json
class SearchForBusiness(Choreography):
def __init__(self, temboo_session):
"""
Create a new instance of the SearchForBusiness Choreo. A TembooSession object, containing a valid
set of Temboo credentials, must be supplied.
"""
super(SearchForBusiness, self).__init__(temboo_session, '/Library/Yelp/SearchForBusiness')
def new_input_set(self):
return SearchForBusinessInputSet()
def _make_result_set(self, result, path):
return SearchForBusinessResultSet(result, path)
def _make_execution(self, session, exec_id, path):
return SearchForBusinessChoreographyExecution(session, exec_id, path)
class SearchForBusinessInputSet(InputSet):
"""
An InputSet with methods appropriate for specifying the inputs to the SearchForBusiness
Choreo. The InputSet object is used to specify input parameters when executing this Choreo.
"""
def set_BusinessId(self, value):
"""
Set the value of the BusinessId input for this Choreo. ((conditional, string) The business id to return results for. This can be found in the URL when you're on the business page on yelp.com (i.e. "yelp-san-francisco"). This is required unless using the BusinessName input.)
"""
super(SearchForBusinessInputSet, self)._set_input('BusinessId', value)
def set_BusinessName(self, value):
"""
Set the value of the BusinessName input for this Choreo. ((conditional, string) A business name to search for. This is required unless using the BusinessId input.)
"""
super(SearchForBusinessInputSet, self)._set_input('BusinessName', value)
def set_Category(self, value):
"""
Set the value of the Category input for this Choreo. ((optional, string) The category to filter search results with when searching by BusinessName. This can be a list of comma delimited categories. For example, "bars,french". This can used when searching by BusinessName.)
"""
super(SearchForBusinessInputSet, self)._set_input('Category', value)
def set_City(self, value):
"""
Set the value of the City input for this Choreo. ((conditional, string) The name of the city in which to search for businesses. This is required when searching by BusinessName.)
"""
super(SearchForBusinessInputSet, self)._set_input('City', value)
def set_ConsumerKey(self, value):
"""
Set the value of the ConsumerKey input for this Choreo. ((required, string) The Consumer Key provided by Yelp.)
"""
super(SearchForBusinessInputSet, self)._set_input('ConsumerKey', value)
def set_ConsumerSecret(self, value):
"""
Set the value of the ConsumerSecret input for this Choreo. ((required, string) The Consumer Secret provided by Yelp.)
"""
super(SearchForBusinessInputSet, self)._set_input('ConsumerSecret', value)
def set_Count(self, value):
"""
Set the value of the Count input for this Choreo. ((optional, integer) The number of business results to return when searching by BusinessName. The maxiumum is 20.)
"""
super(SearchForBusinessInputSet, self)._set_input('Count', value)
def set_CountryCode(self, value):
"""
Set the value of the CountryCode input for this Choreo. ((optional, string) The ISO 3166-1 2-digit country code to use when parsing the location field. United States = US, Canada = CA, United Kingdom = GB. This can be used when searching by BusinessName.)
"""
super(SearchForBusinessInputSet, self)._set_input('CountryCode', value)
def set_Deals(self, value):
"""
Set the value of the Deals input for this Choreo. ((optional, string) Set to "true" to exclusively search for businesses with deals. This can used when searching by BusinessName.)
"""
super(SearchForBusinessInputSet, self)._set_input('Deals', value)
def set_LanguageCode(self, value):
"""
Set the value of the LanguageCode input for this Choreo. ((optional, string) The ISO 639 language code. Default to "en". Reviews and snippets written in the specified language will be returned. This can be used when searching by BusinessName.)
"""
super(SearchForBusinessInputSet, self)._set_input('LanguageCode', value)
def set_Offset(self, value):
"""
Set the value of the Offset input for this Choreo. ((optional, integer) Offsets the list of returned business results by this amount when searching by BusinessName.)
"""
super(SearchForBusinessInputSet, self)._set_input('Offset', value)
def set_ResponseFormat(self, value):
"""
Set the value of the ResponseFormat input for this Choreo. ((optional, multiline) The format of the response from Yelp, either XML or JSON (the default).)
"""
super(SearchForBusinessInputSet, self)._set_input('ResponseFormat', value)
def set_Sort(self, value):
"""
Set the value of the Sort input for this Choreo. ((optional, integer) The sort mode: 0 = Best matched, 1 = Distance (default), 2 = Highest Rated. This can be used when searching by BusinessName.)
"""
super(SearchForBusinessInputSet, self)._set_input('Sort', value)
def set_Token(self, value):
"""
Set the value of the Token input for this Choreo. ((required, string) The Token provided by Yelp.)
"""
super(SearchForBusinessInputSet, self)._set_input('Token', value)
def set_TokenSecret(self, value):
"""
Set the value of the TokenSecret input for this Choreo. ((required, string) The Token Secret provided by Yelp.)
"""
super(SearchForBusinessInputSet, self)._set_input('TokenSecret', value)
class SearchForBusinessResultSet(ResultSet):
"""
A ResultSet with methods tailored to the values returned by the SearchForBusiness Choreo.
The ResultSet object is used to retrieve the results of a Choreo execution.
"""
def getJSONFromString(self, str):
return json.loads(str)
def get_Response(self):
"""
Retrieve the value for the "Response" output from this Choreo execution. (The response from Yelp. Corresponds to the input value for ResponseFormat (defaults to JSON).)
"""
return self._output.get('Response', None)
class SearchForBusinessChoreographyExecution(ChoreographyExecution):
def _make_result_set(self, response, path):
return SearchForBusinessResultSet(response, path)
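# Illustrative usage sketch (not part of the generated file; assumes valid
# Temboo and Yelp credentials -- every value below is a placeholder):
#
#     from temboo.core.session import TembooSession
#
#     session = TembooSession("ACCOUNT_NAME", "APP_KEY_NAME", "APP_KEY_VALUE")
#     choreo = SearchForBusiness(session)
#     inputs = choreo.new_input_set()
#     inputs.set_ConsumerKey("YELP_CONSUMER_KEY")
#     inputs.set_ConsumerSecret("YELP_CONSUMER_SECRET")
#     inputs.set_Token("YELP_TOKEN")
#     inputs.set_TokenSecret("YELP_TOKEN_SECRET")
#     inputs.set_BusinessId("yelp-san-francisco")
#     results = choreo.execute_with_results(inputs)
#     print(results.get_Response())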
# ---- next file: /backend/location/admin.py (repo: crowdbotics-apps/logintest-29319) ----
from django.contrib import admin
from .models import TaskerLocation, MapLocation, CustomerLocation, TaskLocation
# Register your models here.
admin.site.register(MapLocation)
admin.site.register(TaskerLocation)
admin.site.register(CustomerLocation)
admin.site.register(TaskLocation)
# ---- next file: /bin/qr (repo: nguyentranhoan/uit-mobile, BSD-3-Clause) -- qrcode console entry point ----
#!/home/jay/Desktop/uit/uit_mobile/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from qrcode.console_scripts import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
# ---- next file: /app/pages/base/page.py (repo: gonza56d/burner) ----
# Python
from abc import ABC, abstractmethod
from typing import Generator
class BasePage(ABC):
"""Abstract class that implements how to get and store the data regarding
subclass attributes.
"""
@abstractmethod
def get_page_name(self) -> str:
"""Implement the name of the page to store results from.
Return
------
str : Name of the page.
"""
pass
@abstractmethod
def get_categories_storage_filename(self) -> str:
"""Implement the name under which the categories CSV files are stored.
Return
------
str : Filename for categories CSVs.
"""
pass
@abstractmethod
def get_products_storage_filename(self) -> str:
"""Implement the name under which the products CSV files are stored.
Return
------
str : Filename for products CSVs.
"""
pass
@property
@abstractmethod
def furnitures_categories(self) -> Generator:
"""Implement how to get furnitures categories from page.
Return
------
Generator : yield from furnitures found.
"""
pass
@abstractmethod
def get_product_id_lookup(soup_product) -> str:
"""Implement lookup to find product id.
Return
------
str : ID of the given BS4 product.
"""
pass
@abstractmethod
def get_product_url_lookup(soup_product) -> str:
"""Implement lookup to find product url.
Return
------
str : URL of the given BS4 product.
"""
pass
@abstractmethod
def get_product_name_lookup(soup_product) -> str:
"""Implement lookup to find product name.
Return
------
str : Name of the given BS4 product.
"""
pass
@abstractmethod
def get_product_price_lookup(soup_product) -> float:
"""Implement lookup to find product price.
Return
------
float : Price of the given BS4 product.
"""
pass
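# Illustrative sketch (not part of the original app): a minimal concrete page.
# "DemoPage" and every value below are assumptions showing how the abstract
# hooks fit together, not a real scraper target.
#
#     class DemoPage(BasePage):
#         def get_page_name(self) -> str:
#             return 'demo-shop'
#
#         def get_categories_storage_filename(self) -> str:
#             return 'demo_shop_categories'
#
#         def get_products_storage_filename(self) -> str:
#             return 'demo_shop_products'
#
#         @property
#         def furnitures_categories(self) -> Generator:
#             yield from ('sofas', 'tables')
#
#         ...  # plus the four get_product_*_lookup hooks for BS4 products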
# ---- next file: /backup/user_248/ch153_2020_04_13_20_48_31_775267.py (repo: gabriellaec/desoft-analise-exercicios) ----
def agrupa_por_idade(dic):
    # fix: don't overwrite the input dict with a hardcoded example, and key the
    # result by person name (assumed intent) so two equal ages don't collide
    dic2 = {}
    for name, age in dic.items():
        if age <= 11:
            dic2[name] = 'criança'
        elif age < 18:
            dic2[name] = 'adolescente'
        elif age < 60:
            dic2[name] = 'adulto'
        else:
            dic2[name] = 'idoso'
    return dic2
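# Exemplo (illustrative usage of the corrected mapping above):
#     agrupa_por_idade({'João': 10, 'Maria': 8, 'Helena': 67})
#     -> {'João': 'criança', 'Maria': 'criança', 'Helena': 'idoso'}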
# ---- next file: /dependencies/python/fmlaas/controller/utils/__init__.py (repo: Internet-SmokeAlarm/core) ----
from .project_operations import update_experiment
from .project_operations import termination_check
# ---- next file: /test/common_methods_invocations.py (repo: weihuangxu/pytorch, BSD-2/BSD-3/Apache-2.0) ----
import torch
from torch._six import inf, nan
from functools import reduce, wraps
from operator import mul, itemgetter
from torch.autograd import Variable, Function, detect_anomaly
from torch.testing import make_non_contiguous
from common_utils import (skipIfNoLapack,
prod_single_zero, random_square_matrix_of_rank,
random_symmetric_matrix, random_symmetric_psd_matrix,
random_symmetric_pd_matrix, make_nonzero_det,
random_fullrank_matrix_distinct_singular_value)
def index_variable(shape, max_indices):
if not isinstance(shape, tuple):
shape = (shape,)
index = torch.rand(*shape).mul_(max_indices).floor_().long()
return index
def index_perm_variable(shape, max_indices):
if not isinstance(shape, tuple):
shape = (shape,)
index = torch.randperm(max_indices).narrow(0, 0, reduce(mul, shape)).view(shape)
return index
def gather_variable(shape, index_dim, max_indices, duplicate=False):
assert len(shape) == 2
assert index_dim < 2
batch_dim = 1 - index_dim
index = torch.LongTensor(*shape)
for i in range(shape[index_dim]):
index.select(index_dim, i).copy_(
torch.randperm(max_indices)[:shape[batch_dim]])
if duplicate:
index.select(batch_dim, 0).copy_(index.select(batch_dim, 1))
return index
def bernoulli_scalar():
return torch.tensor(0, dtype=torch.uint8).bernoulli_()
def mask_not_all_zeros(shape):
assert len(shape) > 0
while True:
result = torch.randn(shape).gt(0)
if result.sum() > 0:
return result
def uniform_scalar(offset=0, requires_grad=False):
v = torch.rand(()) + offset
v.requires_grad = requires_grad
return v
def normal_scalar_clamp(amin, amax, requires_grad=False):
v = torch.randn(()).clamp(amin, amax)
v.requires_grad = requires_grad
return v
def prod_zeros(dim_size, dim_select):
assert len(dim_select) == 2
result = torch.randn(dim_size, dim_size, dim_size)
result.narrow(dim_select[0], 0, 1).narrow(dim_select[1], 1, 1).zero_()
result.narrow(dim_select[0], 2, 1).narrow(dim_select[1], 3, 1).zero_()
result.narrow(dim_select[0], 4, 1).narrow(dim_select[1], 3, 1).zero_()
return result
class non_differentiable(object):
def __init__(self, tensor):
self.tensor = tensor
class dont_convert(tuple):
pass
class NoArgsClass(object):
def __iter__(self):
return self
def __next__(self):
raise StopIteration()
next = __next__ # Python 2 compatibility
def __len__(self):
return 0
NO_ARGS = NoArgsClass()
L = 20
M = 10
S = 5
# (
#   method name,
#   input size/constructing fn,
#   args (tuple represents shape of a tensor arg),
#   test variant name (will be used at test name suffix),    // optional
#   indices for possible dim arg,                            // optional
#   test decorators, e.g. [skipIfNoLapack],                  // optional
#   fn mapping output to part that should be gradcheck'ed,   // optional
#   kwargs for the method call, e.g. {'dtype': torch.float64}  // optional
# )
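# Example (illustrative, decoding an entry that appears below): the tuple
#     ('addmm', (S, M), ((S, S), (S, M)), 'coef', (), (), lambda x: x,
#      {'beta': 0.2, 'alpha': 0.6})
# tests Tensor.addmm on a (5, 10) input with (5, 5) and (5, 10) tensor args,
# names the test variant 'coef', and forwards beta/alpha as kwargs.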
method_tests = [
('add', (S, S, S), ((S, S, S),)),
('add', (S, S, S), ((S, S),), 'broadcast_rhs'),
('add', (S, S), ((S, S, S),), 'broadcast_lhs'),
('add', (S, 1, S), ((M, S),), 'broadcast_all'),
('add', (), ((),), 'scalar'),
('add', (S, S, S), ((),), 'scalar_broadcast_rhs'),
('add', (), ((S, S, S),), 'scalar_broadcast_lhs'),
('add', (S, S, S), (3.14,), 'constant'),
('add', (), (3.14,), 'scalar_constant'),
('__radd__', (S, S, S), (3.14,), 'constant'),
('__radd__', (), (3.14,), 'scalar_constant'),
('sub', (S, S, S), ((S, S, S),)),
('sub', (S, S, S), ((S, S),), 'broadcast_rhs'),
('sub', (S, S), ((S, S, S),), 'broadcast_lhs'),
('sub', (S, 1, S), ((M, S),), 'broadcast_all'),
('sub', (S, S, S), ((),), 'scalar_broadcast_rhs'),
('sub', (), ((S, S, S),), 'scalar_broadcast_lhs'),
('sub', (S, S, S), (3.14,), 'constant'),
('sub', (), (3.14,), 'scalar_constant'),
('__rsub__', (S, S, S), (3.14,), 'constant'),
('__rsub__', (), (3.14,), 'scalar_constant'),
('mul', (S, S, S), ((S, S, S),)),
('mul', (), ((),), 'scalar'),
('mul', (S, S, S), ((S, S),), 'broadcast_rhs'),
('mul', (S, S), ((S, S, S),), 'broadcast_lhs'),
('mul', (S, 1, S), ((M, S),), 'broadcast_all'),
('mul', (S, S, S), ((),), 'scalar_broadcast_rhs'),
('mul', (), ((S, S, S),), 'scalar_broadcast_lhs'),
('mul', (S, S, S), (3.14,), 'constant'),
('mul', (), (3.14,), 'scalar_constant'),
('__rmul__', (S, S, S), (3.14,), 'constant'),
('__rmul__', (), (3.14,), 'scalar_constant'),
('div', (S, S, S), (torch.rand(S, S, S) + 0.1,)),
('div', (S, S, S), (torch.rand(S, S) + 0.1,), 'broadcast_rhs'),
('div', (S, S), (torch.rand(S, S, S) + 0.1,), 'broadcast_lhs'),
('div', (S, 1, S), (torch.rand(M, S) + 0.1,), 'broadcast_all'),
('div', (), (uniform_scalar(0.1),), 'scalar'),
('div', (S, S, S), (uniform_scalar(0.1),), 'scalar_broadcast_rhs'),
('div', (), (uniform_scalar(0.1),), 'scalar_broadcast_lhs'),
('div', torch.rand(S, S, S) + 1e-1, (3.14,), 'constant'),
('__rdiv__', torch.rand(S, S, S) + 1e-1, (3.14,), 'constant'),
('div', uniform_scalar(1e-1, requires_grad=True), (3.14,), 'scalar_constant'),
('__rdiv__', uniform_scalar(1e-1, requires_grad=True), (3.14,), 'scalar_constant'),
('pow', torch.rand(S, S, S) + 1e-3, (torch.rand(S, S, S) + 0.1,)),
('pow', torch.rand(S, S, S) + 1e-3, (torch.rand(1,) + 0.1,), 'broadcast_rhs'),
('pow', torch.rand(1,) + 1e-3, (torch.rand(S, S, S) + 0.1,), 'broadcast_lhs'),
('pow', torch.rand(S, 1, S) + 1e-3, (torch.rand(1, S, 1) + 0.1,), 'broadcast_all'),
('pow', uniform_scalar(1e-3, requires_grad=True), (uniform_scalar(0.1),), 'scalar'),
('pow', torch.rand(S, S, S) + 1e-3, (uniform_scalar(0.1),), 'scalar_broadcast_rhs'),
('pow', uniform_scalar(1e-3, requires_grad=True), (torch.rand(S, S, S) + 0.1,), 'scalar_broadcast_lhs'),
('pow', torch.rand(S, S, S) + 1e-3, (3.14,), 'constant'),
('__rpow__', torch.rand(S, S, S) + 1e-3, (3.14,), 'constant'),
('pow', uniform_scalar(1e-3, requires_grad=True), (3.14,), 'scalar_constant'),
('__rpow__', uniform_scalar(1e-3, requires_grad=True), (3.14,), 'scalar_constant'),
('transpose', (1, 2, 3), (1, 2), 'dim', [0, 1]),
('transpose', (), (0, 0), 'scalar'),
('transpose', (1,), (0, 0), '1d'),
('transpose', torch.rand(L, L), (0, 1), '2d'),
('transpose', torch.rand(S, S, S), (2, 0), '3d'),
('t', (1, 2), NO_ARGS),
('view', (S, S, S), (S * S, S),),
('view', (S, S, S), (torch.Size([S * S, S]),), 'size'),
('view', (S,), (S,), '1d'),
('view', (), (dont_convert(()),), 'scalar_to_scalar'),
('view', (), (1,), 'scalar_to_1d'),
('reshape', (S, S, S), (S * S, S),),
('reshape', (S, S, S), (torch.Size([S * S, S]),), 'size'),
('reshape', (S,), (S,), '1d'),
('reshape', (), (dont_convert(()),), 'scalar_to_scalar'),
('reshape', (), (1,), 'scalar_to_1d'),
('reshape_as', (S, S, S), (non_differentiable(torch.rand(S * S, S)),)),
('reshape_as', (), (non_differentiable(torch.tensor(42.)),), 'scalar'),
('reshape_as', (), (non_differentiable(torch.rand(1, 1)),), 'scalar_to_dims'),
('flip', (S, S, S), ([0],), 'd0'),
('flip', (S, S, S), ([0, 1, 2],), 'd012'),
('flip', (S, S, S), ([0, 2],), 'd02'),
('flip', (S, S, S), ([2, 0],), 'd20'),
('flip', (S, S, S), ([-1],), 'neg_d'),
('roll', (S, S, S), (0, 0), 'd0'),
('roll', (S, S, S), (1, 2), 'd12'),
('roll', (S, S, S), (0, 2,), 'd02'),
('roll', (S, S, S), (2, 0,), 'd20'),
('roll', (S, S, S), (-1, 0), 'neg_shift'),
('roll', (S, S, S), (10000, 1), 'loop_shift'),
('roll', (S, S, S), (2,), 'flattened'),
('roll', (S, S, S), ([1, 2, -1], [0, 1, 2]), 'three_dims'),
('rot90', (S, S, S), (1, [0, 1],), 'k1_d01'),
('rot90', (S, S, S), (1, [1, 2],), 'k1_d12'),
('rot90', (S, S, S), (1, [1, -1],), 'k1_neg_d'),
('rot90', (S, S, S), (), 'default'),
('view_as', (S, S, S), (non_differentiable(torch.rand(S * S, S)),)),
('view_as', (), (non_differentiable(torch.tensor(5.5)),), 'scalar'),
('view_as', (), (non_differentiable(torch.rand(1, 1)),), 'scalar_to_dims'),
('expand', (S, 1, 1), (S, S, S)),
('expand', (torch.Size([S, 1, S]),), (S, S, S), 'size'),
('expand', (S, 1), (S, S, S), 'new_dim'),
('expand', (1,), (S, S, S), '1_element'),
('expand', (1, S), (1, 1, S), 'new_dim_front_old_front_1'),
('expand', (), (dont_convert(()),), 'scalar_to_scalar'),
('expand', (), (1, 3, 2), 'scalar_to_dims'),
('exp', (S, S, S), NO_ARGS),
('exp', (), NO_ARGS, 'scalar'),
('expm1', (S, S, S), NO_ARGS),
('expm1', (), NO_ARGS, 'scalar'),
('erf', torch.rand(S, S, S), NO_ARGS),
('erf', uniform_scalar(requires_grad=True), NO_ARGS, 'scalar'),
('erfc', torch.rand(S, S, S), NO_ARGS),
('erfc', uniform_scalar(requires_grad=True), NO_ARGS, 'scalar'),
('erfinv', torch.rand(S, S, S).clamp(-0.9, 0.9), NO_ARGS),
('erfinv', normal_scalar_clamp(-0.9, 0.9, requires_grad=True), NO_ARGS, 'scalar'),
('log', torch.rand(S, S, S) + 1e-2, NO_ARGS),
('log', uniform_scalar(1e-2, requires_grad=True), NO_ARGS, 'scalar'),
('log10', torch.rand(S, S, S) + 1e-2, NO_ARGS),
('log10', uniform_scalar(1e-2, requires_grad=True), NO_ARGS, 'scalar'),
('log1p', torch.rand(S, S, S), NO_ARGS),
('log1p', uniform_scalar(requires_grad=True), NO_ARGS, 'scalar'),
('log2', torch.rand(S, S, S) + 1e-2, NO_ARGS),
('log2', uniform_scalar(1e-2, requires_grad=True), NO_ARGS, 'scalar'),
('tanh', (S, S, S), NO_ARGS),
('tanh', (), NO_ARGS, 'scalar'),
('sigmoid', (S, S, S), NO_ARGS),
('sigmoid', (), NO_ARGS, 'scalar'),
('sinh', (S, S, S), NO_ARGS),
('sinh', (), NO_ARGS, 'scalar'),
('cosh', (S, S, S), NO_ARGS),
('cosh', (), NO_ARGS, 'scalar'),
('abs', (S, S, S), NO_ARGS),
('abs', (), NO_ARGS, 'scalar'),
('clamp', (S, S, S), (0, 1)),
('clamp', (S, S, S), (None, 0.5), 'min'),
('clamp', (S, S, S), (0.5, None), 'max'),
('clamp', (), (0, 1), 'scalar'),
('clamp', (), (None, 0.5), 'min_scalar'),
('clamp', (), (0.5, None), 'max_scalar'),
('sqrt', torch.rand(S, S, S) + 5e-4, NO_ARGS),
('sqrt', uniform_scalar(5e-4, requires_grad=True), NO_ARGS, 'scalar'),
('sin', (S, S, S), NO_ARGS),
('sin', (), NO_ARGS, 'scalar'),
('cos', (S, S, S), NO_ARGS),
('cos', (), NO_ARGS, 'scalar'),
('tan', torch.randn(S, S, S).clamp(-1, 1), NO_ARGS),
('asin', torch.randn(S, S, S).clamp(-0.9, 0.9), NO_ARGS),
('acos', torch.randn(S, S, S).clamp(-0.9, 0.9), NO_ARGS),
('atan', (S, S, S), NO_ARGS),
('atan', (), NO_ARGS, 'scalar'),
('atan2', (S, S, S), ((S, S, S),)),
('atan2', (), ((),), 'scalar'),
('atan2', (S, S, S), ((S,),), 'broadcast_rhs'),
('atan2', (S,), ((S, S, S),), 'broadcast_lhs'),
('atan2', (S, 1, S), ((S, S),), 'broadcast_all'),
('reciprocal', torch.rand(S, S, S) + 0.1, NO_ARGS),
('reciprocal', uniform_scalar(0.1, requires_grad=True), NO_ARGS, 'scalar'),
('round', (S, S, S), NO_ARGS),
('round', (), NO_ARGS, 'scalar'),
('sign', (S, S, S), NO_ARGS),
('sign', (), NO_ARGS, 'scalar'),
('trunc', (S, S, S), NO_ARGS),
('trunc', (), NO_ARGS, 'scalar'),
('floor', (S, S, S), NO_ARGS),
('floor', (), NO_ARGS, 'scalar'),
('ceil', (S, S, S), NO_ARGS),
('ceil', (), NO_ARGS, 'scalar'),
('rsqrt', torch.rand(S, S, S) + 1e-2, NO_ARGS),
('rsqrt', uniform_scalar(1e-2, requires_grad=True), NO_ARGS, 'scalar'),
('frac', (S, S, S), NO_ARGS),
('frac', (), NO_ARGS, 'scalar'),
('fmod', (S, S, S), (1.5,)),
('fmod', (), (1.5,), 'scalar'),
('fmod', (S, S, S), (non_differentiable(torch.rand(S, S, S) + 1.5),), 'tensor'),
('fmod', (S,), (non_differentiable(torch.rand(S, S, S) + 1.5),), 'tensor_broadcast_lhs'),
('fmod', (S, S, S), (non_differentiable(torch.rand(S) + 1.5),), 'tensor_broadcast_rhs'),
('fmod', (S, 1, S), (non_differentiable(torch.rand(S, S) + 1.5),), 'tensor_broadcast_all'),
('fmod', (), (non_differentiable(uniform_scalar(1.5)),), 'scalar_tensor'),
('fmod', (), (non_differentiable(torch.rand(S, S, S) + 1.5),), 'scalar_tensor_broadcast_lhs'),
('fmod', (S, S, S), (non_differentiable(uniform_scalar(1.5)),), 'scalar_tensor_broadcast_rhs'),
('remainder', (S, S, S), (1.5,)),
('remainder', (), (1.5,), 'scalar'),
('remainder', (S, S, S), (non_differentiable(torch.rand(S, S, S) + 1.5),), 'tensor'),
('remainder', (S,), (non_differentiable(torch.rand(S, S, S) + 1.5),), 'tensor_broadcast_lhs'),
('remainder', (S, 1, S), (non_differentiable(torch.rand(S, S) + 1.5),), 'tensor_broadcast_all'),
('remainder', (), (non_differentiable(uniform_scalar(1.5)),), 'scalar_tensor'),
('remainder', (), (non_differentiable(torch.rand(S, S, S) + 1.5),), 'scalar_tensor_broadcast_lhs'),
('lerp', (S, S, S), ((S, S, S), 0.4)),
('lerp', (S, S, S), ((S,), 0.4), 'broadcast_rhs'),
('lerp', (S,), ((S, S, S), 0.4), 'broadcast_lhs'),
('lerp', (S, 1, S), ((S, S), 0.4), 'broadcast_all'),
('lerp', (), ((), 0.4), 'scalar'),
('lerp', (S, S, S), ((), 0.4), 'scalar_broadcast_rhs'),
('lerp', (), ((S, S, S), 0.4), 'scalar_broadcast_lhs'),
('max', (S, S, S), NO_ARGS),
('max', (S, S, S), (1,), 'dim', [0]),
('max', (S, S, S), (1, True,), 'keepdim_dim', [0]),
('max', (), NO_ARGS, 'scalar'),
('max', (), (0,), 'scalar_dim', [0]),
('max', (), (0, True,), 'scalar_keepdim_dim', [0]),
('max', (S, S, S), ((S, S, S),), 'elementwise'),
('max', (S, S, S), ((S,),), 'elementwise_broadcast_rhs'),
('max', (S,), ((S, S, S),), 'elementwise_broadcast_lhs'),
('max', (S, 1, S), ((S, S),), 'elementwise_broadcast_all'),
('max', (), ((),), 'scalar_elementwise'),
('max', (S, S, S), ((),), 'scalar_elementwise_broadcast_rhs'),
('max', (), ((S, S, S),), 'scalar_elementwise_broadcast_lhs'),
('min', (S, S, S), NO_ARGS),
('min', (S, S, S), (1,), 'dim', [0]),
('min', (S, S, S), (1, True,), 'keepdim_dim', [0]),
('min', (), NO_ARGS, 'scalar'),
('min', (), (0,), 'scalar_dim', [0]),
('min', (), (0, True,), 'scalar_keepdim_dim', [0]),
('min', (S, S, S), ((S, S, S),), 'elementwise'),
('min', (S, S, S), ((S,),), 'elementwise_broadcast_rhs'),
('min', (S,), ((S, S, S),), 'elementwise_broadcast_lhs'),
('min', (S, 1, S), ((S, S),), 'elementwise_broadcast_all'),
('min', (), ((),), 'scalar_elementwise'),
('min', (S, S, S), ((),), 'scalar_elementwise_broadcast_rhs'),
('min', (), ((S, S, S),), 'scalar_elementwise_broadcast_lhs'),
('mean', (S, S, S), NO_ARGS),
('mean', (S, S, S), (1,), 'dim', [0]),
('mean', (S, S, S), (1, True,), 'keepdim_dim', [0]),
('mean', (), NO_ARGS, 'scalar'),
('mean', (), (0,), 'scalar_dim', [0]),
('mean', (), (0, True,), 'scalar_keepdim_dim', [0]),
('kthvalue', (S, S, S), (2,)),
('kthvalue', (), (1,), 'scalar'),
('kthvalue', (S, S, S), (2, 1,), 'dim', [1]),
('kthvalue', (), (1, 0,), 'scalar_dim', [1]),
('kthvalue', (S, S, S), (2, 1, True,), 'keepdim_dim', [1]),
('kthvalue', (), (1, 0, True), 'scalar_keepdim_dim', [1]),
('kthvalue', (S,), (2, 0,), 'dim_1d', [1]),
('kthvalue', (S,), (2, 0, True,), 'keepdim_dim_1d', [1]),
('median', (S, S, S), NO_ARGS),
('median', (S, S, S), (1,), 'dim', [0]),
('median', (S, S, S), (1, True,), 'keepdim_dim', [0]),
('median', (), NO_ARGS, 'scalar'),
('median', (), (0,), 'scalar_dim', [0]),
('median', (), (0, True,), 'scalar_keepdim_dim', [0]),
('mode', (S, S, S), NO_ARGS),
('mode', (S, S, S), (1,), 'dim', [0]),
('mode', (S, S, S), (1, True,), 'keepdim_dim', [0]),
('mode', (), NO_ARGS, 'scalar'),
('mode', (), (0,), 'scalar_dim', [0]),
('mode', (), (0, True,), 'scalar_keepdim_dim', [0]),
('sum', (S, S, S), NO_ARGS),
('sum', (S, S, S), (1,), 'dim', [0]),
('sum', (S, S, S), (1, True,), 'keepdim_dim', [0]),
('sum', (), NO_ARGS, 'scalar'),
('sum', (), (0,), 'scalar_dim', [0]),
('sum', (), (0, True,), 'scalar_keepdim_dim', [0]),
('sum', (S, S, S), ([1, 2],), 'multi_dim'),
('sum', (S, S, S), ([1, 2], True,), 'multi_dim_keepdim'),
('prod', (S, S, S), NO_ARGS),
('prod', (S, S, S), (1,), 'dim', [0]),
('prod', (S, S, S), (1, True,), 'keepdim_dim', [0]),
('prod', (), NO_ARGS, 'scalar'),
('prod', (), (0,), 'scalar_dim', [0]),
('prod', (), (0, True,), 'scalar_keepdim_dim', [0]),
('prod', prod_zeros(S, [0, 1]), NO_ARGS, 'zerodims2'),
('prod', prod_zeros(S, [0, 2]), NO_ARGS, 'zerodims1'),
('prod', prod_zeros(S, [1, 2]), NO_ARGS, 'zerodims0'),
('prod', prod_zeros(S, [0, 1]), (1,), 'zeros_dims2', [0]),
('prod', prod_zeros(S, [0, 2]), (1,), 'zeros_dims1', [0]),
('prod', prod_zeros(S, [1, 2]), (1,), 'zeros_dims0', [0]),
('prod', prod_zeros(S, [0, 1]), (1, True), 'keepdim_zeros_dims2', [0]),
('prod', prod_zeros(S, [0, 2]), (1, True), 'keepdim_zeros_dims1', [0]),
('prod', prod_zeros(S, [1, 2]), (1, True), 'keepdim_zeros_dims0', [0]),
('prod', prod_single_zero(S), NO_ARGS, 'single_zero'),
('prod', (torch.tensor(0., requires_grad=True)), NO_ARGS, 'scalar_zero'),
('prod', (torch.tensor(0., requires_grad=True)), (0,), 'scalar_dim_zero', [0]),
('prod', (torch.tensor(0., requires_grad=True)), (0, True,), 'scalar_keepdim_dim_zero', [0]),
('var', (S, S, S), NO_ARGS),
('var', (S, S, S), (1,), 'dim', [0]),
('var', (S, S, S), (1, True, True), 'keepdim_dim', [0]),
('var', (S,), (0,), 'dim_1d', [0]),
('var', (S,), (0, True, True), 'keepdim_dim_1d', [0]),
('std', (S, S, S), NO_ARGS),
('std', (S, S, S), (1,), 'dim', [0]),
('std', (S, S, S), (1, True, True), 'keepdim_dim', [0]),
('std', (S,), (0,), 'dim_1d', [0]),
('std', (S,), (0, True, True), 'keepdim_dim_1d', [0]),
('renorm', (S, S, S), (2, 1, 0.5), 'dim', [1]),
('renorm', (S, S, S), (1, 2, 3), 'norm_1'),
('renorm', (S, S, S), (inf, 2, 0.5), 'norm_inf'),
('repeat', (S,), (2,), 'single_number'),
('repeat', (), (2, 3), 'scalar'),
('repeat', (2, 2), (3, 2)),
('repeat', (2, 2), (1, 3, 1, 2), 'unsqueeze'),
('cumsum', (S, S, S), (0,), 'dim0', [0]),
('cumsum', (S, S, S), (1,), 'dim1', [0]),
('cumsum', (S, S, S), (1,), 'dim1_cast', [0], (), lambda x: x, {'dtype': torch.float64}),
('cumsum', (), (0,), 'dim0_scalar', [0]),
('cumprod', (S, S, S), (0,)),
('cumprod', (S, S, S), (1,), 'dim1', [0]),
('cumprod', (), (0,), 'scalar'),
('cumprod', (torch.tensor(0., requires_grad=True)), (0,), 'scalar_zeros'),
('cumprod', prod_zeros(S, [0, 1]), (1,), 'zeros_dim2', [0]),
('cumprod', prod_zeros(S, [0, 2]), (1,), 'zeros_dim1', [0]),
('cumprod', prod_zeros(S, [1, 2]), (1,), 'zeros_dim0', [0]),
('cumprod', prod_zeros(S, [1, 2]), (1,), 'zeros_dim0_cast', [0], (), lambda x: x, {'dtype': torch.float64}),
('unfold', (), (0, 1, 1), 'scalar', [0]),
('unfold', (S, S, S, S), (1, 3, 1), '', [0]),
('unfold', (S, S, S), (2, 3, 2), 'lastdim', [0]),
('addmm', (S, M), ((S, S), (S, M)),),
('addmm', (1,), ((S, S), (S, M)), 'broadcast_lhs'),
('addmm', (S, M), ((S, S), (S, M)), 'coef', (), (), lambda x: x, {'beta': 0.2, 'alpha': 0.6}),
('addmm', (1,), ((S, S), (S, M)), 'broadcast_lhs_coef', (), (), lambda x: x, {'beta': 0.2, 'alpha': 0.6}),
('addmm', (), ((S, S), (S, M)), 'scalar_broadcast_lhs'),
('addmm', (), ((S, S), (S, M)), 'scalar_broadcast_lhs_coef', (), (), lambda x: x, {'beta': 0.2, 'alpha': 0.6}),
('addbmm', (S, M), ((S, S, S), (S, S, M)),),
('addbmm', (1,), ((S, S, S), (S, S, M)), 'broadcast_lhs'),
('addbmm', (S, M), ((S, S, S), (S, S, M)), 'coef', (), (), lambda x: x, {'beta': 0.2, 'alpha': 0.6}),
('addbmm', (1,), ((S, S, S), (S, S, M)), 'broadcast_lhs_coef', (), (), lambda x: x, {'beta': 0.2, 'alpha': 0.6}),
('addbmm', (), ((S, S, S), (S, S, M)), 'scalar_broadcast_lhs'),
('addbmm', (), ((S, S, S), (S, S, M)), 'scalar_broadcast_lhs_coef', (), (), lambda x: x,
{'beta': 0.2, 'alpha': 0.6}),
('baddbmm', (S, S, M), ((S, S, S), (S, S, M)),),
('baddbmm', (1,), ((S, S, S), (S, S, M)), 'broadcast_lhs'),
('baddbmm', (S, S, M), ((S, S, S), (S, S, M)), 'coef', (), (), lambda x: x, {'beta': 0.2, 'alpha': 0.6}),
('baddbmm', (1,), ((S, S, S), (S, S, M)), 'broadcast_lhs_coef', (), (), lambda x: x, {'beta': 0.2, 'alpha': 0.6}),
('baddbmm', (), ((S, S, S), (S, S, M)), 'scalar_broadcast_lhs'),
('baddbmm', (), ((S, S, S), (S, S, M)), 'scalar_broadcast_lhs_coef', (), (), lambda x: x,
{'beta': 0.2, 'alpha': 0.6}),
('addmv', (S,), ((S, M), (M,)),),
('addmv', (1,), ((S, M), (M,)), 'broadcast_lhs'),
('addmv', (S,), ((S, M), (M,)), 'coef', (), (), lambda x: x, {'beta': 0.2, 'alpha': 0.6}),
('addmv', (1,), ((S, M), (M,)), 'broadcast_lhs_coef', (), (), lambda x: x, {'beta': 0.2, 'alpha': 0.6}),
('addmv', (), ((S, M), (M,)), 'scalar_broadcast_lhs'),
('addmv', (), ((S, M), (M,)), 'scalar_broadcast_lhs_coef', (), (), lambda x: x, {'beta': 0.2, 'alpha': 0.6}),
('addr', (S, M), ((S,), (M,)),),
('addr', (), ((S,), (M,)), 'broadcast_lhs'),
('addr', (S, M), ((S,), (M,)), 'coef', (), (), lambda x: x, {'beta': 0.2, 'alpha': 0.6}),
('addr', (), ((S,), (M,)), 'broadcast_lhs_coef', (), (), lambda x: x, {'beta': 0.2, 'alpha': 0.6}),
('dot', (L,), ((L,),),),
('mm', (S, M), ((M, S),)),
('bmm', (M, S, M), ((M, M, S),)),
('mv', (S, M), ((M,),)),
('ger', (S,), ((M,),)),
('matmul', (L,), ((L,),),),
('matmul', (S, M), ((M,),), "2d_1d"),
('matmul', (M, ), ((M, S),), "1d_2d"),
('matmul', (S, M), ((M, S),), "2d_2d"),
('matmul', (S, S, M, M), ((S, S, M, S),), "4d_4d"),
('matmul', (S, S, M, M), ((M,),), "4d_1d"),
('matmul', (M,), ((S, S, M, S),), "1d_4d"),
('matrix_power', (S, S), [2], "n=2"),
('matrix_power', (S, S, S), [3], "n=3"),
('matrix_power', (S, S, S), [1], "n=1"),
('matrix_power', (S, S, S), [0], "n=0"),
('matrix_power', lambda: random_fullrank_matrix_distinct_singular_value(S), [-1], "n=-1",
NO_ARGS, [skipIfNoLapack]),
('matrix_power', lambda: random_fullrank_matrix_distinct_singular_value(S), [-3], "n=-3",
NO_ARGS, [skipIfNoLapack]),
('matrix_power', lambda: random_fullrank_matrix_distinct_singular_value(S, S), [-2], "n=-2",
NO_ARGS, [skipIfNoLapack]),
('mvlgamma', torch.empty(S,).uniform_(0.5, 1), [1], "p=1"),
('mvlgamma', torch.empty(S,).uniform_(1, 2), [2], "p=2"),
('mvlgamma', torch.empty(S, S).uniform_(1.5, 3), [3], "p=3"),
('mvlgamma', torch.empty(S, S).uniform_(2.5, 5), [5], "p=5"),
('addcmul', (S, S), ((S, S), (S, S))),
('addcmul', (S, S), ((S, 1), (1, S)), 'broadcast_rhs'),
('addcmul', (1,), ((S, S, 1), (1, S)), 'broadcast_all'),
('addcmul', (S, S), ((S, S), (S, S)), 'scale', (), (), lambda x: x, {'value': 0.5}),
('addcmul', (S, S), ((S, 1), (1, S)), 'scale_broadcast_rhs', (), (), lambda x: x, {'value': 0.5}),
('addcmul', (1,), ((S, S, 1), (1, S)), 'scale_broadcast_all', (), (), lambda x: x, {'value': 0.5}),
('addcmul', (), ((), ()), 'scalar'),
('addcmul', (S, S), ((), ()), 'scalar_broadcast_rhs'),
('addcmul', (), ((S, S, 1), (1, S)), 'scalar_broadcast_lhs'),
('addcmul', (), ((), ()), 'scalar_scale', (), (), lambda x: x, {'value': 0.5}),
('addcmul', (S, S), ((), ()), 'scalar_scale_broadcast_rhs', (), (), lambda x: x, {'value': 0.5}),
('addcmul', (), ((S, S, 1), (1, S)), 'scalar_scale_broadcast_lhs', (), (), lambda x: x, {'value': 0.5}),
('addcdiv', (S, S), ((S, S), (S, S))),
('addcdiv', (S, S), ((S, 1), (1, S)), 'broadcast_rhs'),
('addcdiv', (1,), ((S, S, 1), (1, S)), 'broadcast_all'),
('addcdiv', (S, S), ((S, S), (S, S)), 'scale', (), (), lambda x: x, {'value': 0.5}),
('addcdiv', (S, S), ((S, 1), (1, S)), 'scale_broadcast_rhs', (), (), lambda x: x, {'value': 0.5}),
('addcdiv', (1,), ((S, S, 1), (1, S)), 'scale_broadcast_all', (), (), lambda x: x, {'value': 0.5}),
('addcdiv', (), ((), ()), 'scalar'),
('addcdiv', (S, S), ((), ()), 'scalar_broadcast_rhs'),
('addcdiv', (), ((S, S, 1), (1, S)), 'scalar_broadcast_lhs'),
('addcdiv', (), ((), ()), 'scalar_scale', (), (), lambda x: x, {'value': 0.5}),
('addcdiv', (S, S), ((), ()), 'scalar_scale_broadcast_rhs', (), (), lambda x: x, {'value': 0.5}),
('addcdiv', (), ((S, S, 1), (1, S)), 'scalar_scale_broadcast_lhs', (), (), lambda x: x, {'value': 0.5}),
('zero_', (S, S, S), NO_ARGS),
('zero_', (), NO_ARGS, 'scalar'),
('logsumexp', (S, S), (1,)),
('logsumexp', (), (0,), 'scalar'),
('norm', (S, S), (), 'default'),
('norm', (S, S), (2,), '2'),
('norm', (S, S), (0,), '0'),
('norm', (S, S), (0.5,), '0_5'),
('norm', (S, S), (1,), '1'),
('norm', (S, S), (3,), '3'),
('norm', (S, S), (inf,), 'inf'),
('norm', (S, S), (-inf,), '-inf'),
('norm', (S, S), ('fro',), 'fro_default'),
('norm', (S, S), ('fro', [0, 1],), 'fro'),
('norm', (S, S), ('nuc',), 'nuc', NO_ARGS, [skipIfNoLapack]),
('norm', (S, S), (-1,), 'neg_1'),
('norm', (S, S), (-2,), 'neg_2'),
('norm', (S, S), (-0.5,), 'neg_0_5'),
('norm', (S, S), (-1.5,), 'neg_1_5'),
('norm', (S, S), (-2, 1,), 'neg_2_2_dim', [1]),
('norm', (S, S), (-1, 1,), 'neg_1_2_dim', [1]),
('norm', (S, S), (0, 1,), '0_2_dim', [1]),
('norm', (S, S), (1, 1,), '1_2_dim', [1]),
('norm', (S, S), (2, 1,), '2_2_dim', [1]),
('norm', (S, S), (3, 1,), '3_2_dim', [1]),
('norm', (S, S), (inf, 1,), 'inf_2_dim'),
('norm', torch.rand(S, S, S) + 5e-2, (1.5,), '1_5_default'),
('norm', (S, S, S), (2, 1), '2_dim', [1]),
('norm', (S, S, S), (3, 1), '3_dim', [1]),
('norm', torch.rand(S, S, S) + 5e-2, (1.5, 1), '1_5_dim', [1]),
('norm', (S, S, S), (2, 1, True), 'keepdim_2_dim', [1]),
('norm', (S, S, S), (3, 1, True), 'keepdim_3_dim', [1]),
('norm', torch.rand(S, S, S) + 5e-2, (1.5, 1, True), 'keepdim_1_5_dim', [1]),
('norm', (), (2, 0), '2_dim_scalar', [1]),
('norm', (), (3, 0), '3_dim_scalar', [1]),
('norm', (), (2, 0, True), 'keepdim_2_dim_scalar', [1]),
('norm', (), (3, 0, True), 'keepdim_3_dim_scalar', [1]),
('clone', (S, M, S), NO_ARGS),
('clone', (), NO_ARGS, 'scalar'),
('dist', (S, S, S), ((S, S, S),)),
('dist', (S, S, S), ((S,),), 'broadcast_rhs'),
('dist', (S,), ((S, S, S),), 'broadcast_lhs'),
('dist', (S, 1, S), ((S, S),), 'broadcast_all'),
('dist', (), ((),), 'scalar'),
('dist', (S, S, S), ((),), 'scalar_broadcast_rhs'),
('dist', (), ((S, S, S),), 'scalar_broadcast_lhs'),
('dist', (S, S, S), ((S, S, S), 4), '4'),
('dist', (S, S, S), ((S,), 4), '4_broadcast_rhs'),
('dist', (S,), ((S, S, S), 4), '4_broadcast_lhs'),
('dist', (S, 1, S), ((S, S), 4), '4_broadcast_all'),
('dist', (), ((), 4), 'scalar_4'),
('dist', (S, S, S), ((), 4), 'scalar_4_broadcast_rhs'),
('dist', (), ((S, S, S), 4), 'scalar_4_broadcast_lhs'),
('diag', (M, M), NO_ARGS, '2d'),
('diag', (3, 5), NO_ARGS, '2d_wide'),
('diag', (3, 5), (2,), '2d_wide_pos'),
('diag', (3, 5), (-2,), '2d_wide_neg'),
('diag', (5, 3), NO_ARGS, '2d_tall'),
('diag', (5, 3), (2,), '2d_tall_pos'),
('diag', (5, 3), (-2,), '2d_tall_neg'),
('diag', (M,), NO_ARGS, '1d'),
('diag', (M, M), (1,), '2d_1'),
('diag', (M, M), (2,), '2d_2'),
('diag_embed', (S, S), NO_ARGS),
('diagonal', (M, M), NO_ARGS, '2d'),
('diagonal', (3, 5), NO_ARGS, '2d_wide'),
('diagonal', (3, 5), (2,), '2d_wide_pos'),
('diagonal', (3, 5), (-2,), '2d_wide_neg'),
('diagonal', (5, 3), NO_ARGS, '2d_tall'),
('diagonal', (5, 3), (2,), '2d_tall_pos'),
('diagonal', (5, 3), (-2,), '2d_tall_neg'),
('diagonal', (M, M), (1,), '2d_1'),
('diagonal', (M, M), (2,), '2d_2'),
('diagonal', (M, M, M), (1, 1, 2), '3d_1'),
('diagonal', (M, M, M), (2, 0, 1), '3d_2'),
('diagonal', (M, M, M), (-2, 0, 1), '3d_3'),
('tril', (M, M), NO_ARGS),
('tril', (M, M), (2,), 'idx'),
('triu', (M, M), NO_ARGS),
('triu', (M, M), (2,), 'idx'),
('trace', (M, M), NO_ARGS),
('cross', (S, 3), ((S, 3),)),
('cross', (S, 3, S), ((S, 3, S), 1), 'dim'),
('index_select', (S, S, S), (0, index_variable(2, S)), 'dim', [0]),
('index_select', (), (0, torch.tensor([0], dtype=torch.int64)), 'scalar_mixed_dim', [0]),
('index_select', (), (0, torch.tensor(0, dtype=torch.int64)), 'scalar_dim', [0]),
('index_add', (S, S), (0, index_variable(2, S), (2, S)), 'dim', [0]),
('index_add', (), (0, torch.tensor([0], dtype=torch.int64), torch.tensor([2.])), 'scalar_input_dim', [0]),
('index_add', (), (0, torch.tensor(0, dtype=torch.int64), torch.tensor(2.)), 'scalar_all_dim', [0]),
('index_copy', (S, S), (0, index_perm_variable(2, S), (2, S)), 'dim', [0]),
('index_copy', (), (0, torch.tensor([0], dtype=torch.int64), torch.tensor([2.])), 'scalar_input_dim', [0]),
('index_copy', (), (0, torch.tensor(0, dtype=torch.int64), torch.tensor(2.)), 'scalar_all_dim', [0]),
('index_fill', (S, S), (0, index_variable(2, S), 2), 'dim', [0]),
# FIXME: we should compute the derivative w.r.t torch.tensor(2)
('index_fill', (S, S), (0, index_variable(2, S), non_differentiable(torch.tensor(2))),
'variable_dim', [0]),
('index_fill', (S, S), (0, torch.tensor(0, dtype=torch.int64), 2), 'scalar_index_dim', [0]),
('index_fill', (), (0, torch.tensor([0], dtype=torch.int64), 2), 'scalar_input_dim', [0]),
('index_fill', (), (0, torch.tensor(0, dtype=torch.int64), 2), 'scalar_both_dim', [0]),
('inverse', lambda: random_fullrank_matrix_distinct_singular_value(S), NO_ARGS, '', NO_ARGS, [skipIfNoLapack]),
('inverse', lambda: random_fullrank_matrix_distinct_singular_value(S, 2, 3),
NO_ARGS, 'batched', NO_ARGS, [skipIfNoLapack]),
('det', (S, S), NO_ARGS, '', NO_ARGS, [skipIfNoLapack]),
('det', (1, 1), NO_ARGS, '1x1', NO_ARGS, [skipIfNoLapack]),
('det', lambda: random_symmetric_matrix(S), NO_ARGS, 'symmetric', NO_ARGS, [skipIfNoLapack]),
('det', lambda: random_symmetric_psd_matrix(S), NO_ARGS, 'symmetric_psd', NO_ARGS, [skipIfNoLapack]),
('det', lambda: random_symmetric_pd_matrix(S), NO_ARGS, 'symmetric_pd', NO_ARGS, [skipIfNoLapack]),
('det', lambda: random_square_matrix_of_rank(S, S - 2), NO_ARGS, 'dim2_null', NO_ARGS, [skipIfNoLapack]),
('det', lambda: random_square_matrix_of_rank(S, 1), NO_ARGS, 'rank1', NO_ARGS, [skipIfNoLapack]),
('det', lambda: random_square_matrix_of_rank(S, 2), NO_ARGS, 'rank2', NO_ARGS, [skipIfNoLapack]),
('det', lambda: random_fullrank_matrix_distinct_singular_value(S), NO_ARGS,
'distinct_singular_values', NO_ARGS, [skipIfNoLapack]),
# For `logdet` and `slogdet`, the function at det=0 is not smooth.
# We need to exclude tests with det=0 (e.g. dim2_null, rank1, rank2) and use
# `make_nonzero_det` to make the random matrices have nonzero det. For
# `logdet`, we also set `make_nonzero_det(matrix, sign=1)` to make the
# matrix have positive det.
('logdet', lambda: make_nonzero_det(torch.randn(S, S), 1), NO_ARGS, '', NO_ARGS, [skipIfNoLapack]),
('logdet', lambda: make_nonzero_det(torch.randn(1, 1), 1), NO_ARGS, '1x1', NO_ARGS, [skipIfNoLapack]),
('logdet', lambda: make_nonzero_det(random_symmetric_matrix(S), 1), NO_ARGS,
'symmetric', NO_ARGS, [skipIfNoLapack]),
('logdet', lambda: make_nonzero_det(random_symmetric_pd_matrix(S), 1), NO_ARGS,
'symmetric_pd', NO_ARGS, [skipIfNoLapack]),
('logdet', lambda: make_nonzero_det(random_fullrank_matrix_distinct_singular_value(S), 1, 0), NO_ARGS,
'distinct_singular_values', NO_ARGS, [skipIfNoLapack]),
('slogdet', lambda: make_nonzero_det(torch.randn(1, 1), 1), NO_ARGS,
'1x1_pos_det', NO_ARGS, [skipIfNoLapack], itemgetter(1)),
('slogdet', lambda: make_nonzero_det(torch.randn(1, 1), -1), NO_ARGS,
'1x1_neg_det', NO_ARGS, [skipIfNoLapack], itemgetter(1)),
('slogdet', lambda: make_nonzero_det(torch.randn(S, S), 1), NO_ARGS,
'pos_det', NO_ARGS, [skipIfNoLapack], itemgetter(1)),
('slogdet', lambda: make_nonzero_det(torch.randn(S, S), -1), NO_ARGS,
'neg_det', NO_ARGS, [skipIfNoLapack], itemgetter(1)),
('slogdet', lambda: make_nonzero_det(random_symmetric_matrix(S)), NO_ARGS,
'symmetric', NO_ARGS, [skipIfNoLapack], itemgetter(1)),
('slogdet', lambda: random_symmetric_pd_matrix(S), NO_ARGS,
'symmetric_pd', NO_ARGS, [skipIfNoLapack], itemgetter(1)),
('slogdet', lambda: random_fullrank_matrix_distinct_singular_value(S), NO_ARGS,
'distinct_singular_values', NO_ARGS, [skipIfNoLapack], itemgetter(1)),
('symeig', lambda: random_symmetric_matrix(S), (True, False), 'lower', NO_ARGS, [skipIfNoLapack]),
('symeig', lambda: random_symmetric_matrix(S), (True, True), 'upper', NO_ARGS, [skipIfNoLapack]),
('symeig', lambda: random_symmetric_matrix(M), (True, True), 'large', NO_ARGS, [skipIfNoLapack]),
('svd', lambda: random_fullrank_matrix_distinct_singular_value(S), NO_ARGS, '', NO_ARGS, [skipIfNoLapack]),
('svd', lambda: random_fullrank_matrix_distinct_singular_value(S)[:(S - 2)], NO_ARGS,
'wide', NO_ARGS, [skipIfNoLapack]),
('svd', lambda: random_fullrank_matrix_distinct_singular_value(S)[:, :(S - 2)], NO_ARGS,
'tall', NO_ARGS, [skipIfNoLapack]),
('svd', lambda: random_fullrank_matrix_distinct_singular_value(S)[:(S - 2)], (False,),
'wide_all', NO_ARGS, [skipIfNoLapack], lambda usv: (usv[0], usv[1], usv[2][:, :(S - 2)])),
('svd', lambda: random_fullrank_matrix_distinct_singular_value(S)[:, :(S - 2)], (False,),
'tall_all', NO_ARGS, [skipIfNoLapack], lambda usv: (usv[0][:, :(S - 2)], usv[1], usv[2])),
('svd', lambda: random_fullrank_matrix_distinct_singular_value(M), NO_ARGS,
'large', NO_ARGS, [skipIfNoLapack]),
('gesv', (S, S), (random_fullrank_matrix_distinct_singular_value(S, silent=True),), '', NO_ARGS, [skipIfNoLapack]),
('gesv', (S, S, S), (random_fullrank_matrix_distinct_singular_value(S, S, silent=True),),
'batched', NO_ARGS, [skipIfNoLapack]),
('gesv', (2, 3, S, S), (random_fullrank_matrix_distinct_singular_value(S, 2, 3, silent=True),),
'batched_dims', NO_ARGS, [skipIfNoLapack]),
('gesv', (2, 2, S, S), (random_fullrank_matrix_distinct_singular_value(S, 1, silent=True),),
'batched_broadcast_A', NO_ARGS, [skipIfNoLapack]),
('gesv', (1, S, S), (random_fullrank_matrix_distinct_singular_value(S, 2, 2, silent=True),),
'batched_broadcast_b', NO_ARGS, [skipIfNoLapack]),
('fill_', (S, S, S), (1,), 'number'),
('fill_', (), (1,), 'number_scalar'),
# FIXME: we should compute the derivative w.r.t torch.tensor(1)
('fill_', (S, S, S), (non_differentiable(torch.tensor(1)),), 'variable'),
('eq_', (S, S, S), ((S, S, S),)),
('eq_', (S, S, S), ((1,),), 'broadcast_rhs'),
('eq_', (), ((),), 'scalar'),
('eq_', (S, S, S), ((),), 'scalar_broadcast_rhs'),
('ne_', (S, S, S), ((S, S, S),)),
('ne_', (S, S, S), ((1,),), 'broadcast_rhs'),
('ne_', (), ((),), 'scalar'),
('ne_', (S, S, S), ((),), 'scalar_broadcast_rhs'),
('gt_', (S, S, S), ((S, S, S),)),
('gt_', (S, S, S), ((1,),), 'broadcast_rhs'),
('gt_', (), ((),), 'scalar'),
('gt_', (S, S, S), ((),), 'scalar_broadcast_rhs'),
('ge_', (S, S, S), ((S, S, S),)),
('ge_', (S, S, S), ((1,),), 'broadcast_rhs'),
('ge_', (), ((),), 'scalar'),
('ge_', (S, S, S), ((),), 'scalar_broadcast_rhs'),
('lt_', (S, S, S), ((S, S, S),)),
('lt_', (S, S, S), ((1,),), 'broadcast_rhs'),
('lt_', (), ((),), 'scalar'),
('lt_', (S, S, S), ((),), 'scalar_broadcast_rhs'),
('le_', (S, S, S), ((S, S, S),)),
('le_', (S, S, S), ((1,),), 'broadcast_rhs'),
('le_', (), ((),), 'scalar'),
('le_', (S, S, S), ((),), 'scalar_broadcast_rhs'),
('eq_', (S, S, S), (0,), 'pyscalar'),
('ne_', (S, S, S), (0,), 'pyscalar'),
('gt_', (S, S, S), (0,), 'pyscalar'),
('ge_', (S, S, S), (0,), 'pyscalar'),
('le_', (S, S, S), (0,), 'pyscalar'),
('lt_', (), (0,), 'pyscalar'),
('eq_', (), (0,), 'pyscalar_scalar'),
('ne_', (), (0,), 'pyscalar_scalar'),
('gt_', (), (0,), 'pyscalar_scalar'),
('ge_', (), (0,), 'pyscalar_scalar'),
('lt_', (), (0,), 'pyscalar_scalar'),
('le_', (), (0,), 'pyscalar_scalar'),
('permute', (1, 2, 3, 4), (0, 2, 3, 1)),
('permute', (1, 2, 3, 4), (0, -2, -1, 1), 'neg_dim'),
('permute', (), (dont_convert(()),), 'scalar'),
('select', (S, S, S), (1, 2), 'dim', [0]),
('select', (S, S, S), (1, -1), 'wrap_dim', [0]),
('select', (S,), (0, 2), '1d'),
('narrow', (S, S, S), (1, 2, 2), 'dim', [0]),
('narrow', (S, S, S), (1, 0, 0), 'empty_dim', [0]),
('squeeze', (S, 1, S, 1), NO_ARGS),
('squeeze', (1, 1, 1, 1), NO_ARGS, 'input_sizes_are_ones'),
('squeeze', (S, 1, S, 1), (1,), '1_dim', [0]),
('squeeze', (S, 1, S, 1), (2,), 'not_1_dim', [0]),
('squeeze', (), (0,), 'scalar', [0]),
('unsqueeze', (S, S, S), (0,), 'first', [0]),
('unsqueeze', (S, S, S), (1,), 'middle', [0]),
('unsqueeze', (S, S, S), (3,), 'last', [0]),
('unsqueeze', (), (0,), 'scalar', [0]),
('chunk', (S, S, S), (2,)),
('chunk', (S, S, S), (S, 1), 'dim', [1]),
('split', (S, S, S), (2,)),
('split', (S, S, S), (S, 1), 'dim', [1]),
('split', (S, S, S), ([int(S / 3), S - int(S / 3) * 2, int(S / 3)],), 'size_list'),
('split', (S, S, S), ([int(S / 2), S - int(S / 2) * 2, int(S / 2)], 2), 'size_list_dim', [1]),
('gather', (M, S), (0, gather_variable((S, S), 1, M, True)), 'dim0', [0]),
('gather', (M, S), (1, gather_variable((M, S // 2), 0, S, True)), 'dim1', [0]),
('gather', (), (0, torch.tensor([0], dtype=torch.int64)), 'scalar_input', [0]),
('gather', (S,), (0, torch.tensor(0, dtype=torch.int64)), 'scalar_index', [0]),
('gather', (), (0, torch.tensor(0, dtype=torch.int64)), 'scalar_both', [0]),
('scatter', (M, S), (0, gather_variable((S, S), 1, M), (S, S)), 'dim0', [0]),
('scatter', (M, S), (1, gather_variable((M, S // 2), 0, S), (M, S // 2)), 'dim1', [0]),
('scatter', (), (0, torch.tensor(0, dtype=torch.int64), ()), 'scalar_all_dim0', [0]),
('scatter_add', (M, S), (0, gather_variable((S, S), 1, M), (S, S)), 'dim0', [0]),
('scatter_add', (M, S), (1, gather_variable((M, S // 2), 0, S), (M, S // 2)), 'dim1', [0]),
('scatter_add', (), (0, torch.tensor(0, dtype=torch.int64), ()), 'scalar_all_dim0', [0]),
('masked_select', (M, M), (mask_not_all_zeros((M, M)),)),
('masked_select', (M, M), (mask_not_all_zeros((M,)),), 'broadcast_rhs'),
('masked_select', (M,), (mask_not_all_zeros((M, M)),), 'broadcast_lhs'),
('masked_select', (M, 1, M), (mask_not_all_zeros((M, M)),),
'broadcast_all'),
('masked_select', (), (torch.tensor(1, dtype=torch.uint8),), 'scalar'),
('masked_select', (M, M), (torch.tensor(1, dtype=torch.uint8),), 'scalar_broadcast_rhs'),
('masked_select', (), (mask_not_all_zeros((M, M)),), 'scalar_broadcast_lhs'),
('masked_fill', (M, M), (torch.ByteTensor(M, M).bernoulli_(), 10)),
('masked_fill', (M, M), (torch.ByteTensor(M, M).bernoulli_(), torch.tensor(10)), 'tensor'),
# no lhs or all broadcast on masked_fill or masked_scatter because it's always inplace
('masked_fill', (M, M), (torch.ByteTensor(M,).bernoulli_(), 10), 'broadcast_rhs'),
('masked_fill', (), (torch.tensor(0, dtype=torch.uint8, requires_grad=False).bernoulli_(), 10), 'scalar'),
('masked_fill', (), (torch.tensor(0, dtype=torch.uint8, requires_grad=False).bernoulli_(), torch.tensor(10)),
'scalar_variable'),
('masked_fill', (M, M), (torch.tensor(0, dtype=torch.uint8, requires_grad=False).bernoulli_(), 10),
'scalar_broadcast_rhs'),
('masked_scatter', (M, M), (torch.ByteTensor(M, M).bernoulli_(), (M, M))),
('masked_scatter', (M, M), (torch.ByteTensor(M,).bernoulli_(), (M, M)),
'broadcast_rhs'),
('masked_scatter', (M, M), (bernoulli_scalar(), (M, M)), 'scalar'),
('masked_scatter', (M, M), (bernoulli_scalar(), (M, M)),
'scalar_broadcast_rhs'),
('resize_', (S, S, S), (torch.Size([S * S, S])), 'fewer_dims'),
('resize_', (), (dont_convert(()),), 'scalar'),
('resize_', (), (torch.Size([1, 1, 1])), 'scalar_to_dims'),
('resize_as_', (), (non_differentiable(torch.tensor(5.)),), 'scalar'),
('resize_as_', (), (non_differentiable(torch.randn((1, 1, 1))),), 'scalar_to_dims'),
('resize_as_', (S, S, S), (non_differentiable(torch.randn(S * S, S)),)),
('sort', (S, M, S), NO_ARGS),
('sort', (S, M, S), (1,), 'dim'),
('sort', (S, M, S), (1, True), 'dim_desc'),
('sort', (), NO_ARGS, 'scalar'),
('sort', (), (0,), 'dim_scalar'),
('sort', (), (0, True), 'dim_desc_scalar'),
('topk', (S, M, S), (3,)),
('topk', (S, M, S), (3, 1), 'dim', [1]),
('topk', (S, M, S), (3, 1, True), 'dim_desc', [1]),
('topk', (S, M, S), (3, 1, True, True), 'dim_desc_sort', [1]),
('topk', (), (1,), 'scalar'),
('topk', (), (1, 0), 'dim_scalar', [1]),
('topk', (), (1, 0, True), 'dim_desc_scalar', [1]),
('topk', (), (1, 0, True, True), 'dim_desc_sort_scalar', [1]),
('take', (S, S, S), (torch.LongTensor([[-3, 2], [20, 2]]),)),
('take', (S, S, S), (torch.tensor(0, dtype=torch.int64),), 'scalar_index'),
('take', (), (torch.LongTensor([0]),), 'scalar_data'),
('take', (), (torch.tensor(0, dtype=torch.int64),), 'scalar_both'),
('where', (M, M), (mask_not_all_zeros((M, M)), (M, M))),
('where', (M, 1, M), (mask_not_all_zeros((M, M)), (M, M, 1)), 'broadcast_all'),
('where', (), (bernoulli_scalar(), ()), 'scalar'),
('where', (M, 1, M), (bernoulli_scalar(), (M, M, 1)), 'scalar_broadcast_mask'),
('where', (), (mask_not_all_zeros((M, M)), ()), 'scalar_broadcast_non_mask'),
('__getitem__', torch.randn(S, S, S), (dont_convert([1, 2]),)),
('__getitem__', torch.randn(S, S, S), (slice(0, 3),), 'slice'),
('__getitem__', torch.randn(S, S, S), (dont_convert([slice(0, 3), 1]),), 'slice_index'),
('__getitem__', torch.randn(S, S, S), (dont_convert([[0, 2, 3], [1, 3, 3], [0, 0, 2]]),), 'adv_index'),
('__getitem__', torch.randn(S, S, S), (dont_convert([[0, 0, 3], [1, 1, 3], [0, 0, 2]]),), 'adv_index_dup'),
('__getitem__', torch.randn(S, S, S), (dont_convert([slice(None), slice(None), [0, 3]]),), 'adv_index_end'),
('__getitem__', torch.randn(S, S, S), (dont_convert([slice(None), [0, 3], slice(None)]),), 'adv_index_mid'),
('__getitem__', torch.randn(S, S, S), (dont_convert([[0, 3], slice(None), slice(None)]),), 'adv_index_beg'),
('__getitem__', torch.randn(S, S, S), (dont_convert([[0, 3], [1, 2], slice(None)]),), 'adv_index_comb'),
('__getitem__', torch.randn(S, S, S), (dont_convert([[0, 3], ]),), 'adv_index_sub'),
('__getitem__', torch.randn(S, S, S), (dont_convert([[0, 3], slice(None)]),), 'adv_index_sub_2'),
('__getitem__', torch.randn(S, S, S), (dont_convert([[0, 3], Ellipsis]),), 'adv_index_sub_3'),
('__getitem__', torch.randn(S, S, S), (dont_convert([[0, 2, 3], [1, 3, 3],
torch.LongTensor([0, 0, 2])]),), 'adv_index_var'),
]
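# The tuples above appear to follow a positional schema (inferred from the
# entries themselves, not stated explicitly here): (method name, size or
# constructor of the input tensor, call args, optional test-name suffix,
# indices of args that are dims, skip decorators, output-postprocessing fn,
# kwargs). For example, ('norm', (S, S), (-2, 1,), 'neg_2_2_dim', [1]) means:
# call .norm(-2, 1) on a random (S, S) tensor, name the test 'neg_2_2_dim',
# and treat the argument at index 1 as a dimension index.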
# TODO: clamp with min/max
def create_input(call_args, requires_grad=True, non_contiguous=False, call_kwargs=None):
if not isinstance(call_args, tuple):
call_args = (call_args,)
def map_arg(arg):
def maybe_non_contig(tensor):
return tensor if not non_contiguous else make_non_contiguous(tensor)
if isinstance(arg, torch.Size) or isinstance(arg, dont_convert):
return arg
elif isinstance(arg, tuple) and len(arg) == 0:
var = torch.randn((), dtype=torch.double)
var.requires_grad = requires_grad
return var
elif isinstance(arg, tuple) and not isinstance(arg[0], torch.Tensor):
return Variable(maybe_non_contig(torch.randn(*arg, dtype=torch.double)), requires_grad=requires_grad)
        elif isinstance(arg, non_differentiable):
            # non_differentiable wrappers are unwrapped and returned without
            # requires_grad being set (made non-contiguous if requested)
            return maybe_non_contig(arg.tensor)
elif isinstance(arg, torch.Tensor):
if arg.dtype == torch.float:
arg = arg.double()
v = maybe_non_contig(arg).detach()
v.requires_grad = requires_grad and v.is_floating_point()
return v
elif callable(arg):
return map_arg(arg())
else:
return arg
args_out = tuple(map_arg(arg) for arg in call_args)
kwargs_out = {k: map_arg(v) for k, v in call_kwargs.items()} if call_kwargs else {}
return args_out, kwargs_out
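# Minimal usage sketch for create_input (hypothetical values; S is the small
# size constant defined earlier in this file):
#
#   args, kwargs = create_input(((S, S), (S, 1)), requires_grad=True)
#   # -> two double tensors of shapes (S, S) and (S, 1), both with
#   #    requires_grad=True; torch.Size and dont_convert arguments pass
#   #    through unchanged, and callable specs are invoked before mapping.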
def unpack_variables(args):
if isinstance(args, tuple):
return tuple(unpack_variables(elem) for elem in args)
else:
return args
EXCLUDE_FUNCTIONAL = {
'addmm',
'addmm_',
'addbmm',
'baddbmm',
'addmv',
'addmv_',
'addr',
'addr_',
'reshape',
'where' # argument order
}
EXCLUDE_GRADCHECK = {
}
EXCLUDE_GRADGRADCHECK = {
}
EXCLUDE_GRADGRADCHECK_BY_TEST_NAME = {
    # *det methods use svd in backward when the matrix is not invertible. However,
# svd backward is unstable unless the matrix has positive distinct singular
# values. Generated random matrices satisfy this with high probability, but
# we can't rely on it. So only test gradgrad on invertible test cases and
# _distinct_singular_values.
'test_det',
'test_det_1x1',
'test_det_symmetric',
'test_det_symmetric_psd',
'test_det_dim2_null',
'test_det_rank1',
'test_det_rank2',
'test_logdet',
'test_logdet_1x1',
'test_logdet_symmetric',
'test_slogdet_1x1_neg_det',
'test_slogdet_neg_det',
'test_slogdet_symmetric',
}
def exclude_tensor_method(name, test_name):
# there are no tensor equivalents for these (inplace or out)
exclude_all_tensor_method_by_test_name = {
'test_clamp_min',
'test_clamp_max',
'test_clamp_min_scalar',
'test_clamp_max_scalar',
'test_slice',
'test_where',
'test_where_broadcast_all',
'test_where_scalar',
'test_where_scalar_broadcast_mask',
'test_where_scalar_broadcast_non_mask',
}
# there are no out-of-place tensor equivalents for these
exclude_outplace_tensor_method = {
'index_add',
'index_copy',
'index_fill',
'masked_fill',
'masked_scatter',
'scatter',
'scatter_add',
'det',
}
if test_name in exclude_all_tensor_method_by_test_name:
return True
is_magic_method = name[:2] == '__' and name[-2:] == '__'
is_inplace = name[-1] == "_" and not is_magic_method
if not is_inplace and name in exclude_outplace_tensor_method:
return True
return False
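# Worked example of the checks above: 'masked_fill' has no trailing underscore,
# so is_inplace is False and it is excluded via exclude_outplace_tensor_method,
# while 'masked_fill_' counts as inplace and is kept; '__getitem__' ends in
# '_' but is a magic method, so it is not treated as inplace.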
# === /combgen/gray_graph/multiradix.py | sahands/coroutine-generation | no_license | Python | 526 bytes ===
from combgen.multiradix_gray.coroutine import gen_all
from .grapher import generate_pgf_gray_graph
def dist(a, b):
return sum(abs(x - y) for x, y in zip(a, b))
def neighbour(u, v):
return dist(u, v) == 1
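# Example (assumed Gray-graph semantics): dist([0, 0, 1], [0, 1, 1]) == 1,
# so the two codewords differ by a unit step in exactly one digit and
# neighbour() marks them as adjacent.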
def to_str(a):
return '"${}$"'.format(''.join(str(x) for x in a))
def main():
M = [3, 2, 3]
generate_pgf_gray_graph(gen_all, neighbour, to_str, M)
# print()
# print()
# generate_pgf_gray_graph(multiradix_gray_coroutine, neighbour, to_str, M)
if __name__ == '__main__':
main()
# === /liquepy/num/o3.py | geosharma/liquepy | MIT | Python | 2,492 bytes ===
from liquepy.num.models import PM4Sand as PM4SandBase
from liquepy.num.models import StressDensityModel as StressDensityModelBase
from liquepy.num import models
class PM4Sand(PM4SandBase):
type = "pm4sand"
o3_type = 'pm4sand'
def __init__(self, wmd=None, liq_mass_density=None, liq_sg=1.0, g=9.8, p_atm=101000.0, **kwargs):
PM4SandBase.__init__(self, wmd=wmd, liq_mass_density=liq_mass_density, liq_sg=liq_sg, g=g, p_atm=p_atm, **kwargs)
self._extra_class_inputs = []
self.app2mod = {
'd_r': 'relative_density',
'g_o': 'g0_mod',
'den': 'unit_moist_mass',
'nu': 'poissons_ratio'
}
def __repr__(self):
return "PM4SandO3 Soil model, id=%i, phi=%.1f, Dr=%.2f" % (self.id, self.phi, self.relative_density)
def __str__(self):
return "PM4SandO3 Soil model, id=%i, phi=%.1f, Dr=%.2f" % (self.id, self.phi, self.relative_density)
class ManzariDafaliasModel(models.ManzariDafaliasModel):
o3_type = 'manzaridafalias_model'
def __init__(self, wmd=None, liq_mass_density=None, liq_sg=1.0, g=9.8, p_atm=101000.0, **kwargs):
models.ManzariDafaliasModel.__init__(self, wmd=wmd, liq_mass_density=liq_mass_density, liq_sg=liq_sg, g=g, p_atm=p_atm, **kwargs)
self._extra_class_inputs = []
self.app2mod = {
'den': 'unit_moist_mass',
'nu': 'poissons_ratio'
}
def __repr__(self):
return f"ManzariDafaliasModelO3 Soil model, id={self.id}, m_c={self.m_c:.1f}, e_curr={self.e_curr:.2f}"
def __str__(self):
return f"ManzariDafaliasModelO3 Soil model, id={self.id}, m_c={self.m_c:.1f}, e_curr={self.e_curr:.2f}"
class StressDensityModel(StressDensityModelBase):
type = "stress_density_model"
def __init__(self, wmd=None, liq_mass_density=None, liq_sg=1.0, g=9.8, p_atm=101000.0, **kwargs):
super(StressDensityModel, self).__init__(wmd=wmd, liq_mass_density=liq_mass_density, liq_sg=liq_sg, g=g, p_atm=p_atm, **kwargs)
self._extra_class_inputs = []
self.app2mod = {
'e_init': 'e_curr',
'den': 'unit_moist_mass',
'nu': 'poissons_ratio',
'n': 'a'
}
    def __repr__(self):
        return "StressDensityModelO3 Soil model, id=%i, phi=%.1f, Dr=%.2f" % (self.id, self.phi, self.relative_density)
    def __str__(self):
        return "StressDensityModelO3 Soil model, id=%i, phi=%.1f, Dr=%.2f" % (self.id, self.phi, self.relative_density)
# === /baekjoon4504.py | beOk91/baekjoon2 | no_license | Python | 172 bytes ===
n=int(input())
while True:
m=int(input())
if m==0:
break
print(("{} is NOT ".format(m) if m%n!=0 else "{} is ".format(m))+"a multiple of {}.".format(n)) | [
"[email protected]"
]
| |
# === /Solutions/MissingNumber/missingNo.py | phibzy/InterviewQPractice | no_license | Python | 478 bytes ===
#!/usr/bin/python3
"""
@author : Chris Phibbs
@created : Thursday Mar 04, 2021 13:24:11 AEDT
@file : missingNo
"""
class Solution:
# The cheeeeeeeeeeeese
# Just find the difference between the expected sum of all numbers 0 to N
# and the sum of everything in nums
    # TC: O(N) - both sums are linear scans of at most N+1 values
# SC: O(1)
def missingNumber(self, nums):
return (sum(range(len(nums)+1)) - sum(nums))
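    # Example: missingNumber([3, 0, 1]) -> sum(range(4)) - sum([3, 0, 1])
    # = 6 - 4 = 2. The expected sum alone could also be computed in O(1)
    # as n * (n + 1) // 2 with n = len(nums).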
# === /pretreatment/barrages_prepro.py | gdh756462786/transformer_barrages | Apache-2.0 | Python | 2,925 bytes ===
# -*- coding: utf-8 -*-
"""
@date: 2020.1.10
@author: liluoqin
@function:
process pretreatment data
"""
import os
import errno
import sentencepiece as spm
import re
import logging
import jieba
import sys
from sklearn.model_selection import train_test_split
sys.path.append("..")
from hparams import Hparams
logging.basicConfig(level=logging.INFO)
file_path = os.path.dirname(__file__)
def prepro(hp):
barrages_data = os.path.join(file_path, '..', hp.barrages_data)
# train
_prepro = lambda x: [line.split("\t")[0] for line in open(x, 'r', encoding="utf-8").readlines()
if not line.startswith("barrage")]
def _write(sents, fname):
with open(fname, 'w', encoding="utf-8") as fout:
fout.write("\n".join(sents))
logging.info("# Preprocessing")
prepro_sents = _prepro(barrages_data)
logging.info("# write preprocessed files to disk")
os.makedirs("../barrages_data/prepro", exist_ok=True)
# split data
train_x, test_x, train_y, test_y = train_test_split(prepro_sents, prepro_sents, test_size=0.2)
_write(prepro_sents, "../barrages_data/prepro/all_sents.txt")
_write(train_x, "../barrages_data/prepro/train_x.txt")
_write(train_y, "../barrages_data/prepro/train_y.txt")
_write(test_x, "../barrages_data/prepro/test_x.txt")
_write(test_y, "../barrages_data/prepro/test_y.txt")
logging.info("# Train a joint BPE model with sentencepiece")
os.makedirs("../barrages_data/segmented", exist_ok=True)
train = '--input=../barrages_data/prepro/all_sents.txt --pad_id=0 --unk_id=1 \
--bos_id=2 --eos_id=3\
--model_prefix=../barrages_data/segmented/bpe --vocab_size={} \
--model_type=bpe'.format(hp.vocab_size)
spm.SentencePieceTrainer.Train(train)
logging.info("# Load trained bpe model")
sp = spm.SentencePieceProcessor()
sp.Load("../barrages_data/segmented/bpe.model")
logging.info("# Segment")
def _segment_and_write(sents, fname):
with open(fname, "w", encoding="utf-8") as fout:
for sent in sents:
pieces = sp.EncodeAsPieces(sent)
fout.write(" ".join(pieces) + "\n")
_segment_and_write(train_x, "../barrages_data/segmented/train_x.bpe")
_segment_and_write(train_y, "../barrages_data/segmented/train_y.bpe")
_segment_and_write(test_x, "../barrages_data/segmented/test_x.bpe")
_segment_and_write(test_y, "../barrages_data/segmented/test_y.bpe")
logging.info("# Let's see how segmented data look like")
print("train:", open("../barrages_data/segmented/train_x.bpe", 'r', encoding="utf-8").readline())
print("test:", open("../barrages_data/segmented/test_x.bpe", 'r', encoding="utf-8").readline())
if __name__ == "__main__":
hparams = Hparams()
parser = hparams.parser
hp = parser.parse_args()
prepro(hp)
logging.info("# Done") | [
"[email protected]"
]
| |
# === /iom/iom/admin.py | tkleinen/acaciadata | no_license | Python | 2,861 bytes ===
'''
Created on Jun 16, 2015
@author: theo
'''
from django.contrib import admin
from django import forms
from django.forms import Textarea
from django.contrib.gis.db import models
from .models import UserProfile, Adres, Waarnemer, Meetpunt, Watergang, Organisatie
from acacia.data.models import Series, DataPoint
from django.core.exceptions import ValidationError
from django.contrib.auth.admin import UserAdmin
from django.contrib.auth.models import User
import re
class UserProfileInline(admin.StackedInline):
model = UserProfile
can_delete = False
verbose_name_plural = 'profile'
class UserAdmin(UserAdmin):
inlines = (UserProfileInline, )
# Re-register UserAdmin
admin.site.unregister(User)
admin.site.register(User, UserAdmin)
@admin.register(Watergang)
class WatergangAdmin(admin.ModelAdmin):
list_display = ('identifica', 'naamnl', 'typewater', 'breedtekla', 'hoofdafwat')
search_fields = ('identifica', 'naamnl', )
list_filter = ('hoofdafwat', 'breedtekla', 'typewater')
class DataPointInline(admin.TabularInline):
model = DataPoint
class SeriesInline(admin.TabularInline):
model = Series
inlines = (DataPointInline,)
@admin.register(Meetpunt)
class MeetpuntAdmin(admin.ModelAdmin):
list_display = ('name', 'nummer', 'waarnemer')
list_filter = ('waarnemer', )
search_fields = ('name', 'nummer', 'waarnemer', )
fields = ('waarnemer','nummer', 'location', 'watergang','description', )
formfield_overrides = {models.PointField:{'widget': Textarea}}
raw_id_fields = ('watergang',)
autocomplete_lookup_fields = {
'fk': ['watergang',],
}
def save_model(self,request,obj,form,change):
obj.name = 'MP%d.%d' % (obj.waarnemer.id, obj.nummer)
obj.save()
class AdresForm(forms.ModelForm):
model = Adres
def clean_postcode(self):
pattern = r'\d{4}\s*[A-Za-z]{2}'
data = self.cleaned_data['postcode']
if re.search(pattern, data) is None:
raise ValidationError('Onjuiste postcode')
return data
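    # Example: '1234AB' and '1234 ab' both satisfy the pattern above
    # ('Onjuiste postcode' is Dutch for 'Invalid postcode'). Note that
    # re.search is unanchored, so any string merely containing such a
    # substring (e.g. 'x1234AB9') would also pass this check.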
@admin.register(Adres)
class AdresAdmin(admin.ModelAdmin):
form = AdresForm
fieldsets = (
('', {'fields': (('straat', 'huisnummer', 'toevoeging'),('postcode', 'plaats')),
'classes': ('grp-collapse grp-open',),
}
),
)
@admin.register(Waarnemer)
class WaarnemerAdmin(admin.ModelAdmin):
list_display = ('achternaam', 'tussenvoegsel', 'voornaam', 'organisatie')
list_filter = ('achternaam', 'organisatie')
search_fields = ('achternaam', 'voornaam', )
ordering = ('achternaam', )
@admin.register(Organisatie)
class OrganisatieAdmin(admin.ModelAdmin):
raw_id_fields = ('adres',)
autocomplete_lookup_fields = {
'fk': ['adres',],
}
# === /app_market/migrations/0064_shiftappealinsurance.py | shmeser/giberno-postman | no_license | Python | 3,814 bytes ===
# Generated by Django 3.1.4 on 2021-08-03 07:47
import app_market.enums
import django.contrib.postgres.fields
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('app_market', '0063_auto_20210713_1532'),
]
operations = [
migrations.CreateModel(
name='ShiftAppealInsurance',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
('deleted', models.BooleanField(default=False)),
('number', models.CharField(blank=True, max_length=255, null=True, verbose_name='Номер полиса')),
('insurer', models.TextField(blank=True, max_length=4096, null=True, verbose_name='Страховщик')),
('insured_birth_date', models.DateTimeField(blank=True, null=True)),
('insured_passport', models.CharField(blank=True, max_length=255, null=True)),
('insured_phone', models.CharField(blank=True, max_length=255, null=True)),
('insured_email', models.CharField(blank=True, max_length=255, null=True)),
('insured_reg_address', models.CharField(blank=True, max_length=255, null=True)),
('insured_address', models.CharField(blank=True, max_length=255, null=True)),
('beneficiary', models.CharField(blank=True, max_length=255, null=True)),
('time_start', models.DateTimeField(blank=True, null=True, verbose_name='Начало страхового периода')),
('time_end', models.DateTimeField(blank=True, null=True, verbose_name='Окончание страхового периода')),
('address', models.CharField(blank=True, max_length=255, null=True)),
('currency', models.PositiveIntegerField(choices=[(0, 'BONUS'), (1, 'USD'), (2, 'EUR'), (3, 'RUB')], default=app_market.enums.Currency['RUB'], verbose_name='Валюта')),
('insurance_premium', models.PositiveIntegerField(blank=True, null=True)),
('insurance_payment_expiration', models.DateTimeField(blank=True, null=True, verbose_name='Срок оплаты страховой премии')),
('insured_description', models.TextField(blank=True, max_length=4096, null=True)),
('risks', django.contrib.postgres.fields.ArrayField(base_field=models.JSONField(blank=True, null=True), blank=True, null=True, size=10)),
('risks_description', models.TextField(blank=True, max_length=4096, null=True)),
('special_conditions', models.TextField(blank=True, max_length=4096, null=True)),
('insurer_proxy_number', models.CharField(blank=True, max_length=255, null=True, verbose_name='Номер доверенности представителя страховщика')),
('insurer_sign', models.CharField(blank=True, max_length=255, null=True, verbose_name='Подпись страхователя')),
('confirmed_at', models.DateTimeField(blank=True, null=True)),
('appeal', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='insurance', to='app_market.shiftappeal')),
],
options={
'verbose_name': 'Страховка на период рабочей смены',
'verbose_name_plural': 'Страховки на период рабочих смен',
'db_table': 'app_market__shift_appeal_insurance',
},
),
]
| [
"[email protected]"
]
| |
# === /orders/migrations/0001_initial.py | sanix-sandel/ZanduB | no_license | Python | 2,240 bytes ===
# Generated by Django 3.0.7 on 2020-07-08 08:59
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import uuid
class Migration(migrations.Migration):
initial = True
dependencies = [
('products', '0001_initial'),
('stores', '0002_auto_20200706_0906'),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Order',
fields=[
('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
('first_name', models.CharField(max_length=50)),
('last_name', models.CharField(max_length=50)),
('email', models.EmailField(max_length=254)),
('address', models.CharField(max_length=250)),
('postal_code', models.CharField(max_length=20)),
('city', models.CharField(max_length=100)),
('created', models.DateTimeField(auto_now_add=True)),
('updated', models.BooleanField(default=False)),
('paid', models.BooleanField(default=False)),
('customer', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='orders', to=settings.AUTH_USER_MODEL)),
('store', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='orders', to='stores.Store')),
],
options={
'ordering': ('-created',),
},
),
migrations.CreateModel(
name='OrderItem',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('price', models.DecimalField(decimal_places=2, max_digits=10)),
('quantity', models.PositiveIntegerField(default=1)),
('order', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='items', to='orders.Order')),
('product', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='order_items', to='products.Product')),
],
),
]
# === /h.py | junelynpalma/j | no_license | Python | 388 bytes ===
import schedule
import time
import os
import sys
os.system('node g.js http://www.dogcopc.com/LOGIN http.txt 600 GET PHPSESSID:gq15q5ho3eqq6gatdqm6nqdva5')
def job():
os.system('node g.js http://www.dogcopc.com/LOGIN http.txt 600 GET PHPSESSID:gq15q5ho3eqq6gatdqm6nqdva5')
schedule.every(1).seconds.do(job)
while True:
schedule.run_pending()
    time.sleep(1)
# === /src/projects/migrations/0070_auto_20161015_1458.py | ouhouhsami/ecoquartier | no_license | Python | 573 bytes ===
# -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2016-10-15 14:58
from __future__ import unicode_literals
from django.db import migrations
def forward(apps, schema_editor):
LabelEcoQuartier = apps.get_model("projects", "LabelEcoQuartier")
l, created = LabelEcoQuartier.objects.get_or_create(label="charte")
def backward(apps, schema_editor):
pass
class Migration(migrations.Migration):
dependencies = [
('projects', '0069_remove_project_type_operation'),
]
operations = [
migrations.RunPython(forward, backward),
]
# === /chapter_007/pets.py | kengru/pcrash-course | no_license | Python | 182 bytes ===
# Removing items from list with a while loop.
pets = ['dog', 'cat', 'dog', 'goldfish', 'cat', 'rabbit', 'cat']
print(pets)
while 'cat' in pets:
pets.remove('cat')
print(pets)
| [
"[email protected]"
]
| |
# === /Python_codes/p02786/s372106911.py | Aasthaengg/IBMdataset | no_license | Python | 76 bytes ===
h = int(input())
ans = 1
while h > 0:
h //= 2
ans *= 2
print(ans-1)
# === /workflow_simons.py | kaspermunch/humanXsweeps | no_license | Python | 91,935 bytes ===
from gwf import Workflow
import sys, os, glob, itertools, re
from collections import defaultdict
#from pathlib import Path
from random import seed
seed(42)
sys.path.append('./scripts')
sys.path.append('./notebooks')
import simons_meta_data
import hg19_chrom_sizes
import analysis_globals
from templates import *
gwf = Workflow(defaults={'account': 'simons'})
#################################################################################
# Load meta data
#################################################################################
individuals, populations, regions = simons_meta_data.get_meta_data()
#################################################################################
# Project root dir
#################################################################################
#faststorage = '../../'
faststorage = '/home/kmt/simons/faststorage'
#faststorage = '/project/simons/faststorage'
mydir = os.path.join(faststorage, 'people', 'kmt')
#mydir = '.'
#################################################################################
# simons input files
#################################################################################
# reference sequence file
reference_file_name = os.path.join(faststorage, 'data', 'cteam_lite_public3', 'FullyPublic', 'Href.fa')
ust_ishim_sample_id = 'Ust_Ishim'
altai_sample_id = 'Altai'
denisova_sample_id = 'Denisova'
orig_sample_files = list()
orig_mask_files = list()
for sample_id in sorted(individuals):
orig_sample_files.append(os.path.join(faststorage,
'data', 'cteam_lite_public3',
'FullyPublic', '{}.ccomp.fa.rz'.format(sample_id)))
orig_mask_files.append(os.path.join(faststorage,
'data', 'cteam_lite_public3',
'FullyPublic', '{}.ccompmask.fa.rz'.format(sample_id)))
# ust ishim:
orig_ust_ishim_sample_file = os.path.join(faststorage,
'data', 'cteam_lite_public3',
'FullyPublic', '{}.ccomp.fa.rz'.format(ust_ishim_sample_id))
orig_ust_ishim_mask_file = os.path.join(faststorage,
'data', 'cteam_lite_public3',
'FullyPublic', '{}.ccompmask.fa.rz'.format(ust_ishim_sample_id))
# altai:
orig_altai_sample_file = os.path.join(faststorage,
'data', 'cteam_lite_public3',
'FullyPublic', '{}.ccomp.fa.rz'.format(altai_sample_id))
orig_altai_mask_file = os.path.join(faststorage,
'data', 'cteam_lite_public3',
'FullyPublic', '{}.ccompmask.fa.rz'.format(altai_sample_id))
# denisova:
orig_denisova_sample_file = os.path.join(faststorage,
'data', 'cteam_lite_public3',
'FullyPublic', '{}.ccomp.fa.rz'.format(denisova_sample_id))
orig_denisova_mask_file = os.path.join(faststorage,
'data', 'cteam_lite_public3',
'FullyPublic', '{}.ccompmask.fa.rz'.format(denisova_sample_id))
#################################################################################
# turn rz files into gzip files
#################################################################################
# I do this because there is some trailing grabage in
# the rz files that python gzip cannot handle
# dir for files
sample_dir = os.path.join(mydir, 'steps', 'gziped_samples')
if not os.path.exists(sample_dir):
os.makedirs(sample_dir)
sample_files = [modpath(x, parent=sample_dir, suffix='.gz') for x in orig_sample_files]
mask_files = [modpath(x, parent=sample_dir, suffix='.gz') for x in orig_mask_files]
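# modpath is imported from scripts/templates.py; from its use throughout this
# workflow it appears to rewrite a path's directory and/or suffix (behaviour
# inferred from usage here, not from the templates source), e.g.:
#   modpath('/data/S_X.ccomp.fa.rz', parent=sample_dir, suffix='.gz')
#   #   -> os.path.join(sample_dir, 'S_X.ccomp.fa.gz')
#   modpath(f, suffix=('.fa.gz', '.trimmed.fa.gz'))  # swaps one suffix for another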
for i, (orig_sample_file, sample_file) in enumerate(zip(orig_sample_files, sample_files)):
gwf.target_from_template('smpl_rz2gz_{}'.format(i), rz2gz(rz_file=str(orig_sample_file), gz_file=str(sample_file)))
for i, (orig_mask_file, mask_file) in enumerate(zip(orig_mask_files, mask_files)):
gwf.target_from_template('mask_rz2gz_{}'.format(i), rz2gz(rz_file=str(orig_mask_file), gz_file=str(mask_file)))
# ust_ishim
ust_ishim_sample_file = modpath(orig_ust_ishim_sample_file, parent=sample_dir, suffix='.gz')
ust_ishim_mask_file = modpath(orig_ust_ishim_mask_file, parent=sample_dir, suffix='.gz')
gwf.target_from_template('smpl_rz2gz_{}'.format('ust_ishim'), rz2gz(rz_file=str(orig_ust_ishim_sample_file), gz_file=str(ust_ishim_sample_file)))
gwf.target_from_template('mask_rz2gz_{}'.format('ust_ishim'), rz2gz(rz_file=str(orig_ust_ishim_mask_file), gz_file=str(ust_ishim_mask_file)))
# altai
altai_sample_file = modpath(orig_altai_sample_file, parent=sample_dir, suffix='.gz')
altai_mask_file = modpath(orig_altai_mask_file, parent=sample_dir, suffix='.gz')
gwf.target_from_template('smpl_rz2gz_{}'.format('altai'), rz2gz(rz_file=str(orig_altai_sample_file), gz_file=str(altai_sample_file)))
gwf.target_from_template('mask_rz2gz_{}'.format('altai'), rz2gz(rz_file=str(orig_altai_mask_file), gz_file=str(altai_mask_file)))
# denisova
denisova_sample_file = modpath(orig_denisova_sample_file, parent=sample_dir, suffix='.gz')
denisova_mask_file = modpath(orig_denisova_mask_file, parent=sample_dir, suffix='.gz')
gwf.target_from_template('smpl_rz2gz_{}'.format('denisova'), rz2gz(rz_file=str(orig_denisova_sample_file), gz_file=str(denisova_sample_file)))
gwf.target_from_template('mask_rz2gz_{}'.format('denisova'), rz2gz(rz_file=str(orig_denisova_mask_file), gz_file=str(denisova_mask_file)))
# Hack to make the seq and mask same length for Ust Ishim (trimmed to shortest one of the two):
trimmed_ust_ishim_sample_file = modpath(ust_ishim_sample_file, suffix=('.fa.gz', '.trimmed.fa.gz'))
trimmed_ust_ishim_mask_file = modpath(ust_ishim_mask_file, suffix=('.fa.gz', '.trimmed.fa.gz'))
g = gwf.target("trim_ust_ishim", inputs=[ust_ishim_sample_file, ust_ishim_mask_file],
outputs=[trimmed_ust_ishim_sample_file, trimmed_ust_ishim_mask_file],
memory='15g', walltime='11:00:00') << """
source ./scripts/conda_init.sh
conda activate simons
python scripts/trim_ust_ishim.py {} {} {} {}
""".format(ust_ishim_sample_file, ust_ishim_mask_file, trimmed_ust_ishim_sample_file, trimmed_ust_ishim_mask_file)
# Hack to make the archaic sequences same length as all other:
# random sample file as template:
template_sample_file = sample_files[0]
padded_altai_sample_file = modpath(altai_sample_file, suffix=('.fa.gz', '.padded.fa.gz'))
padded_altai_mask_file = modpath(altai_mask_file, suffix=('.fa.gz', '.padded.fa.gz'))
gwf.target_from_template("pad_altai", pad_archaic_files(template_file=template_sample_file,
input_file=altai_sample_file, pad_char='N', output_file=padded_altai_sample_file))
gwf.target_from_template("pad_altai_mask", pad_archaic_files(template_file=template_sample_file,
input_file=altai_mask_file, pad_char='0', output_file=padded_altai_mask_file))
padded_denisova_sample_file = modpath(denisova_sample_file, suffix=('.fa.gz', '.padded.fa.gz'))
padded_denisova_mask_file = modpath(denisova_mask_file, suffix=('.fa.gz', '.padded.fa.gz'))
gwf.target_from_template("pad_denisova", pad_archaic_files(template_file=template_sample_file,
input_file=denisova_sample_file, pad_char='N', output_file=padded_denisova_sample_file))
gwf.target_from_template("pad_denisova_mask", pad_archaic_files(template_file=template_sample_file,
input_file=denisova_mask_file, pad_char='0', output_file=padded_denisova_mask_file))
# g = gwf.target("pad_archaic", inputs=[altai_sample_file, altai_mask_file],
# outputs=[padded_altai_sample_file, padded_altai_mask_file],
# memory='15g', walltime='11:00:00') << """
# conda activate simons
# python scripts/pad_archaic_genome.py {template} {input_seq} N {output_seq}
# python scripts/pad_archaic_genome.py {template} {input_mask} 0 {output_mask}
# """.format(template=template_sample_file,
# input_seq=altai_sample_file, output_seq=padded_altai_sample_file,
# input_mask=altai_mask_file, output_mask=padded_altai_mask_file)
#################################################################################
# mask samples
#################################################################################
mask_level = 1
# dir for files
masked_sample_dir = os.path.join(mydir, 'steps', 'masked_samples')
if not os.path.exists(masked_sample_dir):
os.makedirs(masked_sample_dir)
masked_sample_files = [modpath(x, parent=masked_sample_dir) for x in sample_files]
for i, (unmasked, mask, masked) in enumerate(zip(sample_files, mask_files, masked_sample_files)):
gwf.target_from_template("masking_{}".format(i), mask_sample(unmasked_file=str(unmasked),
mask_file=str(mask), masked_file=str(masked), mask_level=mask_level))
# ust_ishim
ust_ishim_masked_sample_file = modpath(trimmed_ust_ishim_sample_file, parent=masked_sample_dir)
gwf.target_from_template("masking_{}".format('ust_ishim'), mask_sample(unmasked_file=str(trimmed_ust_ishim_sample_file),
mask_file=str(trimmed_ust_ishim_mask_file), masked_file=str(ust_ishim_masked_sample_file),
mask_level=mask_level,
skip=['Y']))
# altai
altai_masked_sample_file = modpath(padded_altai_sample_file, parent=masked_sample_dir)
gwf.target_from_template("masking_{}".format('altai'), mask_sample(unmasked_file=str(padded_altai_sample_file),
mask_file=str(padded_altai_mask_file), masked_file=str(altai_masked_sample_file),
mask_level=mask_level,
skip=['Y']))
# denisova
denisova_masked_sample_file = modpath(padded_denisova_sample_file, parent=masked_sample_dir)
gwf.target_from_template("masking_{}".format('denisova'), mask_sample(unmasked_file=str(padded_denisova_sample_file),
mask_file=str(padded_denisova_mask_file), masked_file=str(denisova_masked_sample_file),
mask_level=mask_level,
skip=['Y']))
#################################################################################
# generate pseudohaploids
#################################################################################
# dir for files
pseudohaploid_dir = os.path.join(mydir, 'steps', 'pseudohaploid_genomes')
if not os.path.exists(pseudohaploid_dir):
os.makedirs(pseudohaploid_dir)
# Build targets for generating pseudhaploids
pseudohaploid_file_names = defaultdict(list)
for i, sample_file_name in enumerate(masked_sample_files):
basename = os.path.basename(sample_file_name).split('.')[0]
output_file_name1 = modpath('{}-A.fa.gz'.format(basename), parent=pseudohaploid_dir)
output_file_name2 = modpath('{}-B.fa.gz'.format(basename), parent=pseudohaploid_dir)
pop = individuals[basename]['Population ID']
pseudohaploid_file_names[pop].extend([os.path.join(output_file_name1), os.path.join(output_file_name2)])
# # NB: if male only the pseudohaploid to list of pesudohaploid files for downstread analysis
# is_female = individuals[basename]['Genetic sex assignment'] == 'XX'
# pseudohaploid_file_names[pop].append(os.path.join(output_file_name1))
# if is_female:
# pseudohaploid_file_names[pop].append(os.path.join(output_file_name2))
gwf.target_from_template('psudohaploids_{}'.format(i), pseudohaploids(input_file=sample_file_name,
ref_file_name=str(reference_file_name),
output_file1=str(output_file_name1),
output_file2=str(output_file_name2)))
# ust_ishim:
basename = os.path.basename(ust_ishim_masked_sample_file).split('.')[0]
ust_ishim_output_file_name1 = modpath('{}-A.fa.gz'.format(basename), parent=pseudohaploid_dir)
ust_ishim_output_file_name2 = modpath('{}-B.fa.gz'.format(basename), parent=pseudohaploid_dir)
ust_ishim_pseudohaploid_file_names = [os.path.join(ust_ishim_output_file_name1),
os.path.join(ust_ishim_output_file_name2)]
gwf.target_from_template('psudohaploids_{}'.format('ust_ishim'), pseudohaploids(input_file=ust_ishim_masked_sample_file,
ref_file_name=str(reference_file_name),
output_file1=str(ust_ishim_output_file_name1),
output_file2=str(ust_ishim_output_file_name2)))
# altai:
basename = os.path.basename(altai_masked_sample_file).split('.')[0]
altai_output_file_name1 = modpath('{}-A.fa.gz'.format(basename), parent=pseudohaploid_dir)
altai_output_file_name2 = modpath('{}-B.fa.gz'.format(basename), parent=pseudohaploid_dir)
altai_pseudohaploid_file_names = [os.path.join(altai_output_file_name1),
os.path.join(altai_output_file_name2)]
gwf.target_from_template('psudohaploids_{}'.format('altai'), pseudohaploids(input_file=altai_masked_sample_file,
ref_file_name=str(reference_file_name),
output_file1=str(altai_output_file_name1),
output_file2=str(altai_output_file_name2)))
# denisova:
basename = os.path.basename(denisova_masked_sample_file).split('.')[0]
denisova_output_file_name1 = modpath('{}-A.fa.gz'.format(basename), parent=pseudohaploid_dir)
denisova_output_file_name2 = modpath('{}-B.fa.gz'.format(basename), parent=pseudohaploid_dir)
denisova_pseudohaploid_file_names = [os.path.join(denisova_output_file_name1),
os.path.join(denisova_output_file_name2)]
gwf.target_from_template('psudohaploids_{}'.format('denisova'), pseudohaploids(input_file=denisova_masked_sample_file,
ref_file_name=str(reference_file_name),
output_file1=str(denisova_output_file_name1),
output_file2=str(denisova_output_file_name2)))
archaic_pseudohaploid_file_names = altai_pseudohaploid_file_names + denisova_pseudohaploid_file_names
#################################################################################
# compute pwdiff between all male pseudohaplotypes in windows over masked chr7.
# this is somthing I added late to be able to compute global pairwise diffs.
# I needed this pi to compare to expected pi from the simulation demography.
#################################################################################
#####
# first extract male chr7 A haplotypes (so we get as many haplotypes as X)
######
male_subset = list()
for pop in sorted(pseudohaploid_file_names):
for file_name in pseudohaploid_file_names[pop]:
basename = os.path.basename(file_name).split('.')[0]
if basename.endswith('-A'):
if individuals[basename.replace('-A', '')]['Genetic sex assignment'] == 'XY':
male_subset.append(file_name)
# dir for files
male_7_haploids_dir = os.path.join(mydir, 'steps', 'male_7_haploids')
if not os.path.exists(male_7_haploids_dir):
os.makedirs(male_7_haploids_dir)
male_7_haploids = [modpath(x, parent=male_7_haploids_dir, suffix='') for x in male_subset]
for i, (full_genome, only_7) in enumerate(zip(male_subset, male_7_haploids)):
gwf.target_from_template("extract_7_{}".format(i),
extract_7(full_genome=str(full_genome), only_7=str(only_7)))
#####
# then compute pairwise diffs
#####
# size of windows for computing pi
chr7_pwdiff_binsize = 100000
chr7_pwdiff_dir = os.path.join(mydir, 'steps', 'chr7_pwdiff')
if not os.path.exists(chr7_pwdiff_dir):
os.makedirs(chr7_pwdiff_dir)
chr7_pwdiff_file_names = list()
#all_pseudo_haplodid_file_names = sum(pseudohaploid_file_names.values(), [])
i = 0
for file1, file2 in itertools.combinations(male_7_haploids, 2):
indiv1, pseud1 = re.search(r'/([^/]+)-([AB]).fa', str(file1)).groups()
indiv2, pseud2 = re.search(r'/([^/]+)-([AB]).fa', str(file2)).groups()
    # we do not compare chromosomes from the same
    # individual to avoid inbreeding artefacts
if indiv1 == indiv2:
continue
# open files for the pair of pseudohaploids to compare
f1 = modpath(file1, parent=male_7_haploids_dir)
f2 = modpath(file2, parent=male_7_haploids_dir)
output_base_name = '{}_{}_{}_{}_{}.pickle' .format(indiv1, pseud1, indiv2, pseud2, bp2str(chr7_pwdiff_binsize))
out_file_name = modpath(output_base_name, parent=chr7_pwdiff_dir)
chr7_pwdiff_file_names.append(out_file_name)
gwf.target_from_template('chr7_pwdiff_windows_{}'.format(i),
pi_for_chrom_pair_template(str(f1), str(f2), chr7_pwdiff_binsize, '7', indiv1, pseud1, indiv2, pseud2, str(out_file_name)))
i += 1
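# Example of the filename parsing in the loop above (sample naming assumed):
# a path like '.../S_French-1-A.fa' matches r'/([^/]+)-([AB]).fa' with
# indiv = 'S_French-1' and pseud = 'A'; the two pseudohaploids of the same
# individual therefore share an indiv id and are skipped above.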
#####
# then assemble pwdiff data set for chr7
#####
# dir for files
dist_store_dir = os.path.join(mydir, 'steps', 'chr7_pwdiff_stores')
if not os.path.exists(dist_store_dir):
os.makedirs(dist_store_dir)
#dist_store_base_names = ["dist_data_{}_{}".format(x, bp2str(dist_binsize)) for x in hg19_chrom_sizes.hg19_chrom_sizes.keys()]
dist_store_base_names = ["dist_data_chr7_{}".format(bp2str(chr7_pwdiff_binsize))]
dist_store_files = [modpath(x, parent=dist_store_dir, suffix='.store') for x in dist_store_base_names]
g = gwf.target("build_chr7_pwdiff_datasets", inputs=chr7_pwdiff_file_names, outputs=dist_store_files,
memory='150g', walltime='11:00:00') << """
source ./scripts/conda_init.sh
conda activate simons
python scripts/build_dist_datasets.py \
--dist-dir {dist_dir} \
--result-dir {dist_store_dir} \
--meta-data-dir {metadata_dir}
""".format(dist_dir=chr7_pwdiff_dir, dist_store_dir=dist_store_dir, metadata_dir='/home/kmt/simons/faststorage/data/metadata')
#################################################################################
# compute pi in windows over masked genomes
#################################################################################
# size of windows for computing pi
pi_binsize = 100000
pi_dir = os.path.join(mydir, 'steps', 'population_pi')
if not os.path.exists(pi_dir):
os.makedirs(pi_dir)
pi_file_names = list()
#arg_list = list()
# iter populations
i = 0
for pop, pop_samples in sorted(pseudohaploid_file_names.items()):
# iter pseudohaploid pairs
for file1, file2 in itertools.combinations(sorted(pop_samples), 2):
indiv1, pseud1 = re.search(r'/([^/]+)-([AB]).fa.gz', str(file1)).groups()
indiv2, pseud2 = re.search(r'/([^/]+)-([AB]).fa.gz', str(file2)).groups()
        # we do not compare chromosomes from the same
        # individual, to avoid inbreeding artifacts
        if indiv1 == indiv2:
            continue
        # paths for the pair of pseudohaploids to compare
        f1 = modpath(file1, parent=pseudohaploid_dir)
        f2 = modpath(file2, parent=pseudohaploid_dir)
        output_base_name = '{}_{}_{}_{}_{}.pickle'.format(indiv1, pseud1, indiv2, pseud2, bp2str(pi_binsize))
out_file_name = modpath(output_base_name, parent=pi_dir)
pi_file_names.append(out_file_name)
gwf.target_from_template('pi_windows_{}'.format(i), pi_for_pair_template(str(f1), str(f2),
pi_binsize, pop, indiv1, pseud1, indiv2, pseud2, str(out_file_name)))
i += 1
# ust_ishim (in this case we compute the heterozygosity):
file1, file2 = ust_ishim_pseudohaploid_file_names
indiv1, pseud1 = re.search(r'/([^/]+)-([AB]).fa.gz', str(file1)).groups()
indiv2, pseud2 = re.search(r'/([^/]+)-([AB]).fa.gz', str(file2)).groups()
f1 = modpath(file1, parent=pseudohaploid_dir)
f2 = modpath(file2, parent=pseudohaploid_dir)
output_base_name = '{}_{}_{}_{}_{}.pickle'.format(indiv1, pseud1, indiv2, pseud2, bp2str(pi_binsize))
ust_ishim_pi_file_name = modpath(output_base_name, parent=pi_dir)
# NB: pass an explicit label here instead of reusing the loop variable `pop`,
# which would otherwise leak the last population iterated above.
gwf.target_from_template('pi_windows_{}'.format('ust_ishim'), pi_for_pair_template(str(f1), str(f2),
                                            pi_binsize, 'ust_ishim', indiv1, pseud1, indiv2, pseud2, str(ust_ishim_pi_file_name)))
#################################################################################
# compute diffs in windows over masked chromosomes
# NB: only compares X pseudohaploids between pairs of which at least one is African
#################################################################################
dist_binsize = 100000
# dir for files
dist_dir = os.path.join(mydir, 'steps', 'afr_nonafr_x_pseudohap_dist')
if not os.path.exists(dist_dir):
os.makedirs(dist_dir)
dist_file_names = list()
# iter populations
i = 0
x_pseudohaploids = list()
for pop in sorted(pseudohaploid_file_names):
x_pseudohaploids.extend(pseudohaploid_file_names[pop])
# iter pseudohaploid pairs
for file1, file2 in itertools.combinations(sorted(x_pseudohaploids), 2):
indiv1, pseud1 = re.search(r'/([^/]+)-([AB]).fa.gz', str(file1)).groups()
indiv2, pseud2 = re.search(r'/([^/]+)-([AB]).fa.gz', str(file2)).groups()
    # we do not compare chromosomes from the same
    # individual, to avoid inbreeding artifacts
    if indiv1 == indiv2:
        continue
    # only compare two individuals if one is an African:
    if not (indiv1 in regions['Africa'] or indiv2 in regions['Africa']):
        continue
    # paths for the pair of pseudohaploids to compare
    f1 = modpath(file1, parent=pseudohaploid_dir)
    f2 = modpath(file2, parent=pseudohaploid_dir)
    output_base_name = '{}_{}_{}_{}_{}.pickle'.format(indiv1, pseud1, indiv2, pseud2, bp2str(dist_binsize))
out_file_name = modpath(output_base_name, parent=dist_dir)
dist_file_names.append(out_file_name)
gwf.target_from_template('dist_windows_{}'.format(i), dist_for_x_pair_template(str(f1), str(f2),
dist_binsize, 'NA', indiv1, pseud1, indiv2, pseud2, str(out_file_name)))
i += 1
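# NB: for N pseudohaploids of which k are African, this creates
# C(N, 2) - C(N - k, 2) pair targets (all pairs minus the purely non-African
# ones), further reduced by the same-individual skip.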
#################################################################################
# Build pi data sets for each chromosome with added meta info
#################################################################################
# dir for files
pi_store_dir = os.path.join(mydir, 'steps', 'pi_stores')
if not os.path.exists(pi_store_dir):
os.makedirs(pi_store_dir)
metadata_dir = '/home/kmt/simons/faststorage/data/metadata'
pi_store_base_names = ["pi_data_{}_{}".format(x, bp2str(pi_binsize)) for x in hg19_chrom_sizes.hg19_chrom_sizes.keys()]
pi_store_files = [modpath(x, parent=pi_store_dir, suffix='.store') for x in pi_store_base_names]
g = gwf.target("build_pi_datasets", inputs=pi_file_names, outputs=pi_store_files,
memory='60g', walltime='11:00:00') << """
source ./scripts/conda_init.sh
conda activate simons
python scripts/build_pi_datasets.py \
--pi-dir {pi_dir} \
--result-dir {pi_store_dir} \
--meta-data-dir {metadata_dir}
""".format(pi_dir=pi_dir, pi_store_dir=pi_store_dir, metadata_dir=metadata_dir)
#################################################################################
# Build distance data sets for each chromosome with added meta info
# NB: only X pseudohaploids between pairs of which at least one is African
#################################################################################
# dir for files
dist_store_dir = os.path.join(mydir, 'steps', 'dist_stores')
if not os.path.exists(dist_store_dir):
os.makedirs(dist_store_dir)
#dist_store_base_names = ["dist_data_{}_{}".format(x, bp2str(dist_binsize)) for x in hg19_chrom_sizes.hg19_chrom_sizes.keys()]
dist_store_base_names = ["dist_data_chrX_{}".format(bp2str(dist_binsize))]
dist_store_files = [modpath(x, parent=dist_store_dir, suffix='.store') for x in dist_store_base_names]
g = gwf.target("build_dist_datasets", inputs=dist_file_names, outputs=dist_store_files,
memory='150g', walltime='11:00:00') << """
source ./scripts/conda_init.sh
conda activate simons
python scripts/build_dist_datasets.py \
--dist-dir {dist_dir} \
--result-dir {dist_store_dir} \
--meta-data-dir {metadata_dir}
""".format(dist_dir=dist_dir, dist_store_dir=dist_store_dir, metadata_dir=metadata_dir)
#################################################################################
# extract male x chromosomes
#################################################################################
male_subset = list()
for pop in sorted(pseudohaploid_file_names):
for file_name in pseudohaploid_file_names[pop]:
basename = os.path.basename(file_name).split('.')[0]
if basename.endswith('-A'):
if individuals[basename.replace('-A', '')]['Genetic sex assignment'] == 'XY':
male_subset.append(file_name)
# spike in ust ishim
file_name = ust_ishim_pseudohaploid_file_names[0]
basename = os.path.basename(file_name).split('.')[0]
assert basename.endswith('-A')
male_subset.append(file_name)
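# Ust'-Ishim is spiked in so his X haplotype passes through the same
# extraction and masking steps as the SGDP males below.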
# dir for files
male_x_haploids_dir = os.path.join(mydir, 'steps', 'male_x_haploids')
if not os.path.exists(male_x_haploids_dir):
os.makedirs(male_x_haploids_dir)
male_x_haploids = [modpath(x, parent=male_x_haploids_dir, suffix='') for x in male_subset]
for i, (full_genome, only_x) in enumerate(zip(male_subset, male_x_haploids)):
gwf.target_from_template("extract_x_{}".format(i),
extract_x(full_genome=str(full_genome), only_x=str(only_x)))
#################################################################################
# extract x pseudohaploids for altai and denisova
#################################################################################
# dir for files
archaic_x_pseudohaploids_dir = os.path.join(mydir, 'steps', 'archaic_x_pseudohaploids')
if not os.path.exists(archaic_x_pseudohaploids_dir):
os.makedirs(archaic_x_pseudohaploids_dir)
archaic_x_pseudohaploids = [modpath(x, parent=archaic_x_pseudohaploids_dir, suffix='') for x in archaic_pseudohaploid_file_names]
for i, (full_genome, only_x) in enumerate(zip(archaic_pseudohaploid_file_names, archaic_x_pseudohaploids)):
gwf.target_from_template("extract_archaic_x_{}".format(i),
extract_x(full_genome=str(full_genome), only_x=str(only_x)))
##################################################################################
## mask out ampliconic regions (replace with N) from extracted male X chromosomes
##################################################################################
#
#male_x_haploids_ampl_masked_dir = os.path.join(mydir, 'steps', 'male_x_haploids_ampl_masked')
#if not os.path.exists(male_x_haploids_ampl_masked_dir):
# os.makedirs(male_x_haploids_ampl_masked_dir)
#
#male_x_haploids_ampl_masked = [modpath(x, parent=male_x_haploids_ampl_masked_dir, suffix='.fa') for x in male_x_haploids]
#
#ampl_regions_file = '/home/kmt/simons/faststorage/people/kmt/data/coordinates_hg18_hg19_hg38_Amplicons_Gap.txt'
#
#for i, (unmasked, masked) in enumerate(zip(male_x_haploids, male_x_haploids_ampl_masked)):
# gwf.target_from_template("mask_ampliconic_regions_{}".format(i),
# mask_ampliconic_regions(unmasked_file=str(unmasked), masked_file=str(masked), ampl_regions_file=ampl_regions_file))
#################################################################################
# mask out ampliconic regions (replace with N) from extracted male X chromosomes
#################################################################################
# dir for files
male_x_haploids_ampl_masked_dir = os.path.join(mydir, 'steps', 'male_x_haploids_ampl_masked')
if not os.path.exists(male_x_haploids_ampl_masked_dir):
os.makedirs(male_x_haploids_ampl_masked_dir)
male_x_haploids_ampl_masked = [modpath(x, parent=male_x_haploids_ampl_masked_dir, suffix='.fa') for x in male_x_haploids]
ampl_regions_file = '/home/kmt/simons/faststorage/people/kmt/data/coordinates_hg18_hg19_hg38_Amplicons_Gap.txt'
for i, (unmasked, masked) in enumerate(zip(male_x_haploids, male_x_haploids_ampl_masked)):
gwf.target_from_template("mask_ampliconic_regions_{}".format(i),
mask_ampliconic_regions(unmasked_file=str(unmasked), masked_file=str(masked), ampl_regions_file=ampl_regions_file))
#################################################################################
# mask admixture segments in male x chromosomes
#################################################################################
# dir for files
admix_masked_male_x_haploids_dir = os.path.join(mydir, 'steps', 'male_x_haploids_admix_masked')
if not os.path.exists(admix_masked_male_x_haploids_dir):
os.makedirs(admix_masked_male_x_haploids_dir)
admix_masked_male_x_haploids = [modpath(x, parent=admix_masked_male_x_haploids_dir, suffix='.fa') for x in male_x_haploids]
min_admix_post_prob = 0.8
laurits_admix_pred_file = os.path.join(mydir, 'data/laurits_data/RestofworldHMMHaploid_samePAR.txt')
for i, (unmasked, masked) in enumerate(zip(male_x_haploids, admix_masked_male_x_haploids)):
gwf.target_from_template("admixmask1_x_{}".format(i),
admix_mask(unmasked_file=str(unmasked), masked_file=str(masked),
admix_pred_file=laurits_admix_pred_file, min_post_prob=min_admix_post_prob))
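# NB (assumption about the admix_mask template): positions in segments called
# archaic with posterior probability >= min_admix_post_prob (0.8) are replaced
# with N, so they drop out of the window distances computed below.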
# #################################################################################
# # same but for haplotypes with ampliconic regions masked out
# #################################################################################
# # dir for files
# ampl_and_admix_masked_male_x_haploids_dir = os.path.join(mydir, 'steps', 'male_x_haploids_ampl_and_admix_masked')
# if not os.path.exists(ampl_and_admix_masked_male_x_haploids_dir):
# os.makedirs(ampl_and_admix_masked_male_x_haploids_dir)
# ampl_and_admix_masked_male_x_haploids = [modpath(x, parent=ampl_and_admix_masked_male_x_haploids_dir, suffix='.fa') for x in male_x_haploids_ampl_masked]
# for i, (unmasked, masked) in enumerate(zip(male_x_haploids_ampl_masked, ampl_and_admix_masked_male_x_haploids)):
# gwf.target_from_template("admixmask2_x_{}".format(i),
# admix_mask(unmasked_file=str(unmasked), masked_file=str(masked),
# admix_pred_file=laurits_admix_pred_file, min_post_prob=min_admix_post_prob))
# #################################################################################
# # compute diffs in windows over all male x haplotypes
# #################################################################################
# # dir for files
# male_dist_dir = os.path.join(mydir, 'steps', 'male_x_haploid_dist')
# if not os.path.exists(male_dist_dir):
# os.makedirs(male_dist_dir)
# male_dist_file_names = list()
# i = 0
# # iter male haploid pairs
# for file1, file2 in itertools.combinations(sorted(male_x_haploids), 2):
# indiv1, hap1 = re.search(r'/([^/]+)-([AB]).fa', str(file1)).groups()
# indiv2, hap2 = re.search(r'/([^/]+)-([AB]).fa', str(file2)).groups()
# # we do not compare chromosome from the same
# # individul to avoid inbreeding arterfcts
# if indiv1 == indiv2:
# continue
# # open files for the pair of pseudohaploids to compare
# f1 = modpath(file1, parent=male_x_haploids_dir)
# f2 = modpath(file2, parent=male_x_haploids_dir)
# output_base_name = '{}_{}_{}_{}_{}.pickle' .format(indiv1, hap1, indiv2, hap2, bp2str(dist_binsize))
# out_file_name = modpath(output_base_name, parent=male_dist_dir)
# male_dist_file_names.append(out_file_name)
# gwf.target_from_template('male_dist_windows1_{}'.format(i), dist_for_x_pair_template(str(f1), str(f2),
# dist_binsize, 'NA', indiv1, hap1, indiv2, hap2, str(out_file_name)))
# i += 1
# #################################################################################
# # same but for male haplotypes with ampliconic regions masked out
# #################################################################################
# # dir for files
# male_dist_dir_ampl_masked = os.path.join(mydir, 'steps', 'male_x_haploid_dist_ampl_masked')
# if not os.path.exists(male_dist_dir_ampl_masked):
# os.makedirs(male_dist_dir_ampl_masked)
# male_ampl_masked_dist_file_names = list()
# i = 0
# # iter male haploid pairs
# for file1, file2 in itertools.combinations(sorted(male_x_haploids_ampl_masked), 2):
# indiv1, hap1 = re.search(r'/([^/]+)-([AB]).fa', str(file1)).groups()
# indiv2, hap2 = re.search(r'/([^/]+)-([AB]).fa', str(file2)).groups()
# # we do not compare chromosome from the same
# # individul to avoid inbreeding arterfcts
# if indiv1 == indiv2:
# continue
# # open files for the pair of pseudohaploids to compare
# f1 = modpath(file1, parent=male_x_haploids_ampl_masked_dir)
# f2 = modpath(file2, parent=male_x_haploids_ampl_masked_dir)
# output_base_name = '{}_{}_{}_{}_{}.pickle' .format(indiv1, hap1, indiv2, hap2, bp2str(dist_binsize))
# out_file_name = modpath(output_base_name, parent=male_dist_dir_ampl_masked)
# male_ampl_masked_dist_file_names.append(out_file_name)
# gwf.target_from_template('male_dist_windows2_{}'.format(i), dist_for_x_pair_template(str(f1), str(f2),
# dist_binsize, 'NA', indiv1, hap1, indiv2, hap2, str(out_file_name)))
# i += 1
#################################################################################
# same but for the admix-masked haplotypes (produces separate stats for admix masked)
#################################################################################
# dir for files
male_admix_masked_dist_dir = os.path.join(mydir, 'steps', 'male_x_haploid_dist_admix_masked')
if not os.path.exists(male_admix_masked_dist_dir):
os.makedirs(male_admix_masked_dist_dir)
male_admix_masked_dist_file_names = list()
i = 0
# iter male haploid pairs
for file1, file2 in itertools.combinations(sorted(admix_masked_male_x_haploids), 2):
indiv1, hap1 = re.search(r'/([^/]+)-([AB]).fa', str(file1)).groups()
indiv2, hap2 = re.search(r'/([^/]+)-([AB]).fa', str(file2)).groups()
    # we do not compare chromosomes from the same
    # individual, to avoid inbreeding artifacts
    if indiv1 == indiv2:
        continue
    # paths for the pair of haploids to compare
    f1 = modpath(file1, parent=admix_masked_male_x_haploids_dir)
    f2 = modpath(file2, parent=admix_masked_male_x_haploids_dir)
    output_base_name = '{}_{}_{}_{}_{}.pickle'.format(indiv1, hap1, indiv2, hap2, bp2str(dist_binsize))
out_file_name = modpath(output_base_name, parent=male_admix_masked_dist_dir)
male_admix_masked_dist_file_names.append(out_file_name)
gwf.target_from_template('male_dist_admix_masked_windows1_{}'.format(i), admix_masked_dist_for_x_pair_template(str(f1), str(f2),
dist_binsize, 'NA', indiv1, hap1, indiv2, hap2, str(out_file_name)))
i += 1
# #################################################################################
# # same but for the admix-masked haplotypes WITH ampliconic regions masked out (produces separate stats for admix masked)
# #################################################################################
# # dir for files
# male_ampl_and_admix_masked_dist_dir = os.path.join(mydir, 'steps', 'male_x_haploid_dist_ampl_and_admix_masked')
# if not os.path.exists(male_ampl_and_admix_masked_dist_dir):
# os.makedirs(male_ampl_and_admix_masked_dist_dir)
# male_ampl_and_admix_masked_dist_file_names = list()
# i = 0
# # iter male haploid pairs
# for file1, file2 in itertools.combinations(sorted(ampl_and_admix_masked_male_x_haploids), 2):
# indiv1, hap1 = re.search(r'/([^/]+)-([AB]).fa', str(file1)).groups()
# indiv2, hap2 = re.search(r'/([^/]+)-([AB]).fa', str(file2)).groups()
# # we do not compare chromosome from the same
# # individul to avoid inbreeding arterfcts
# if indiv1 == indiv2:
# continue
# # open files for the pair of pseudohaploids to compare
# f1 = modpath(file1, parent=ampl_and_admix_masked_male_x_haploids_dir)
# f2 = modpath(file2, parent=ampl_and_admix_masked_male_x_haploids_dir)
# output_base_name = '{}_{}_{}_{}_{}.pickle' .format(indiv1, hap1, indiv2, hap2, bp2str(dist_binsize))
# out_file_name = modpath(output_base_name, parent=male_ampl_and_admix_masked_dist_dir)
# male_ampl_and_admix_masked_dist_file_names.append(out_file_name)
# gwf.target_from_template('male_dist_admix_masked_windows2_{}'.format(i), admix_masked_dist_for_x_pair_template(str(f1), str(f2),
# dist_binsize, 'NA', indiv1, hap1, indiv2, hap2, str(out_file_name)))
# i += 1
# #################################################################################
# # Build distance data sets and call sweeps for each male chromosome with added meta info
# #################################################################################
# # dir for files
# male_dist_store_dir = os.path.join(mydir, 'steps', 'male_dist_stores')
# if not os.path.exists(male_dist_store_dir):
# os.makedirs(male_dist_store_dir)
# #male_dist_store_base_names = ["male_dist_data_{}_{}".format(x, bp2str(dist_binsize)) for x in hg19_chrom_sizes.hg19_chrom_sizes.keys()]
# male_dist_store_base_name = "male_dist_data_chrX_{}".format(bp2str(dist_binsize))
# male_dist_store_file = modpath(male_dist_store_base_name, parent=male_dist_store_dir, suffix='.hdf')
# g = gwf.target("build_male_dist_datasets1", inputs=male_dist_file_names, outputs=[male_dist_store_file],
# memory='80g', walltime='11:00:00') << """
# conda activate simons
# python scripts/build_male_dist_datasets.py \
# --dist-dir {dist_dir} \
# --meta-data-dir {metadata_dir} \
# --out-file {out_file}
# """.format(dist_dir=male_dist_dir, out_file=male_dist_store_file, metadata_dir=metadata_dir)
# #################################################################################
# # Call sweeps on the distance data with given pwdist_cutoff and min_sweep_clade_size
# #################################################################################
# male_dist_sweep_data_file = os.path.join(male_dist_store_dir, "sweep_data_{}_{}.hdf".format(analysis_globals.pwdist_cutoff,
# analysis_globals.min_sweep_clade_size))
# gwf.target_from_template('male_dist_sweep_data', sweep_data(male_dist_store_file, male_dist_sweep_data_file))
# #################################################################################
# # same but with ampliconic regions masked out
# #################################################################################
# # dir for files
# male_dist_ampl_masked_store_dir = os.path.join(mydir, 'steps', 'male_dist_ampl_masked_stores')
# if not os.path.exists(male_dist_ampl_masked_store_dir):
# os.makedirs(male_dist_ampl_masked_store_dir)
# male_dist_ampl_masked_store_base_name = "male_dist_data_chrX_{}".format(bp2str(dist_binsize))
# male_dist_ampl_masked_store_file = modpath(male_dist_ampl_masked_store_base_name, parent=male_dist_ampl_masked_store_dir, suffix='.hdf')
# g = gwf.target("build_male_dist_datasets2", inputs=male_ampl_masked_dist_file_names, outputs=[male_dist_ampl_masked_store_file],
# memory='80g', walltime='11:00:00') << """
# conda activate simons
# python scripts/build_male_dist_datasets.py \
# --dist-dir {dist_dir} \
# --meta-data-dir {metadata_dir} \
# --out-file {out_file}
# """.format(dist_dir=male_dist_dir_ampl_masked, out_file=male_dist_ampl_masked_store_file, metadata_dir=metadata_dir)
# #################################################################################
# # Call sweeps on the distance data with given pwdist_cutoff and min_sweep_clade_size
# #################################################################################
# male_dist_ampl_masked_sweep_data_file = os.path.join(male_dist_ampl_masked_store_dir, "sweep_data_{}_{}.hdf".format(analysis_globals.pwdist_cutoff,
# analysis_globals.min_sweep_clade_size))
# gwf.target_from_template('male_dist_ampl_masked_sweep_data', sweep_data(male_dist_ampl_masked_store_file, male_dist_ampl_masked_sweep_data_file))
#################################################################################
# same but for the admix-masked haplotypes
#################################################################################
# # dir for files
# male_dist_admix_masked_store_dir = os.path.join(mydir, 'steps', 'male_dist_admix_masked_stores')
# if not os.path.exists(male_dist_admix_masked_store_dir):
# os.makedirs(male_dist_admix_masked_store_dir)
# male_dist_admix_masked_store_base_name = "male_dist_data_chrX_{}".format(bp2str(dist_binsize))
# male_dist_admix_masked_store_file = modpath(male_dist_admix_masked_store_base_name, parent=male_dist_admix_masked_store_dir, suffix='.hdf')
# g = gwf.target("build_male_dist_admix_masked_datasets1", inputs=male_admix_masked_dist_file_names, outputs=[male_dist_admix_masked_store_file],
# memory='80g', walltime='11:00:00') << """
# conda activate simons
# python scripts/build_male_dist_admix_masked_datasets.py \
# --dist-dir {dist_dir} \
# --meta-data-dir {metadata_dir} \
# --out-file {out_file}
# """.format(dist_dir=male_admix_masked_dist_dir, out_file=male_dist_admix_masked_store_file, metadata_dir=metadata_dir)
########## NEW VERSION ########################
#################################################################################
# Call sweeps on the distance data with given pwdist_cutoff and min_sweep_clade_size
#################################################################################
male_dist_admix_masked_store_dir = os.path.join(mydir, 'steps', 'male_dist_admix_masked_stores')
if not os.path.exists(male_dist_admix_masked_store_dir):
os.makedirs(male_dist_admix_masked_store_dir)
male_dist_admix_masked_store_base_name = "male_dist_data_chrX_{}".format(bp2str(dist_binsize))
male_dist_admix_masked_store_file = modpath(male_dist_admix_masked_store_base_name, parent=male_dist_admix_masked_store_dir, suffix='.hdf')
male_dist_twice_admix_masked_store_file = modpath(male_dist_admix_masked_store_base_name + '_twice',
parent=os.path.dirname(male_dist_admix_masked_store_file), suffix='.hdf')
g = gwf.target("build_male_dist_admix_masked_datasets1",
inputs=male_admix_masked_dist_file_names,
outputs=[male_dist_admix_masked_store_file, male_dist_twice_admix_masked_store_file],
memory='16g', walltime='11:00:00') << """
source ./scripts/conda_init.sh
conda activate simons
python scripts/build_male_dist_admix_masked_datasets.py \
--dist-dir {dist_dir} \
--meta-data-dir {metadata_dir} \
--out-file {out_file} \
--dist-twice-out-file {dist_twice_out_file}
""".format(dist_dir=male_admix_masked_dist_dir, out_file=male_dist_admix_masked_store_file, metadata_dir=metadata_dir,
dist_twice_out_file=male_dist_twice_admix_masked_store_file)
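# NB: the '_twice' store is what the sweep/clique-calling targets below read;
# presumably it records each pair in both (i, j) and (j, i) orientation so
# per-individual slicing is straightforward.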
#################################################################################
# Same but including Ust Ishim:
# Also adjusts distances to ust ishim by adding distance corresponding to 45000 years
#################################################################################
male_dist_admix_masked_store_base_name_with_ust_ishim = "male_dist_data_with_ust_ishim_chrX_{}".format(bp2str(dist_binsize))
male_dist_admix_masked_store_file_with_ust_ishim = modpath(male_dist_admix_masked_store_base_name_with_ust_ishim, parent=male_dist_admix_masked_store_dir, suffix='.hdf')
male_dist_twice_admix_masked_store_file_with_ust_ishim = modpath(male_dist_admix_masked_store_base_name_with_ust_ishim + '_twice',
parent=os.path.dirname(male_dist_admix_masked_store_file_with_ust_ishim), suffix='.hdf')
g = gwf.target("build_male_dist_admix_masked_datasets_with_ust_ishim",
inputs=male_admix_masked_dist_file_names,
outputs=[male_dist_admix_masked_store_file_with_ust_ishim, male_dist_twice_admix_masked_store_file_with_ust_ishim],
memory='16g', walltime='11:00:00') << """
source ./scripts/conda_init.sh
conda activate simons
python scripts/build_male_dist_admix_masked_datasets.py \
--dist-dir {dist_dir} \
--meta-data-dir {metadata_dir} \
--out-file {out_file} \
--dist-twice-out-file {dist_twice_out_file} \
--include-ust-ishim
""".format(dist_dir=male_admix_masked_dist_dir, out_file=male_dist_admix_masked_store_file_with_ust_ishim, metadata_dir=metadata_dir,
dist_twice_out_file=male_dist_twice_admix_masked_store_file_with_ust_ishim)
########## NEW VERSION ########################
#################################################################################
# Call sweeps on the distance data with given pwdist_cutoff and min_sweep_clade_size
#################################################################################
# male_dist_admix_masked_sweep_data_file = \
# os.path.join(male_dist_admix_masked_store_dir, "sweep_data_{}_{}.hdf".format(analysis_globals.pwdist_cutoff,
# analysis_globals.min_sweep_clade_size))
# male_dist_admix_masked_dist_twice_file = modpath(male_dist_admix_masked_store_base_name + '_twice', parent=male_dist_admix_masked_store_dir, suffix='.hdf')
# gwf.target_from_template('male_dist_admix_masked_sweep_data', sweep_data(male_dist_admix_masked_store_file,
# male_dist_admix_masked_sweep_data_file,
# dump_dist_twice=male_dist_admix_masked_dist_twice_file))
########## NEW VERSION ########################
male_dist_admix_masked_sweep_data_files = defaultdict(list)
for pwdist_cutoff in [analysis_globals.pwdist_cutoff]:
for min_sweep_clade_percent in range(0, 100, 5): # changed this from 1 to 5 may 23 2021
sweep_stat_dir = os.path.join(male_dist_admix_masked_store_dir, str(pwdist_cutoff))
if not os.path.exists(sweep_stat_dir):
os.makedirs(sweep_stat_dir)
male_dist_admix_masked_sweep_data_file = modpath("sweep_data_{}_{}%.hdf".format(pwdist_cutoff, min_sweep_clade_percent),
parent=sweep_stat_dir)
male_dist_admix_masked_sweep_data_files[pwdist_cutoff].append(male_dist_admix_masked_sweep_data_file)
gwf.target_from_template('male_dist_admix_masked_sweep_data_{:f}_{}'.format(
pwdist_cutoff, min_sweep_clade_percent),
sweep_data(male_dist_twice_admix_masked_store_file,
male_dist_admix_masked_sweep_data_file,
min_sweep_clade_percent,
pwdist_cutoff ))
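# Resulting layout, one file per grid point (<cutoff> stands for the value of
# analysis_globals.pwdist_cutoff):
#   steps/male_dist_admix_masked_stores/<cutoff>/sweep_data_<cutoff>_0%.hdf
#   ...
#   steps/male_dist_admix_masked_stores/<cutoff>/sweep_data_<cutoff>_95%.hdf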
########## NEW VERSION ########################
#################################################################################
# Call sweeps on the distance data with given pwdist_cutoff and min_sweep_clade_size
#################################################################################
male_dist_admix_masked_clique_data_files = defaultdict(list)
for pwdist_cutoff in [analysis_globals.pwdist_cutoff]:
for min_sweep_clade_percent in range(0, 100+5, 5):
sweep_stat_dir = os.path.join(male_dist_admix_masked_store_dir, str(pwdist_cutoff))
if not os.path.exists(sweep_stat_dir):
os.makedirs(sweep_stat_dir)
male_dist_admix_masked_clique_data_file = modpath("clique_data_{}_{}%.hdf".format(pwdist_cutoff, min_sweep_clade_percent),
parent=sweep_stat_dir)
male_dist_admix_masked_clique_data_files[pwdist_cutoff].append(male_dist_admix_masked_clique_data_file)
gwf.target_from_template('male_dist_admix_masked_clique_data_{:f}_{}'.format(
pwdist_cutoff, min_sweep_clade_percent),
clique_data(male_dist_twice_admix_masked_store_file,
male_dist_admix_masked_clique_data_file,
min_sweep_clade_percent,
pwdist_cutoff ))
#################################################################################
# Same but including Ust Ishim (for calling ECH in Ust Ishim)
#################################################################################
male_dist_admix_masked_clique_data_files_with_ust_ishim = defaultdict(list)
for pwdist_cutoff in [analysis_globals.pwdist_cutoff]:
for min_sweep_clade_percent in range(0, 100+5, 5):
sweep_stat_dir = os.path.join(male_dist_admix_masked_store_dir, str(pwdist_cutoff))
if not os.path.exists(sweep_stat_dir):
os.makedirs(sweep_stat_dir)
male_dist_admix_masked_clique_data_file_with_ust_ishim = modpath("clique_data_with_ust_ishim_{}_{}%.hdf".format(pwdist_cutoff, min_sweep_clade_percent),
parent=sweep_stat_dir)
male_dist_admix_masked_clique_data_files_with_ust_ishim[pwdist_cutoff].append(male_dist_admix_masked_clique_data_file_with_ust_ishim)
gwf.target_from_template('male_dist_admix_masked_clique_data_with_ust_ishim_{:f}_{}'.format(
pwdist_cutoff, min_sweep_clade_percent),
clique_data(male_dist_twice_admix_masked_store_file_with_ust_ishim,
male_dist_admix_masked_clique_data_file_with_ust_ishim,
min_sweep_clade_percent,
pwdist_cutoff ))
#################################################################################
# Use the hundred sweep calls to find the largest min_sweep_clade_percent that
# allows a sweep to be called (AKA mixcalling)
#################################################################################
# # NOTE: abandoned mixcalling. I realized that it does not report cliques.
# for pwdist_cutoff in [analysis_globals.pwdist_cutoff]:
# sweep_data_dir = os.path.dirname(male_dist_admix_masked_sweep_data_files[pwdist_cutoff][0])
# sweep_data_mixcall_file = modpath("sweep_data_mixcall_{}.hdf".format(pwdist_cutoff),
# parent=os.path.dirname(sweep_data_dir))
# g = gwf.target("sweep_data_mixcalling_{:f}".format(pwdist_cutoff),
# inputs=male_dist_admix_masked_sweep_data_files[pwdist_cutoff],
# outputs=[sweep_data_mixcall_file],
# memory='30g', walltime='1:00:00') << """
# source ./scripts/conda_init.sh
# conda activate simons
# python scripts/sweep_mixcalling.py {sweep_data_inputdir} {sweep_data_outfile}
# """.format(sweep_data_inputdir=sweep_data_dir, sweep_data_outfile=sweep_data_mixcall_file)
# #################################################################################
# # same but for the ampliconic region masked AND admix-masked haplotypes
# #################################################################################
# # dir for files
# male_dist_ampl_and_admix_masked_store_dir = os.path.join(mydir, 'steps', 'male_dist_ampl_and_admix_masked_stores')
# if not os.path.exists(male_dist_ampl_and_admix_masked_store_dir):
# os.makedirs(male_dist_ampl_and_admix_masked_store_dir)
# male_dist_ampl_and_admix_masked_store_base_name = "male_dist_data_chrX_{}".format(bp2str(dist_binsize))
# male_dist_ampl_and_admix_masked_store_file = modpath(male_dist_ampl_and_admix_masked_store_base_name, parent=male_dist_ampl_and_admix_masked_store_dir, suffix='.store')
# g = gwf.target("build_male_dist_admix_masked_datasets2", inputs=male_ampl_and_admix_masked_dist_file_names, outputs=[male_dist_ampl_and_admix_masked_store_file],
# memory='80g', walltime='11:00:00') << """
# conda activate simons
# python scripts/build_male_dist_admix_masked_datasets.py \
# --dist-dir {dist_dir} \
# --meta-data-dir {metadata_dir} \
# --out-file {out_file}
# """.format(dist_dir=male_ampl_and_admix_masked_dist_dir, out_file=male_dist_ampl_and_admix_masked_store_file, metadata_dir=metadata_dir)
# #################################################################################
# # Call sweeps on the distance data with given pwdist_cutoff and min_sweep_clade_size
# #################################################################################
# male_dist_ampl_and_admix_masked_sweep_data_file = os.path.join(male_dist_ampl_and_admix_masked_store_dir, "sweep_data_{}_{}.hdf".format(analysis_globals.pwdist_cutoff,
# analysis_globals.min_sweep_clade_size))
# gwf.target_from_template('male_dist_ampl_and_admix_masked_sweep_data', sweep_data(male_dist_ampl_and_admix_masked_store_file, male_dist_ampl_and_admix_masked_sweep_data_file))
#################################################################################
# compute additional diffs between archaic female pseudohaploids and all male x haplotypes
#################################################################################
# dir for files
archaic_dist_dir = os.path.join(mydir, 'steps', 'archaic_x_pseudohaploid_dist')
if not os.path.exists(archaic_dist_dir):
os.makedirs(archaic_dist_dir)
archaic_dist_file_names = list()
i = 0
for file1, file2 in itertools.product(sorted(male_x_haploids), archaic_x_pseudohaploids):
indiv1, hap1 = re.search(r'/([^/]+)-([AB]).fa', str(file1)).groups()
indiv2, hap2 = re.search(r'/([^/]+)-([AB]).fa', str(file2)).groups()
    output_base_name = '{}_{}_{}_{}_{}.pickle'.format(indiv1, hap1, indiv2, hap2, bp2str(dist_binsize))
out_file_name = modpath(output_base_name, parent=archaic_dist_dir)
archaic_dist_file_names.append(out_file_name)
gwf.target_from_template('archaic_dist_windows_{}'.format(i), dist_for_x_pair_template(str(file1), str(file2),
dist_binsize, 'NA', indiv1, hap1, indiv2, hap2, str(out_file_name)))
i += 1
#################################################################################
# Build distance data sets for archaic pseudohaploids and male x chromosomes
#################################################################################
# dir for files
archaic_dist_store_dir = os.path.join(mydir, 'steps', 'archaic_dist_stores')
if not os.path.exists(archaic_dist_store_dir):
os.makedirs(archaic_dist_store_dir)
#male_dist_store_base_names = ["male_dist_data_{}_{}".format(x, bp2str(dist_binsize)) for x in hg19_chrom_sizes.hg19_chrom_sizes.keys()]
archaic_dist_store_base_name = "archaic_dist_data_chrX_{}".format(bp2str(dist_binsize))
archaic_dist_store_file = modpath(archaic_dist_store_base_name, parent=archaic_dist_store_dir, suffix='.hdf')
g = gwf.target("build_archaic_dist_datasets", inputs=archaic_dist_file_names, outputs=[archaic_dist_store_file],
memory='10g', walltime='11:00:00') << """
source ./scripts/conda_init.sh
conda activate simons
python scripts/build_archaic_dist_datasets.py \
--dist-dir {dist_dir} \
--meta-data-dir {metadata_dir} \
--out-file {out_file}
""".format(dist_dir=archaic_dist_dir, out_file=archaic_dist_store_file, metadata_dir=metadata_dir)
#################################################################################
# make fasta alignments of male x chromosomes
#################################################################################
argweaver_binsize = 100000
def argweaver_input_targets(region_label, male_x_haploids):
# dir for files
argweaver_input_dir = os.path.join(mydir, 'steps', 'argweaver', 'input', region_label)
if not os.path.exists(argweaver_input_dir):
os.makedirs(argweaver_input_dir)
# make list of expected argweaver fasta input files:
argweaver_input_files = list()
chrom_len = hg19_chrom_sizes.hg19_chrom_sizes['chrX']
for i in range(0, chrom_len - argweaver_binsize, argweaver_binsize):
file_name = 'X-{:09d}-{:09d}.fa'.format(i, min(chrom_len, i+argweaver_binsize)) # NB: next time I should format the input files like the stores...
#file_path = os.path.join(mydir, 'steps', 'argweaver', 'input', file_name)
#file_path = argweaver_input_dir / file_name
file_path = modpath(file_name, parent=argweaver_input_dir)
argweaver_input_files.append(file_path)
# gwf.target('fasta_align_{}'.format(region_label)) << fasta_alignments(list(map(str, male_x_haploids)),
# list(map(str, argweaver_input_files)),
# argweaver_binsize, str(argweaver_input_dir))
gwf.target_from_template('fasta_align_{}'.format(region_label), fasta_alignments(list(map(str, male_x_haploids)),
list(map(str, argweaver_input_files)),
argweaver_binsize, str(argweaver_input_dir)))
return argweaver_input_files
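# NB: range(0, chrom_len - argweaver_binsize, argweaver_binsize) above skips a
# trailing partial window at the chromosome end. First window file name:
#   'X-{:09d}-{:09d}.fa'.format(0, 100000)  # -> 'X-000000000-000100000.fa'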
argweaver_input_files = dict()
#male_x_haploids_subset = [x for x in male_x_haploids if x.name.replace('-A.fa', '') in individuals]
male_x_haploids_subset = [x for x in male_x_haploids if os.path.basename(x).replace('-A.fa', '') in individuals]
# (for the World set we also filter to only keep individuals not filtered out of the meta data info)
argweaver_input_files['World'] = argweaver_input_targets('World', male_x_haploids_subset)
for region_label in list(regions.keys()):
male_x_haploids_subset = [x for x in male_x_haploids if os.path.basename(x).replace('-A.fa', '') in regions[region_label]]
argweaver_input_files[region_label] = argweaver_input_targets(region_label, male_x_haploids_subset)
#################################################################################
# argweaver
#################################################################################
region_labels = ['World'] + list(regions.keys())
def run_argweaver_analysis(region_label, argweaver_input_files):
argweaver_output_dir = os.path.join(mydir, 'steps', 'argweaver', 'output', region_label)
if not os.path.exists(argweaver_output_dir):
os.makedirs(argweaver_output_dir)
argweaver_output_files = list()
for i, input_file in enumerate(argweaver_input_files):
output_file = modpath(input_file, parent=argweaver_output_dir, suffix='.tsv.gz')
argweaver_output_files.append(output_file)
# gwf.target('argweaver_{}_{}'.format(region_label, i)) << argeaver_window_analysis(input_fasta_file=input_file, output_hdf_file=output_file)
gwf.target_from_template('argweaver_{}_{}'.format(region_label, i),
argeaver_window_analysis(input_fasta_file=input_file, output_file=output_file))
return argweaver_output_files
argweaver_output_files = dict()
for region_label in region_labels:
argweaver_output_files[region_label] = run_argweaver_analysis(region_label, argweaver_input_files[region_label])
#################################################################################
# For each analysis window, extract mean tmrca and tmrca_half for each chain and group
#################################################################################
for region_label in region_labels:
tmrca_dir = os.path.join(mydir, 'steps', 'argweaver', 'tmrca', region_label)
if not os.path.exists(tmrca_dir):
os.makedirs(tmrca_dir)
for i, input_table_file in enumerate(argweaver_output_files[region_label]):
output_tmrca_file = modpath(input_table_file, parent=tmrca_dir, suffix=('.tsv.gz', '.hdf'))
gwf.target_from_template('tmrca_{}_{}'.format(region_label, i),
compute_tmrca_window_stats(input_table_file, output_tmrca_file))
#################################################################################
# Compute extra tmrca statistics from pruned argweaver trees for World analysis
#################################################################################
excluded_pops = ','.join(simons_meta_data.excluded_populations)
excluded_indivs = ','.join(simons_meta_data.excluded_individuals)
# dir for files
annotated_output_dir = os.path.join(mydir, 'steps', 'argweaver', 'annotated_output', 'World')
if not os.path.exists(annotated_output_dir):
os.makedirs(annotated_output_dir)
argweaver_annotated_output_files = defaultdict(list)
for region_label in ['World']:
for i, input_file_name in enumerate(argweaver_output_files[region_label]):
output_extra_file = modpath(input_file_name, parent=annotated_output_dir)
gwf.target_from_template('argweaver_extra_stats_{}_{}'.format(region_label, i),
argweaver_extra_stats(input_file_name, output_extra_file, excluded_pops, excluded_indivs))
argweaver_annotated_output_files[region_label].append(output_extra_file)
#################################################################################
# For the extra stats on pruned trees, for each analysis window, extract mean tmrca and tmrca_half for each chain and group
#################################################################################
for region_label in ['World']:
tmrca_extra_dir = os.path.join(mydir, 'steps', 'argweaver', 'tmrca_extra', region_label)
if not os.path.exists(tmrca_extra_dir):
os.makedirs(tmrca_extra_dir)
for i, input_table_file in enumerate(argweaver_annotated_output_files[region_label]):
output_tmrca_extra_file = modpath(input_table_file, parent=tmrca_extra_dir, suffix=('.tsv.gz', '.hdf'))
gwf.target_from_template('tmrca_extra_{}_{}'.format(region_label, i),
compute_extra_tmrca_window_stats(input_table_file, output_tmrca_extra_file))
#################################################################################
# additional summary stats for each sampled tree
#################################################################################
# for region_label in region_labels:
# stats_dir = os.path.join(mydir, 'steps', 'argweaver', 'stats', region_label)
# if not os.path.exists(stats_dir):
# os.makedirs(stats_dir)
# for i, input_hdf_file in enumerate(argweaver_output_files[region_label]):
# output_hdf_file = modpath(input_hdf_file, parent=stats_dir, suffix=('.tsv.gz', '.hdf'))
# component_hdf_file = modpath(input_hdf_file, parent=stats_dir, suffix=('.tsv.gz', '.comp.hdf'))
# component_stats_hdf_file = modpath(input_hdf_file, parent=stats_dir, suffix=('.tsv.gz', '.compstats.hdf'))
# sweep_sister_clade_hdf_file = modpath(input_hdf_file, parent=stats_dir, suffix=('.tsv.gz', '.sweepsister.hdf'))
# nonsweep_sister_clade_hdf_file = modpath(input_hdf_file, parent=stats_dir, suffix=('.tsv.gz', '.nonsweepsister.hdf'))
# gwf.target_from_template('treestats_{}_{}'.format(region_label, i),
# compute_tree_stats(input_hdf_file, output_hdf_file,
# component_hdf_file, component_stats_hdf_file,
# sweep_sister_clade_hdf_file, nonsweep_sister_clade_hdf_file))
#################################################################################
# liftovers
#################################################################################
def reciprocal_liftover(intervals_files, forwards_chain_file, backwards_chain_file,
slurm_tag, steps_dir, target_chromosomes):
"""
Does reciprocal lift over of a set of intervals.
"""
if not steps_dir.exists():
os.makedirs(str(steps_dir))
# output files
    mapped_files = [steps_dir / x.with_suffix('.mapped').name for x in intervals_files]
unmapped_files = [x.with_suffix('.unmapped') for x in mapped_files]
backmapped_files = [x.with_suffix('.backmapped') for x in mapped_files]
unbackmapped_files = [x.with_suffix('.nobackmapped') for x in mapped_files]
filtered_files = [x.with_suffix('.filtered') for x in mapped_files]
lifted_files = [steps_dir / 'sorted' / "{}.bed".format(x) for x in target_chromosomes]
for i in range(len(intervals_files)):
# lift over intervals
gwf.target_from_template('{}_lift_{}'.format(slurm_tag, i),
liftover(bed_file=intervals_files[i], chain_file=forwards_chain_file,
mapped_file=mapped_files[i], unmapped_file=unmapped_files[i]))
        # lift back to original coordinates to ensure one-to-one correspondence
gwf.target_from_template('{}_liftback_{}'.format(slurm_tag, i),
liftover(bed_file=mapped_files[i], chain_file=backwards_chain_file,
mapped_file=backmapped_files[i], unmapped_file=unbackmapped_files[i]))
        # filter out intervals that do not map both ways
gwf.target_from_template('{}_filter_{}'.format(slurm_tag, i),
bed_difference(bed_file1=mapped_files[i], bed_file2=unbackmapped_files[i],
output_file=filtered_files[i]))
    # merge the filtered intervals and split them into one bed file per chromosome
gwf.target_from_template('{}_merge_and_split'.format(slurm_tag),
bed_merge_and_split(input_files=filtered_files, output_files=lifted_files))
return lifted_files
# split map file per chromosome
chains_dir = Path('data/chain_files')
# decode hg38 map files
hg38_map_files = []  # NB: currently empty; per-file liftover targets below are only created for files listed here
# chromosomes we are interested in (not other random contigs)
target_chromosomes = ['chr{}'.format(x) for x in list(range(1,23)) + ['X']]
# lift decode map from hg38 to hg19
hg19_map_files = reciprocal_liftover(hg38_map_files,
forwards_chain_file=chains_dir/'hg38ToHg19.over.chain',
backwards_chain_file=chains_dir/'hg19ToHg38.over.chain',
slurm_tag='liftover',
steps_dir=Path(os.getcwd(), 'steps', 'decode_liftover'),
target_chromosomes=target_chromosomes)
#################################################################################
# smc++
#################################################################################
dedicated_individuals = [
'LP6005443-DNA_A06', # S_Greek-2, WestEurasia
'LP6005443-DNA_D06', # S_Korean-1, EastAsia
'LP6005519-DNA_D05', # S_Irula-2, SouthAsia
'LP6005443-DNA_D02', # S_Yakut-2, CentralAsiaSiberia
'LP6005443-DNA_F08', # S_Papuan-9, Oceania
'LP6005441-DNA_E10', # S_Pima-1, America
]
max_missing = 100000
mutation_rate = 1.52e-08 # 5.25e-10 * 29
#mutation_rate = 1.247e-08
generation_time = 29
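# sanity check on units: 5.25e-10 mutations/bp/year * 29 years/generation
# = 1.52e-08 mutations/bp/generation, the per-generation rate passed to smc++.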
# run vcfmerge
gwf.target(name='vcfmerge',
inputs=['results/analyzed_individuals.csv'],
outputs=['steps/smcpp/vcf/nonafr_analyzed_individuals_chr7.vcf.gz'],
walltime='03:00:00',
memory='8g') << f"""
source ./scripts/conda_init.sh
conda activate smcpp
mkdir -p steps/smcpp/vcf
tail -n +2 ~/simons/faststorage/people/kmt/results/analyzed_individuals.csv | grep -v Africa | cut -f 1 -d ',' | grep -f - ~/simons/faststorage/data/metadata/nature18964-s2-fixed-genders.csv | cut -f 3 -d ';' | awk '$1="/home/kmt/simons/faststorage/data/vcfs/"$1".annotated.nh2.variants.vcf.gz"' > steps/smcpp/vcf/nonafr_analyzed_individuals_vcf_files.txt
bcftools merge --regions 7 --missing-to-ref -Oz -o steps/smcpp/vcf/nonafr_analyzed_individuals_chr7.vcf.gz --file-list steps/smcpp/vcf/nonafr_analyzed_individuals_vcf_files.txt
tabix steps/smcpp/vcf/nonafr_analyzed_individuals_chr7.vcf.gz
"""
# sfs and r2 for vcf file
gwf.target(name='sfs_r2',
inputs=['steps/smcpp/vcf/nonafr_analyzed_individuals_chr7.vcf.gz'],
outputs=['steps/smcpp/vcf/nonafr_analyzed_individuals_chr7_with_aa.vcf.gz',
'steps/smcpp/vcf/nonafr_analyzed_individuals_chr7_with_aa.frq',
'steps/smcpp/vcf/nonafr_analyzed_individuals_chr7_with_aa.geno.ld'],
walltime='07:00:00',
memory='8g') << f"""
source ./scripts/conda_init.sh
conda activate samtools
mkdir -p steps/smcpp/vcf
# Add ancestral allele info to vcf:
zcat data/human_ancestor_GRCh37_e71/homo_sapiens_ancestor_7.fa.gz | sed 's,^>.*,>7,' | tr a-z A-Z | bgzip -c > steps/smcpp/vcf/human_ancestral_7.fa.gz
samtools faidx steps/smcpp/vcf/human_ancestral_7.fa.gz
zcat steps/smcpp/vcf/nonafr_analyzed_individuals_chr7.vcf.gz | fill-aa -a steps/smcpp/vcf/human_ancestral_ | gzip -c > steps/smcpp/vcf/nonafr_analyzed_individuals_chr7_with_aa.vcf.gz
# compute polarized allele frequencies
vcftools --gzvcf steps/smcpp/vcf/nonafr_analyzed_individuals_chr7_with_aa.vcf.gz --out steps/smcpp/vcf/nonafr_analyzed_individuals_chr7_with_aa --freq2 --derived
# compute r2
vcftools --gzvcf steps/smcpp/vcf/nonafr_analyzed_individuals_chr7_with_aa.vcf.gz --out steps/smcpp/vcf/nonafr_analyzed_individuals_chr7_with_aa --geno-r2 --min-r2 0.01 --ld-window-bp 500000
"""
# same but only for West Eurasian samples west and north of Istanbul
istanbul_coord = (41.0082, 28.9784)
vcf_subset_samples_regex = '|'.join([indiv for (indiv, info) in individuals.items() if info['Region'] == 'WestEurasia' and info['Latitude'] > istanbul_coord[0] and info['Longitude'] < istanbul_coord[1]])
gwf.target(name='sfs_r2_eur',
inputs=['steps/smcpp/vcf/nonafr_analyzed_individuals_chr7_with_aa.vcf.gz'],
           outputs=['steps/smcpp/vcf/nonafr_analyzed_europeans_chr7_with_aa.vcf.gz',
                    'steps/smcpp/vcf/nonafr_analyzed_europeans_chr7_with_aa.frq',
                    'steps/smcpp/vcf/nonafr_analyzed_europeans_chr7_with_aa.geno.ld'],
walltime='07:00:00',
memory='8g') << f"""
source ./scripts/conda_init.sh
conda activate smcpp
mkdir -p steps/smcpp/vcf
SAMPLES=`grep -E '{vcf_subset_samples_regex}' ~/simons/faststorage/data/metadata/nature18964-s2-fixed-genders.csv | grep XY | cut -f 3 -d ';' | tr '\n' ',' | sed 's/.$//'`
# extract european samples from vcf
bcftools view -s $SAMPLES -Oz -o steps/smcpp/vcf/nonafr_analyzed_europeans_chr7_with_aa.vcf.gz steps/smcpp/vcf/nonafr_analyzed_individuals_chr7_with_aa.vcf.gz
# compute polarized allele frequencies
vcftools --gzvcf steps/smcpp/vcf/nonafr_analyzed_europeans_chr7_with_aa.vcf.gz --out steps/smcpp/vcf/nonafr_analyzed_europeans_chr7_with_aa --freq2 --derived
# compute r2
vcftools --gzvcf steps/smcpp/vcf/nonafr_analyzed_europeans_chr7_with_aa.vcf.gz --out steps/smcpp/vcf/nonafr_analyzed_europeans_chr7_with_aa --geno-r2 --min-r2 0.01 --ld-window-bp 500000
"""
# same but only for papuans:
vcf_subset_samples_regex = '|'.join([indiv for (indiv, info) in individuals.items() if info['Population ID'] == 'Papuan'])
gwf.target(name='sfs_r2_papuans',
inputs=['steps/smcpp/vcf/nonafr_analyzed_individuals_chr7_with_aa.vcf.gz'],
           outputs=['steps/smcpp/vcf/nonafr_analyzed_papuans_chr7_with_aa.vcf.gz',
'steps/smcpp/vcf/nonafr_analyzed_papuans_chr7_with_aa.frq',
'steps/smcpp/vcf/nonafr_analyzed_papuans_chr7_with_aa.geno.ld'],
walltime='07:00:00',
memory='8g') << f"""
source ./scripts/conda_init.sh
conda activate smcpp
mkdir -p steps/smcpp/vcf
SAMPLES=`grep -E '{vcf_subset_samples_regex}' ~/simons/faststorage/data/metadata/nature18964-s2-fixed-genders.csv | grep XY | cut -f 3 -d ';' | tr '\n' ',' | sed 's/.$//'`
# extract papuan samples from vcf
bcftools view -s $SAMPLES -Oz -o steps/smcpp/vcf/nonafr_analyzed_papuans_chr7_with_aa.vcf.gz steps/smcpp/vcf/nonafr_analyzed_individuals_chr7_with_aa.vcf.gz
# compute polarized allele frequencies
vcftools --gzvcf steps/smcpp/vcf/nonafr_analyzed_papuans_chr7_with_aa.vcf.gz --out steps/smcpp/vcf/nonafr_analyzed_papuans_chr7_with_aa --freq2 --derived
# compute r2
vcftools --gzvcf steps/smcpp/vcf/nonafr_analyzed_papuans_chr7_with_aa.vcf.gz --out steps/smcpp/vcf/nonafr_analyzed_papuans_chr7_with_aa --geno-r2 --min-r2 0.01 --ld-window-bp 500000
"""
# for max_missing in [50000, 100000]:
for max_missing in [100000]:
    # run vcf2smc
smc_file_name_list = []
for dedicated_indiv in dedicated_individuals:
smc_file_name = f'steps/smcpp/vcf2smc/{dedicated_indiv}_{max_missing}.txt'
smc_file_name_list.append(smc_file_name)
gwf.target(name=f'smcpp_vcf2smc_{max_missing}_{dedicated_indiv.replace("-", "_")}',
inputs=['steps/smcpp/vcf/nonafr_analyzed_individuals_chr7.vcf.gz'],
outputs=[smc_file_name],
walltime='02:00:00',
memory='8g'
) << f"""
source ./scripts/conda_init.sh
conda activate smcpp
mkdir -p steps/smcpp/vcf2smc
SAMPLES=`tail -n +2 ~/simons/faststorage/people/kmt/results/analyzed_individuals.csv | grep -v Africa | cut -f 1 -d ',' | grep -f - ~/simons/faststorage/data/metadata/nature18964-s2-fixed-genders.csv | cut -f 3 -d ';' | tr '\n' ',' | sed 's/.$//'`
singularity run docker://terhorst/smcpp:latest vcf2smc --cores 4 --missing-cutoff {max_missing} -d {dedicated_indiv} {dedicated_indiv} steps/smcpp/vcf/nonafr_analyzed_individuals_chr7.vcf.gz {smc_file_name} 7 nonAfr:$SAMPLES
"""
for dedicated_indiv_list in [dedicated_individuals[:], dedicated_individuals[:-2]]:
for i in range(2):
# run estimate and plot
prefix = f"estimate_{i}_{max_missing}_{'_'.join(dedicated_indiv_list)}"
tasktag = prefix.replace('-', '_')
gwf.target(name=f'estimate_{tasktag}_{i}',
inputs=smc_file_name_list,
outputs=[f'steps/smcpp/{prefix}/model.final.json', f'steps/smcpp/{prefix}/{prefix}.png'],
walltime='4-00:00:00',
cores=10,
memory='16g'
) << f"""
source ./scripts/conda_init.sh
conda activate smcpp
mkdir -p steps/smcpp/{prefix}
singularity run docker://terhorst/smcpp:latest estimate -o steps/smcpp/{prefix} --cores 10 --timepoints 35 4e4 {mutation_rate} {' '.join(smc_file_name_list)}
singularity run docker://terhorst/smcpp:latest plot -g {generation_time} -c steps/smcpp/{prefix}/{prefix}.png steps/smcpp/{prefix}/model.final.json
"""
prefix = f"cv_{i}_{max_missing}_{'_'.join(dedicated_indiv_list)}"
tasktag = prefix.replace('-', '_')
gwf.target(name=f'estimate_{tasktag}_{i}',
inputs=smc_file_name_list,
outputs=[f'steps/smcpp/{prefix}/model.final.json', f'steps/smcpp/{prefix}/{prefix}.png'],
walltime='4-00:00:00',
cores=10,
memory='16g'
) << f"""
source ./scripts/conda_init.sh
conda activate smcpp
mkdir -p steps/smcpp/{prefix}
singularity run docker://terhorst/smcpp:latest cv --folds 2 -o steps/smcpp/{prefix} --cores 10 --timepoints 35 4e4 {mutation_rate} {' '.join(smc_file_name_list)}
singularity run docker://terhorst/smcpp:latest plot -g {generation_time} -c steps/smcpp/{prefix}/{prefix}.png steps/smcpp/{prefix}/model.final.json
"""
# run estimate and plot
prefix = f"estimate_fixrec_{i}_{max_missing}_{'_'.join(dedicated_indiv_list)}"
tasktag = prefix.replace('-', '_')
gwf.target(name=f'estimate_{tasktag}_{i}',
inputs=smc_file_name_list,
outputs=[f'steps/smcpp/{prefix}/model.final.json', f'steps/smcpp/{prefix}/{prefix}.png'],
walltime='4-00:00:00',
cores=10,
memory='16g'
) << f"""
source ./scripts/conda_init.sh
conda activate smcpp
mkdir -p steps/smcpp/{prefix}
singularity run docker://terhorst/smcpp:latest estimate -r 1.13e-8 -o steps/smcpp/{prefix} --cores 10 --timepoints 35 4e4 {mutation_rate} {' '.join(smc_file_name_list)}
singularity run docker://terhorst/smcpp:latest plot -g {generation_time} -c steps/smcpp/{prefix}/{prefix}.png steps/smcpp/{prefix}/model.final.json
"""
prefix = f"cv_fixrec_{i}_{max_missing}_{'_'.join(dedicated_indiv_list)}"
tasktag = prefix.replace('-', '_')
gwf.target(name=f'estimate_{tasktag}_{i}',
inputs=smc_file_name_list,
outputs=[f'steps/smcpp/{prefix}/model.final.json', f'steps/smcpp/{prefix}/{prefix}.png'],
walltime='4-00:00:00',
cores=10,
memory='16g'
) << f"""
source ./scripts/conda_init.sh
conda activate smcpp
mkdir -p steps/smcpp/{prefix}
singularity run docker://terhorst/smcpp:latest cv -r 1.13e-8 --folds 2 -o steps/smcpp/{prefix} --cores 10 --timepoints 35 4e4 {mutation_rate} {' '.join(smc_file_name_list)}
singularity run docker://terhorst/smcpp:latest plot -g {generation_time} -c steps/smcpp/{prefix}/{prefix}.png steps/smcpp/{prefix}/model.final.json
"""
#################################################################################
# drift and recombination simulations
#################################################################################
# drift_rec_outfile = 'results/neutral_freq_sims.hdf'
# gwf.target(name='neutral_freq_sims',
# inputs=[],
# outputs=[drift_rec_outfile],
# walltime='4-00:00:00',
# cores=10,
# memory='16g'
# ) << f"""
# source ./scripts/conda_init.sh
# conda activate simons
# python scripts/neutral_freq_sims.py {drift_rec_outfile}
# """
for span_years in [5000, 10000, 15000, 20000, 25000]:
drift_rec_outfile_spaced = f'results/multinom_sampling_sexavg_spaced_{span_years}.hdf'
drift_rec_outfile_echfreqs = f'results/multinom_sampling_sexavg_echfreqs_{span_years}.hdf'
gwf.target(name=f'multinom_sampling_{span_years}',
inputs=[],
outputs=[drift_rec_outfile_spaced, drift_rec_outfile_echfreqs],
walltime='6-23:00:00',
cores=20,
memory='80g'
) << f"""
source ./scripts/conda_init.sh
conda activate simons
python scripts/multinom_sampling.py --years {span_years} {drift_rec_outfile_spaced} {drift_rec_outfile_echfreqs}
"""
# drift_rec_outfile_spaced = 'results/multinom_sampling_spaced.hdf'
# drift_rec_outfile_echfreqs = 'results/multinom_sampling_echfreqs.hdf'
# gwf.target(name='multinom_sampling',
# inputs=[],
# outputs=[drift_rec_outfile_spaced, drift_rec_outfile_echfreqs],
# walltime='6-23:00:00',
# cores=20,
# memory='80g'
# ) << f"""
# source ./scripts/conda_init.sh
# conda activate simons
# python scripts/multinom_sampling.py --years 5000 --years 10000 --years 15000 --years 20000 --years 25000 {drift_rec_outfile_spaced} {drift_rec_outfile_echfreqs}
# """
# # extra sims assuming spans of 20,000 and 25,000 years:
# drift_rec_outfile_spaced = 'results/multinom_sampling_spaced_extra.hdf'
# drift_rec_outfile_echfreqs = 'results/multinom_sampling_echfreqs_extra.hdf'
# gwf.target(name='multinom_sampling_extra',
# inputs=[],
# outputs=[drift_rec_outfile_spaced, drift_rec_outfile_echfreqs],
# walltime='6-23:00:00',
# cores=20,
# memory='80g'
# ) << f"""
# source ./scripts/conda_init.sh
# conda activate simons
# python scripts/multinom_sampling.py --years 5000 --years 10000 --years 15000 --years 20000 --years 25000 {drift_rec_outfile_spaced} {drift_rec_outfile_echfreqs}
# """
#################################################################################
# slim simulations
#################################################################################
slim_tree_files = list()
slim_dist_files = list()
slim_dist_twice_files = list()
slim_sites_files = list()
slim_vcf_files = list()
slim_geno_vcf_files = list()
slim_ld_files = list()
slim_geno_ld_files = list()
slim_freq_files = list()
sweep_data_files = list()
simulations_dir = os.path.join(mydir, 'steps', 'slim', 'simulations')
slim_output_dir = simulations_dir
slim_sweep_data_dir = os.path.join(mydir, 'steps', 'slim', 'sweep_data')
if not os.path.exists(slim_sweep_data_dir):
os.makedirs(slim_sweep_data_dir)
# get the number of non-africans in our data set.
# this is how many haplotypes we should sample from each simulation:
nr_non_africans = sum(x['Region'] != 'Africa' and x['Genetic sex assignment'] == 'XY' for x in individuals.values())
nr_africans = sum(x['Region'] == 'Africa' and x['Genetic sex assignment'] == 'XY' for x in individuals.values())
# number of generations in forward simulation:
total_sim_generations = 200000
# pasted from nb_22_slim_simulations notebook:
# standard_demography = \
# [(1, 23434),
# (162010, 13579),
# (183858, 27616),
# (194462, 4107),
# (196870, 3936),
# (197809, 7343),
# (198466, 15066),
# (198926, 37591),
# (199300, 86151),
# (199604, 153716),
# (199791, 210671),
# (199882, 239151),
# (199938, 255465)]
# Tennessen CEU from nb_22_slim_simulations notebook:
standard_demography = \
[(1, 15038),
(185354, 15038),
(186450, 29751),
(189244, 29776),
(191533, 29776),
(193592, 29776),
(195329, 3833),
(196567, 3828),
(197254, 3826),
(197711, 3826),
(197892, 2306),
(198009, 2765),
(198178, 3473),
(198347, 4361),
(198517, 5476),
(198686, 6875),
(198855, 8255),
(198956, 9366),
(199043, 10529),
(199130, 11833),
(199217, 13300),
(199304, 14950),
(199391, 16801),
(199478, 19772),
(199565, 36416),
(199652, 76838),
(199739, 162123),
(199826, 342067),
(199913, 715553)]
# pasted from nb_22_slim_simulations notebook:
standard_demography_truncated = \
[]
# test demography for sanity checking
test_demography = \
[(1, 10000)]
sweep_types = ['nosweep']#, 'complete', 'partial']
# pasted from nb_22_slim_simulations notebook
sweep_generations = [198103] # sweep starts 55000 years ago
# [198965, 198275, 197586, 196896]
# named autosomal population size demographies:
demographies = {
'standard': standard_demography,
'truncated': standard_demography_truncated,
# ('test', test_demography)
}
# African X/A ratio is 0.66 but is further reduced inside regions:
# x_auto_ratios = [0.65 * x for x in [1, 0.71]] # outside and inside regions
# x_auto_ratios = [0.65 * x for x in [1, 0.71]] # outside and inside regions
x_auto_ratios = [0.65, 0.51] # African and non-African
# size reduction beyond 3/4 (slim takes care of the 3/4 bookkeeping):
size_reductions = [x/0.75 for x in x_auto_ratios] # outside and inside regions
# mean per generation recombination rate in regions (new decode map):
sexavg_rec_rates_per_gen = [0.46e-8, # mean in regions
1.16e-8] # global for chrX
# we only simulate non-africans. So we simulate using a percent cutoff for clade size
# that corresponds to the same number of actual non-africans (~29%):
slim_min_clade_size_in_percent = int(round((analysis_globals.min_sweep_clade_size / (nr_africans + nr_non_africans)) * (nr_africans + nr_non_africans) / nr_non_africans * 100))
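# note: the two (nr_africans + nr_non_africans) factors above cancel, so this is
# simply min_sweep_clade_size / nr_non_africans * 100 (and it is overridden below)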
# slim_min_clade_size_in_percent = [45, 50, 55]
slim_min_clade_size_in_percent = [50]
# write 10mb recombination map files
target = gwf.target("slim_rec_maps",
inputs=['data/decode_hg38_sexavg_per_gen.tsv'],
outputs=[f'steps/slim/rec_maps/{x}.tsv' for x in range(15)],
memory='1g', walltime='00:10:00') << """
mkdir -p steps/slim/rec_maps
python scripts/slim_rec_maps.py data/decode_hg38_sexavg_per_gen.tsv steps/slim/rec_maps
"""
# nr of simulated sequences
# sim_sample_size = nr_non_africans
sim_sample_size = 49 # number of CEU males
rec_map_files = target.outputs
# generate combinations of parameters to run:
# testing that autosome settings produce expected diversity:
autosome_params = list(itertools.product(
['A'], # chromosome X or A for autosome
['standard'], # demography
[1], # size reductions
[1.13e-8], # mean rec rate (for chrosome 7)
    ['nosweep'], [0], [0], # type, sweep_generations, sel_coefficients
[slim_min_clade_size_in_percent], # min clade size in percent
[10] # nr replicates
))
# neutral simulations:
neutral_params = list(itertools.product(
['X'], # chromosome X or A for autosome
# ['standard', 'truncated'], # demography
['standard'], # demography
size_reductions,
# sexavg_rec_rates_per_gen,
rec_map_files,
    ['nosweep'], [0], [0], # type, sweep_generations, sel_coefficients
[slim_min_clade_size_in_percent], # min clade size in percent
[500] # nr replicates
))
# selection simulations:
sweep_params = list(itertools.product(
['X'], # chromosome X or A
['standard'], # demography
size_reductions,
# sexavg_rec_rates_per_gen,
rec_map_files,
# ['complete', 'partial'], sweep_generations, [0.01, 0.1],
['episode'], sweep_generations, [0.01, 0.02, 0.05, 0.1], # using episode, slim_trees.py assumes 10,000 years unless --selectionend is specified
[slim_min_clade_size_in_percent], # clade size in percent
[10] # nr replicates
))
params = neutral_params + autosome_params + sweep_params
# import pprint
# pprint.pprint(list(params))
# sys.exit()
for chrom, demog_name, size_reduction, rec_rate_per_gen, \
sweep_type, sweep_start, selcoef, min_sweep_clade_percent, nr_replicates in params:
demog = demographies[demog_name]
# if chrom == 'X':
# # SLiM needs a rate for when recombination can physically occur (i.e. in the female
    #     # between the Xs). To get that from the sex-averaged recombination rate, we need to
    #     # account for the fact that only 2/3 of X chromosomes have the opportunity to recombine
# # in each generation (assuming even sex ratios).
# meiosis_rec_rate = rec_rate_per_gen * 3 / 2
# mut_per_year = analysis_globals.mut_per_year
# elif chrom == 'A':
# mut_per_year = analysis_globals.auto_mut_per_year
# else:
# assert 0
#####################
if chrom == 'X':
mut_per_year = analysis_globals.mut_per_year
elif chrom == 'A':
mut_per_year = analysis_globals.auto_mut_per_year
else:
assert 0
if type(rec_rate_per_gen) is str:
        # refers to a recombination map file. This map is already scaled by 3/2.
assert chrom == 'X', "Recombination map only for X"
meiosis_rec_rate = None
recombination_map_file = rec_rate_per_gen
rec_tag = 'recmap_'+modpath(rec_rate_per_gen, suffix='', parent='') # should be an integer string
else:
recombination_map_file = None
if chrom == 'X':
            # SLiM needs a rate for when recombination can physically occur (i.e. in the female
            # between the Xs). To get that from the sex-averaged recombination rate, we need to
            # account for the fact that only 2/3 of X chromosomes have the opportunity to recombine
            # in each generation (assuming even sex ratios).
meiosis_rec_rate = rec_rate_per_gen * 3 / 2
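            # worked example: a sex-averaged rate of 1.16e-8 per generation
            # corresponds to a female meiosis rate of 1.16e-8 * 3 / 2 = 1.74e-8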
elif chrom == 'A':
meiosis_rec_rate = rec_rate_per_gen
else:
assert 0
rec_tag = f'constrec_{round(rec_rate_per_gen * 1e12)}'
#####################
id_str = '{}_{}_{}_{}_{}_{}_{}'.format(
demog_name, round(size_reduction*100), rec_tag,
chrom, sweep_type, sweep_start, int(selcoef*100))
slim_output_dir = os.path.join(simulations_dir, id_str.replace('_', '/'))
if not os.path.exists(slim_output_dir): os.makedirs(slim_output_dir)
# replicates
for i in range(nr_replicates):
sim_output_prefix = os.path.join(slim_output_dir, "{}_{}".format(id_str, i))
slim_tree_file = sim_output_prefix + '.trees'
slim_tree_files.append(slim_tree_file)
slim_dist_file = sim_output_prefix + '.hdf'
slim_dist_files.append(slim_dist_file)
slim_sites_file = sim_output_prefix + '_sites.hdf'
slim_sites_files.append(slim_sites_file)
slim_vcf_file = sim_output_prefix + '.vcf'
slim_vcf_files.append(slim_vcf_file)
slim_geno_vcf_file = sim_output_prefix + '_geno.vcf'
        slim_geno_vcf_files.append(slim_geno_vcf_file)
if chrom == 'A':
slim_ld_file = sim_output_prefix + '.vcf.hap.ld'
slim_ld_files.append(slim_ld_file)
slim_geno_ld_file = sim_output_prefix + '.vcf.geno.ld'
            slim_geno_ld_files.append(slim_geno_ld_file)
slim_freq_file = sim_output_prefix + '.vcf.frq'
slim_freq_files.append(slim_freq_file)
# # run the simulation and compute pairwise differences
# gwf.target_from_template("{}_{}_slim".format(id_str, i),
# slim_sim(selcoef, analysis_globals.gen_time,
# '{:.12f}'.format(mut_per_year),
# meiosis_rec_rate,
# nr_non_africans,
# sweep_type, sweep_start, demog,
# chrom, size_reduction,
# total_sim_generations,
# slim_tree_file, slim_dist_file, slim_sites_file,
# slim_vcf_file, slim_geno_vcf_file,
# compute_ld_and_freqs=chrom=='A'))
# run the simulation and compute pairwise differences
gwf.target_from_template("{}_{}_slim".format(id_str, i),
slim_sim(selcoef, analysis_globals.gen_time,
'{:.12f}'.format(mut_per_year),
meiosis_rec_rate, recombination_map_file,
sim_sample_size,
sweep_type, sweep_start, demog,
chrom, size_reduction,
total_sim_generations,
slim_tree_file, slim_dist_file, slim_sites_file,
slim_vcf_file, slim_geno_vcf_file,
compute_ld_and_freqs=chrom=='A'))
# make dist twice file
slim_dist_twice_file = modpath(slim_dist_file, base=modpath(slim_dist_file, parent='', suffix='')+'_twice')
slim_dist_twice_files.append(slim_dist_twice_file)
gwf.target_from_template("{}_{}_dist_twice".format(id_str, i),
slim_dist_twice(slim_dist_file, slim_dist_twice_file))
# for pwdist_cutoff in [analysis_globals.pwdist_cutoff]:
if demog_name == 'truncated':
# HACK to set pairwise distance for use with truncated simulations:
# require that clade has common ancestor before 10k years
# 2 * 10000 * 0.6e-9 = 1.2e-05
pwdist_cutoff = 1.2e-05
else:
pwdist_cutoff = analysis_globals.pwdist_cutoff
sweep_data_dir = os.path.join(slim_sweep_data_dir,
modpath(slim_dist_file, suffix='', parent=''),
str(pwdist_cutoff))
if not os.path.exists(sweep_data_dir):
os.makedirs(sweep_data_dir)
for clique_size in min_sweep_clade_percent:
sweep_data_file = modpath("clique_data_{}_{}%.hdf".format(pwdist_cutoff, clique_size),
parent=sweep_data_dir)
gwf.target_from_template(id_str+'_{}_{:f}_{}'.format(i, pwdist_cutoff, clique_size),
clique_data(slim_dist_twice_file, sweep_data_file,
clique_size, pwdist_cutoff ))
sweep_data_files.append(sweep_data_file)
#################################################################################
# compute prop swept for all slim sweep data in a file that includes simulation info
#################################################################################
slim_summary_file = os.path.join(mydir, 'steps', 'slim', 'slim_summary.hdf')
g = gwf.target("slim_summary",
inputs=sweep_data_files, outputs=[slim_summary_file],
memory='10g', walltime='02:00:00') << """
source ./scripts/conda_init.sh
conda activate simons
python scripts/slim_summary.py {slim_sweep_data_dir} {out_file}
""".format(slim_sweep_data_dir=slim_sweep_data_dir, out_file=slim_summary_file)
#################################################################################
if __name__ == "__main__":
print(len(gwf.targets))
| [
"[email protected]"
]
| |
aecd9b53dfc0f6dc6b969002346dc62541f907ee | c552cf5ed4714a3b5bdeab7af46092ff465b8c6a | /Python/SW Expert Academy/D4/6959. 이상한 나라의 덧셈게임.py | 480bd6b6edb1aadc98eabbbeeea28ef8a2dfe774 | []
| no_license | ksb8320/Algorithm | a786c5ab04e28ae9b3d180a77850899328075443 | 74b33f81eefa4cebf0dd8f1c3d65394d2aede372 | refs/heads/master | 2022-12-17T22:49:17.144572 | 2020-09-23T14:45:21 | 2020-09-23T14:45:21 | 253,751,288 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 544 | py | import sys
sys.stdin=open("input.txt")
def plus():
    global cnt
    while len(lst)>1:
        new=lst[0]+lst[1]  # add the two leading digits
        if new<10:
            # a single-digit sum replaces both leading digits
            lst.pop(0)
            lst.pop(0)
            lst.insert(0,new)
            cnt+=1
        else:
            # a two-digit sum leaves a leading carry of 1 plus the remainder
            lst[0]=1
            lst[1]=new-10
            cnt+=1
    if cnt%2==1:  # an odd total number of moves means A made the last move
        return "A"
    else:
        return "B"
for t in range(int(input())):
num=input()
lst=[]
for i in range(len(num)):
lst.append(int(num[i]))
cnt=0
print("#{} {}".format(t+1,plus())) | [
"[email protected]"
]
| |
6355476cfb93b8ed838af833f12252e27623f0f5 | 316b8375a7ef8095f09973d13f5a49bc7fbe7580 | /leetcode/332.py | 06d41eae074e21c5ecfafeb7129b8286192a3c5d | []
| no_license | zhaolijian/suanfa | 9a8d23fbca01d994f7eef24631783c4b7ed25683 | 4f3b25f360f30c0e604ba4dc4d5774ccb5f25b32 | refs/heads/master | 2023-06-08T17:12:41.522937 | 2021-06-27T08:13:16 | 2021-06-27T08:13:16 | 313,269,459 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,123 | py | # Given a 2D string array of airline tickets [from, to], where the two members of each sub-array are the departure and landing airports, reconstruct and order the itinerary.
# All of these tickets belong to a traveller departing from JFK (Kennedy International Airport), so the itinerary must begin with JFK.
# If multiple valid itineraries exist, return the combination with the smallest natural (lexicographic) order.
# For example, the itinerary ["JFK", "LGA"] is smaller than ["JFK", "LGB"] and sorts earlier.
# All airports are represented by three capital letters (airport codes).
# Assume every set of tickets admits at least one valid itinerary.
# Approach: Hierholzer's algorithm
# Hierholzer's algorithm finds an Eulerian path in a connected graph; it works as follows:
# 1. Starting from the source vertex, perform a depth-first search.
# 2. Each time an edge is used to move from one vertex to another, delete that edge.
# 3. When no outgoing edge remains, push the current vertex onto the stack and return.
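# Worked example: with tickets [["JFK","SFO"],["JFK","ATL"],["SFO","ATL"],
# ["ATL","JFK"],["ATL","SFO"]], the code below returns
# ["JFK","ATL","JFK","SFO","ATL","SFO"].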
import collections
class Solution:
    def findItinerary(self, tickets):
        d = collections.defaultdict(list)  # adjacency list
        for f, t in tickets:
            d[f] += [t]  # store each ticket in the adjacency list
        for f in d:
            d[f].sort()  # sort destinations so the smallest is tried first
        ans = []
        def dfs(f):  # depth-first search
            while d[f]:
                dfs(d[f].pop(0))  # follow (and delete) the smallest remaining edge
            # nothing more can be reached from here, so this airport is the
            # final landing airport of the current path
            ans.insert(0, f)  # prepend it to the itinerary
        dfs('JFK')
        return ans
# Or, equivalently:
from collections import defaultdict
class Solution:
def findItinerary(self, tickets):
d = defaultdict(list)
for start, end in tickets:
d[start].append(end)
for ele in d:
d[ele].sort()
res = []
def dfs(node):
nonlocal res
while d[node]:
dfs(d[node].pop(0))
res = [node] + res
dfs('JFK')
return res
if __name__ == '__main__':
s = Solution()
tickets = [["JFK","KUL"],["JFK","NRT"],["NRT","JFK"]]
print(s.findItinerary(tickets)) | [
"[email protected]"
]
| |
b0dc8fb9f2c594475c812fd9caf3e45d2e9fd16f | ec84ccb07890d5f3cf7b80cd0725875f213ab460 | /attendance/views/master/overtime_master.py | fc706e86aa8d2c12aa3e2a3b99d05ab1e2310da2 | []
| no_license | Imam-Hossain-45/hrm_app | c7a87e0e0572093dca47227c399cd3366bdf5ee9 | cde39d6638b5c14555913d263f6b544522added1 | refs/heads/master | 2023-08-03T05:22:23.398967 | 2021-09-07T15:09:56 | 2021-09-07T15:09:56 | 404,022,016 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,446 | py | __all__ = [
'OvertimeMasterListView',
'OvertimeMasterCreateView',
'OvertimeMasterUpdateView',
'OvertimeMasterDeleteView',
]
from cimbolic.models import Variable, Formula
from django.contrib import messages
from django.contrib.auth.mixins import LoginRequiredMixin
from django.forms import inlineformset_factory, modelform_factory, RadioSelect
from django.http import HttpResponseRedirect
from django.views.generic import CreateView, DeleteView, ListView, TemplateView
from django.views.generic.detail import SingleObjectMixin
from django.urls import reverse, reverse_lazy
from helpers.mixins import PermissionMixin
from attendance.models.master.overtime_master import *
from helpers.functions import get_organizational_structure
class OvertimeMasterListView(LoginRequiredMixin, PermissionMixin, ListView):
"""
List all the overtime rules.
URL: admin/attendance/master/overtime/
"""
model = OvertimeRule
template_name = 'attendance/master/overtime_master/list.html'
permission_required = 'view_overtimerule'
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context['permissions'] = self.get_current_user_permission_list()
context['org_items_list'] = get_organizational_structure()
return context
def get(self, request, *args, **kwargs):
self.object_list = self.get_queryset()
for ot in self.object_list:
try:
calc_model = ot.active_wage_calculation_model
except ValueError as e:
if str(e) == 'Exactly 1 wage calculation method must be active':
messages.error(
request,
'Exactly 1 wage calculation method must be active'
' for Overtime: {}'.format(ot.name),
)
calc_model = 'N/A'
else:
raise e
ot.calc_model = calc_model
context = self.get_context_data()
return self.render_to_response(context)
class OvertimeMasterCreateView(LoginRequiredMixin, PermissionMixin, CreateView):
"""
Create a new overtime rule.
URL: admin/attendance/master/overtime/create/
Note: Alternative implementation ideas for creating objects of other models
that have a FK relationship to this view's model:
- Manually create distinct forms in the template and parse the resulting
POST data in this view manually.
- Add links to views that create the objects at the end of this view's
form. With AJAX, save this form so it has a pk (otherwise the related
objects cannot be saved).
"""
model = OvertimeRule
fields = [
'name', 'code', 'description', 'default_calculation_unit', 'segment',
'buffer_duration_pre', 'buffer_duration_unit_pre', 'minimum_working_duration_pre',
'minimum_working_duration_unit_pre', 'tolerance_time_pre', 'buffer_duration_post',
'buffer_duration_unit_post', 'minimum_working_duration_post',
'minimum_working_duration_unit_post', 'tolerance_time_post',
'taxable',
]
template_name = 'attendance/master/overtime_master/create.html'
permission_required = 'add_overtimerule'
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context['permissions'] = self.get_current_user_permission_list()
context['org_items_list'] = get_organizational_structure()
return context
def get_form(self, form_class=None):
if form_class is None:
form_class = modelform_factory(
self.model,
fields=self.fields,
widgets={
'segment': RadioSelect,
},
)
return super().get_form(form_class=form_class)
def get_success_url(self):
return reverse(
'beehive_admin:attendance:overtime_update',
kwargs={'pk': self.object.pk},
)
class OvertimeMasterUpdateView(LoginRequiredMixin, PermissionMixin, SingleObjectMixin, TemplateView):
"""
Update an overtime rule.
URL: admin/attendance/master/overtime/<pk>/update/
"""
model = OvertimeRule
template_name = 'attendance/master/overtime_master/update.html'
permission_required = 'change_overtimerule'
overtimerule_form_class = modelform_factory(
OvertimeRule,
fields=[
'name', 'code', 'description', 'default_calculation_unit', 'segment',
'buffer_duration_pre', 'buffer_duration_unit_pre', 'minimum_working_duration_pre',
'minimum_working_duration_unit_pre', 'tolerance_time_pre', 'buffer_duration_post',
'buffer_duration_unit_post', 'minimum_working_duration_post',
'minimum_working_duration_unit_post', 'tolerance_time_post',
'taxable',
],
widgets={
'segment': RadioSelect,
},
)
duration_restriction_formset_class = inlineformset_factory(
OvertimeRule,
OvertimeDurationRestriction,
fields=['rule', 'ot_segment', 'scope_value', 'scope_unit', 'maximum_duration', 'maximum_duration_unit'],
widgets={'ot_segment': RadioSelect},
extra=0,
min_num=1,
)
calc_fixedrate_form_class = modelform_factory(
OvertimeWageCalculationFixedRate,
fields=['rule', 'enabled', 'basis', 'scope_value', 'amount'],
)
calc_variable_form_class = modelform_factory(
OvertimeWageCalculationVariable,
fields=['rule', 'enabled', 'basis'],
)
calc_manual_form_class = modelform_factory(
OvertimeWageCalculationManual,
fields=['rule', 'enabled'],
)
calc_rulebased_form_class = modelform_factory(
OvertimeWageCalculationRuleBased,
fields=['rule', 'enabled', 'basis'],
)
calc_rulebased_formula_formset_class = inlineformset_factory(
Variable,
Formula,
fields=['variable', 'priority', 'condition', 'rule'],
extra=0,
min_num=1,
)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.object = None
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context['permissions'] = self.get_current_user_permission_list()
context['org_items_list'] = get_organizational_structure()
if 'form0' not in context:
context['form0'] = self.overtimerule_form_class(
instance=self.object,
)
if 'drforms' not in context:
context['drforms'] = self.duration_restriction_formset_class(
instance=self.object,
)
if 'form1' not in context:
context['form1'] = self.calc_fixedrate_form_class(
instance=self.object.fixed_rate_wage,
)
if 'form2' not in context:
context['form2'] = self.calc_variable_form_class(
instance=self.object.variable_wage,
)
if 'form3' not in context:
context['form3'] = self.calc_manual_form_class(
instance=self.object.manual_wage,
)
if 'form4' not in context:
context['form4'] = self.calc_rulebased_form_class(
instance=self.object.rule_based_wage,
)
if 'rbforms' not in context:
context['rbforms'] = self.calc_rulebased_formula_formset_class(
instance=self.object.rule_based_wage.variable,
)
return context
def get_success_url(self):
return reverse(
'beehive_admin:attendance:overtime_update',
kwargs={'pk': self.object.pk},
)
def get(self, request, *args, **kwargs):
self.object = self.get_object()
return super().get(request, *args, **kwargs)
def post(self, request, *args, **kwargs):
self.object = self.get_object()
if 'done' in request.POST:
if self.object.count_enabled_wage_calculation_methods() != 1:
messages.error(request, 'Exactly 1 amount calculation method may be active!')
return self.get(request, *args, **kwargs)
else:
return HttpResponseRedirect(reverse('beehive_admin:attendance:overtime_list'))
elif 'drforms' in request.POST:
formset = self.duration_restriction_formset_class(
request.POST, instance=self.object
)
if formset.is_valid():
formset.save()
messages.success(request, 'Saved.')
return HttpResponseRedirect(self.get_success_url())
else:
return self.render_to_response(self.get_context_data(drforms=formset))
        elif 'rbforms' in request.POST:
            if request.POST.get('rb_formset_enabled') == 'on':
                formset = self.calc_rulebased_formula_formset_class(
                    request.POST, instance=self.object.rule_based_wage.variable
                )
                if formset.is_valid():
                    formset.save()
                    messages.success(request, 'Saved.')
                    return HttpResponseRedirect(self.get_success_url())
                else:
                    return self.render_to_response(self.get_context_data(rbforms=formset))
            # the formula formset is disabled, so there is nothing to validate or save
            return HttpResponseRedirect(self.get_success_url())
else:
fid = request.POST.get('fid', 'form0')
if fid == 'form1':
form = self.calc_fixedrate_form_class(
request.POST, instance=self.object.fixed_rate_wage
)
elif fid == 'form2':
form = self.calc_variable_form_class(
request.POST, instance=self.object.variable_wage
)
elif fid == 'form3':
form = self.calc_manual_form_class(
request.POST, instance=self.object.manual_wage
)
elif fid == 'form4':
form = self.calc_rulebased_form_class(
request.POST, instance=self.object.rule_based_wage
)
formset = self.calc_rulebased_formula_formset_class(
request.POST, instance=self.object.rule_based_wage.variable
)
if form.is_valid():
form.save()
if formset.is_valid():
formset.save()
messages.success(request, 'Saved.')
return HttpResponseRedirect(self.get_success_url())
else:
if formset.is_valid():
formset.save()
                    return self.render_to_response(self.get_context_data(**{fid: form, 'rbforms': formset}))
else:
form = self.overtimerule_form_class(
request.POST, instance=self.object
)
if form.is_valid():
obj = form.save()
if fid == 'form0':
self.object = obj
messages.success(request, 'Saved.')
return HttpResponseRedirect(self.get_success_url())
else:
return self.render_to_response(self.get_context_data(**{fid: form}))
class OvertimeMasterDeleteView(LoginRequiredMixin, PermissionMixin, DeleteView):
"""
Delete an overtime rule.
URL: admin/attendance/master/overtime/<pk>/delete/
"""
model = OvertimeRule
success_url = reverse_lazy('beehive_admin:attendance:overtime_list')
template_name = 'attendance/master/overtime_master/confirm_delete.html'
permission_required = 'delete_overtimerule'
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context['permissions'] = self.get_current_user_permission_list()
context['org_items_list'] = get_organizational_structure()
return context
def delete(self, request, *args, **kwargs):
messages.success(request, 'Deleted "{}".'.format(self.get_object()))
return super().delete(request, *args, **kwargs)
| [
"[email protected]"
]
| |
5095f0660d9382f5d1d97384535279c1d362de76 | d3efc82dfa61fb82e47c82d52c838b38b076084c | /Autocase_Result/KCB_YCHF/KCB_YCHF_MM/SHOffer/YCHF_KCBYCHF_SHBP_356.py | 5d466bcc423c69283f3b9c0e2fe9973b9b6699aa | []
| no_license | nantongzyg/xtp_test | 58ce9f328f62a3ea5904e6ed907a169ef2df9258 | ca9ab5cee03d7a2f457a95fb0f4762013caa5f9f | refs/heads/master | 2022-11-30T08:57:45.345460 | 2020-07-30T01:43:30 | 2020-07-30T01:43:30 | 280,388,441 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,487 | py | #!/usr/bin/python
# -*- encoding: utf-8 -*-
import sys
sys.path.append("/home/yhl2/workspace/xtp_test//xtp/api")
from xtp_test_case import *
sys.path.append("/home/yhl2/workspace/xtp_test//service")
from ServiceConfig import *
from ARmainservice import *
from QueryStkPriceQty import *
from log import *
sys.path.append("/home/yhl2/workspace/xtp_test//mysql")
from CaseParmInsertMysql import *
from SqlData_Transfer import *
sys.path.append("/home/yhl2/workspace/xtp_test//utils")
from QueryOrderErrorMsg import queryOrderErrorMsg
from env_restart import *
class YCHF_KCBYCHF_SHBP_356(xtp_test_case):
def setUp(self):
#sql_transfer = SqlData_Transfer()
#sql_transfer.transfer_fund_asset('YCHF_KCBYCHF_SHBP_356')
#clear_data_and_restart_all()
#Api.trade.Logout()
#Api.trade.Login()
pass
#
def test_YCHF_KCBYCHF_SHBP_356(self):
title = '重启数据库服务(沪A本方最优初始卖出)'
# 定义当前测试用例的期待值
# 期望状态:初始、未成交、部成、全成、部撤已报、部撤、已报待撤、已撤、废单、撤废、内部撤单
# xtp_ID和cancel_xtpID默认为0,不需要变动
case_goal = {
'期望状态': '初始',
'errorID': 0,
'errorMSG': queryOrderErrorMsg(0),
'是否生成报单': '是',
'是否是撤废': '否',
# '是否是新股申购': '',
'xtp_ID': 0,
'cancel_xtpID': 0,
}
logger.warning(title)
# 定义委托参数信息------------------------------------------
# 参数:证券代码、市场、证券类型、证券状态、交易状态、买卖方向(B买S卖)、期望状态、Api
stkparm = QueryStkPriceQty('688000', '1', '4', '2', '0', 'S', case_goal['期望状态'], Api)
# 如果下单参数获取失败,则用例失败
if stkparm['返回结果'] is False:
rs = {
'报单测试结果': stkparm['返回结果'],
'测试错误原因': '获取下单参数失败,' + stkparm['错误原因'],
}
print(stkparm['错误原因'])
self.assertEqual(rs['报单测试结果'], True)
else:
wt_reqs = {
'business_type': Api.const.XTP_BUSINESS_TYPE['XTP_BUSINESS_TYPE_CASH'],
'order_client_id':1,
'market': Api.const.XTP_MARKET_TYPE['XTP_MKT_SH_A'],
'ticker': stkparm['证券代码'],
'side': Api.const.XTP_SIDE_TYPE['XTP_SIDE_SELL'],
'price_type': Api.const.XTP_PRICE_TYPE['XTP_PRICE_FORWARD_BEST'],
'price': stkparm['涨停价'],
'quantity': 300,
'position_effect':Api.const.XTP_POSITION_EFFECT_TYPE['XTP_POSITION_EFFECT_INIT']
}
rs = serviceTest(Api, case_goal, wt_reqs)
logger.warning('执行结果为' + str(rs['报单测试结果']) + ','
+ str(rs['用例错误源']) + ',' + str(rs['用例错误原因']))
## 还原可用资金
#sql_transfer = SqlData_Transfer()
#sql_transfer.transfer_fund_asset('YW_KCB_BAK_000')
#oms_restart()
self.assertEqual(rs['报单测试结果'], True) # 211
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
]
| |
ebf10c635faeba2b5910b7e187fea1e9f26f56e4 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/107/usersdata/195/52193/submittedfiles/questao3.py | 3ebb503b6b1fa43bc646831b23a3e4d72dde619d | []
| no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 320 | py | # -*- coding: utf-8 -*-
p=int(input('enter p:'))
q=int(input('enter q:'))
contador=0
i=2
while i<p:
    if p%i==0:
        contador=contador+1
    i=i+1
i=2  # restart the divisor search for q (otherwise it would resume from i==p)
while i<q:
    if q%i==0:
        contador=contador+1
    i=i+1
# contador==0 means neither p nor q has a divisor in [2, p) or [2, q),
# i.e. both are prime
if contador==0:
    print('S')
else:
    print('N')
| [
"[email protected]"
]
| |
0b88ec4badafd9b2ae3dca8979aed0160c9e81ee | f3f7fc5bf1d5657e7a67e46aee4b105198767889 | /manage.py | f0c87e8e4ef01a8361270108fde7443535393ad4 | []
| no_license | xjr7670/12306 | a2a16b73ce3cdb8ff1f8646429c2dc40716706fb | 32f065798732de744ef3a66739598af53a63bb32 | refs/heads/master | 2021-01-21T02:55:53.997748 | 2016-09-24T02:07:23 | 2016-09-24T02:07:23 | 68,806,655 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 241 | py | #!/usr/bin/env python3
#-*- coding:utf-8 -*-
from app import create_app
from flask_script import Manager, Shell
app = create_app('default')
manager = Manager(app)
manager.add_command("shell")
if __name__ == '__main__':
manager.run()
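# Run e.g. `python manage.py shell` for an interactive shell with `app` preloaded.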
| [
"[email protected]"
]
| |
de183b0867da57105653f428107297793038dc43 | 63d6a6809773c49edee2894fbe45915763756f90 | /authlib/admin_oauth/views.py | 5faeec307539ae210eeb130ba46e959308b12173 | [
"MIT"
]
| permissive | barseghyanartur/django-authlib | faaba71d80bec3331f9cd1dcd745dbff0ff96f6b | 4b4159eba619f6174d1f1e1cf33adf4893fa2315 | refs/heads/master | 2021-08-23T08:22:56.390862 | 2017-12-04T08:52:50 | 2017-12-04T08:54:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,707 | py | import re
from django import VERSION
from django.conf import settings
from django.contrib import auth, messages
from django.shortcuts import redirect
from django.utils.http import is_safe_url
from django.utils.translation import ugettext as _
from django.views.decorators.cache import never_cache
from authlib.google import GoogleOAuth2Client
REDIRECT_SESSION_KEY = 'admin-oauth-next'
ADMIN_OAUTH_PATTERNS = settings.ADMIN_OAUTH_PATTERNS
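# ADMIN_OAUTH_PATTERNS maps email regexes to the staff user to authenticate as.
# An illustrative (assumed) settings example:
#   ADMIN_OAUTH_PATTERNS = [(r'@example\.com$', '[email protected]')]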
if VERSION < (1, 11):
_orig_is_safe_url = is_safe_url
def is_safe_url(url, allowed_hosts):
host, = allowed_hosts
return _orig_is_safe_url(url=url, host=host)
def retrieve_next(request):
next = request.session.pop(REDIRECT_SESSION_KEY, None)
return (
next
if is_safe_url(url=next, allowed_hosts=[request.get_host()])
else None
)
@never_cache
def admin_oauth(request):
client = GoogleOAuth2Client(request)
if request.GET.get('next'):
request.session[REDIRECT_SESSION_KEY] = request.GET['next']
if all(key not in request.GET for key in ('code', 'oauth_token')):
return redirect(client.get_authentication_url())
user_data = client.get_user_data()
email = user_data.get('email', '')
if email:
for pattern, user_mail in ADMIN_OAUTH_PATTERNS:
if re.search(pattern, email):
user = auth.authenticate(email=user_mail)
if user and user.is_staff:
auth.login(request, user)
return redirect(retrieve_next(request) or 'admin:index')
messages.error(
request,
_('No email address received or email domain unknown.'),
)
return redirect('admin:login')
| [
"[email protected]"
]
| |
690313cbf83db05c3a09cb68f375a86b770771d5 | d548f1bde0d20dab787b59695e5467a44db1cef3 | /CarParkArcGisApi/CarParkArcGisApi/GetCurrentLocationApi.py | 8c5e60465173ffcf519b00b199168438cd385aaa | [
"MIT"
]
| permissive | moazzamwaheed2017/carparkapi | 2f53ab5b823d9afa11adc14073d7e147ca1d1de6 | e52ae1b2aed47321ce9d22ba6cd0b85fa60a417a | refs/heads/master | 2023-01-12T03:51:42.497815 | 2020-02-25T14:00:37 | 2020-02-25T14:00:37 | 236,687,771 | 0 | 0 | MIT | 2023-01-07T14:21:30 | 2020-01-28T08:20:00 | TypeScript | UTF-8 | Python | false | false | 345 | py |
import requests

# look up this machine's public IP address via the geojs.io service
ip_request = requests.get('https://get.geojs.io/v1/ip.json')
my_ip = ip_request.json()['ip']

# query the geolocation endpoint for that IP
geo_request_url = 'https://get.geojs.io/v1/ip/geo/' + my_ip + '.json'
geo_request = requests.get(geo_request_url)
geo_data = geo_request.json()

print('Latitude: ' + geo_data['latitude'])
print('Longitude: ' + geo_data['longitude'])
"[email protected]"
]
| |
a9221ed4b7a9c89294debbcd8f295e48195a8098 | 9a9cffc79943e1846cfb2b7463b889aac102fcfe | /quickunit/vcs/git.py | e2d0143ff6c0ef3f14ee2b3d8262511f10e9f17b | [
"Apache-2.0"
]
| permissive | dcramer/quickunit | 5c7483f7b33758df3bc3181409ec95fb2c3f87e1 | f72b038aaead2c6f2c6013a94a1823724f59a205 | refs/heads/master | 2020-05-17T09:20:43.604622 | 2013-07-29T21:32:50 | 2013-07-29T21:32:50 | 3,350,340 | 8 | 0 | null | null | null | null | UTF-8 | Python | false | false | 926 | py | from subprocess import Popen, PIPE, STDOUT
from quickunit.diff import DiffParser
from quickunit.vcs.base import ChangedFile
def parse_commit(parent=None):
if parent is None:
parent = 'master'
proc = Popen(['git', 'merge-base', 'HEAD', parent], stdout=PIPE, stderr=STDOUT)
parent_revision = proc.stdout.read().strip()
# pull in our diff
# git diff `git merge-base HEAD master`
proc = Popen(['git', 'diff', parent_revision], stdout=PIPE, stderr=STDOUT)
diff = proc.stdout.read().strip()
parser = DiffParser(diff)
files = []
for file in parser.parse():
if file['is_header']:
continue
# file was removed
if file['new_filename'] == '/dev/null':
continue
filename = file['new_filename'][2:]
is_new = (file['old_filename'] == '/dev/null')
files.append(ChangedFile(filename, is_new))
return files
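# Usage sketch: parse_commit('master') returns [ChangedFile(filename, is_new), ...]
# for every file that differs from the merge-base with master.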
| [
"[email protected]"
]
| |
1ad56451c606e61c3c242b3bcee2e4d2658715e0 | 393988ecbc84cc99941aa7e8b77f9035a694c5e2 | /autotest/pymod/webserver.py | 2d699502c5a13f37cff19f0b41cf7e28cf967fd7 | [
"MIT"
]
| permissive | rbuffat/gdal | 625f29339aa3401fc02500ccc16969459aad1f76 | 9a563c54787d72271140150880227918ed141d34 | refs/heads/master | 2021-07-10T07:13:35.754922 | 2018-04-13T17:09:02 | 2018-04-13T17:09:02 | 129,447,856 | 0 | 0 | null | 2018-04-13T20:01:27 | 2018-04-13T20:01:27 | null | UTF-8 | Python | false | false | 14,441 | py | #!/usr/bin/env python
###############################################################################
# $Id$
#
# Project: GDAL/OGR Test Suite
# Purpose: Fake HTTP server
# Author: Even Rouault <even dot rouault at mines dash paris dot org>
#
###############################################################################
# Copyright (c) 2010-2012, Even Rouault <even dot rouault at mines-paris dot org>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
###############################################################################
try:
    from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
except ImportError:
    from http.server import BaseHTTPRequestHandler, HTTPServer
from threading import Thread
import contextlib
import time
import sys
import gdaltest
from sys import version_info
do_log = False
custom_handler = None
@contextlib.contextmanager
def install_http_handler(handler_instance):
global custom_handler
custom_handler = handler_instance
try:
yield
finally:
handler_instance.final_check()
custom_handler = None
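# Typical usage, as a sketch built from the handler classes defined below:
#   handler = SequentialHandler()
#   handler.add('GET', '/foo', 200, {'Content-Type': 'text/plain'}, 'bar')
#   with install_http_handler(handler):
#       ...issue HTTP requests against the launched test server...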
class RequestResponse:
def __init__(self, method, path, code, headers = {}, body = None, custom_method = None, expected_headers = {}, expected_body = None):
self.method = method
self.path = path
self.code = code
self.headers = headers
self.body = body
self.custom_method = custom_method
self.expected_headers = expected_headers
self.expected_body = expected_body
class FileHandler:
def __init__(self, dict):
self.dict = dict
def final_check(self):
pass
def do_HEAD(self, request):
if request.path not in self.dict:
request.send_response(404)
request.end_headers()
else:
request.send_response(200)
request.send_header('Content-Length', len(self.dict[request.path]))
request.end_headers()
def do_GET(self, request):
if request.path not in self.dict:
request.send_response(404)
request.end_headers()
else:
filedata = self.dict[request.path]
start = 0
end = len(filedata)
if 'Range' in request.headers:
import re
res = re.search('bytes=(\d+)\-(\d+)', request.headers['Range'])
if res:
res = res.groups()
start = int(res[0])
end = int(res[1]) + 1
if end > len(filedata):
end = len(filedata)
request.send_response(200)
if 'Range' in request.headers:
request.send_header('Content-Range', '%d-%d' % (start, end-1))
request.send_header('Content-Length', len(filedata))
request.end_headers()
request.wfile.write(filedata[start:end])
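# FileHandler serves an in-memory dict of request paths to raw contents (with
# basic Range support), e.g. (sketch):
#   handler = FileHandler({'/foo.bin': open('foo.bin', 'rb').read()})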
class SequentialHandler:
def __init__(self):
self.req_count = 0
self.req_resp = []
self.req_resp_map = {}
def final_check(self):
assert self.req_count == len(self.req_resp), (self.req_count, len(self.req_resp))
assert len(self.req_resp_map) == 0
def add(self, method, path, code = None, headers = {}, body = None, custom_method = None, expected_headers = {}, expected_body = None):
assert len(self.req_resp_map) == 0
self.req_resp.append( RequestResponse(method, path, code, headers, body, custom_method, expected_headers, expected_body) )
def add_unordered(self, method, path, code = None, headers = {}, body = None, custom_method = None, expected_headers = {}, expected_body = None):
self.req_resp_map[(method, path)] = RequestResponse(method, path, code, headers, body, custom_method, expected_headers, expected_body)
@staticmethod
def _process_req_resp(req_resp, request):
if req_resp.custom_method:
req_resp.custom_method(request)
else:
if req_resp.expected_headers:
for k in req_resp.expected_headers:
if k not in request.headers or request.headers[k] != req_resp.expected_headers[k]:
sys.stderr.write('Did not get expected headers: %s\n' % str(request.headers))
request.send_response(400)
request.send_header('Content-Length', 0)
request.end_headers()
return
if req_resp.expected_body:
content = request.rfile.read(int(request.headers['Content-Length']))
if content != req_resp.expected_body:
sys.stderr.write('Did not get expected content: %s\n' % content)
request.send_response(400)
request.send_header('Content-Length', 0)
request.end_headers()
return
request.send_response(req_resp.code)
for k in req_resp.headers:
request.send_header(k, req_resp.headers[k])
if req_resp.body:
request.send_header('Content-Length', len(req_resp.body))
elif 'Content-Length' not in req_resp.headers:
request.send_header('Content-Length', '0')
request.end_headers()
if req_resp.body:
try:
request.wfile.write(req_resp.body)
except:
request.wfile.write(req_resp.body.encode('ascii'))
def process(self, method, request):
if self.req_count < len(self.req_resp):
req_resp = self.req_resp[self.req_count]
if method == req_resp.method and request.path == req_resp.path:
self.req_count += 1
SequentialHandler._process_req_resp(req_resp, request)
return
else:
if (method, request.path) in self.req_resp_map:
req_resp = self.req_resp_map[(method, request.path)]
del self.req_resp_map[(method, request.path)]
SequentialHandler._process_req_resp(req_resp, request)
return
request.send_error(500,'Unexpected %s request for %s, req_count = %d' % (method, request.path, self.req_count))
def do_HEAD(self, request):
self.process('HEAD', request)
def do_GET(self, request):
self.process('GET', request)
def do_POST(self, request):
self.process('POST', request)
def do_PUT(self, request):
self.process('PUT', request)
def do_DELETE(self, request):
self.process('DELETE', request)
class DispatcherHttpHandler(BaseHTTPRequestHandler):
# protocol_version = 'HTTP/1.1'
def log_request(self, code='-', size='-'):
return
def do_HEAD(self):
if do_log:
f = open('/tmp/log.txt', 'a')
f.write('HEAD %s\n' % self.path)
f.close()
custom_handler.do_HEAD(self)
def do_DELETE(self):
if do_log:
f = open('/tmp/log.txt', 'a')
f.write('DELETE %s\n' % self.path)
f.close()
custom_handler.do_DELETE(self)
def do_POST(self):
if do_log:
f = open('/tmp/log.txt', 'a')
f.write('POST %s\n' % self.path)
f.close()
custom_handler.do_POST(self)
def do_PUT(self):
if do_log:
f = open('/tmp/log.txt', 'a')
f.write('PUT %s\n' % self.path)
f.close()
custom_handler.do_PUT(self)
def do_GET(self):
if do_log:
f = open('/tmp/log.txt', 'a')
f.write('GET %s\n' % self.path)
f.close()
custom_handler.do_GET(self)
class GDAL_Handler(BaseHTTPRequestHandler):
def log_request(self, code='-', size='-'):
return
def do_HEAD(self):
if do_log:
f = open('/tmp/log.txt', 'a')
f.write('HEAD %s\n' % self.path)
f.close()
self.send_error(404,'File Not Found: %s' % self.path)
def do_DELETE(self):
if do_log:
f = open('/tmp/log.txt', 'a')
f.write('DELETE %s\n' % self.path)
f.close()
self.send_error(404,'File Not Found: %s' % self.path)
def do_POST(self):
if do_log:
f = open('/tmp/log.txt', 'a')
f.write('POST %s\n' % self.path)
f.close()
self.send_error(404,'File Not Found: %s' % self.path)
def do_PUT(self):
if do_log:
f = open('/tmp/log.txt', 'a')
f.write('PUT %s\n' % self.path)
f.close()
self.send_error(404,'File Not Found: %s' % self.path)
def do_GET(self):
try:
if do_log:
f = open('/tmp/log.txt', 'a')
f.write('GET %s\n' % self.path)
f.close()
if self.path == '/shutdown':
self.send_response(200)
self.send_header('Content-type', 'text/html')
self.end_headers()
#sys.stderr.write('stop requested\n')
self.server.stop_requested = True
return
return
except IOError:
pass
self.send_error(404,'File Not Found: %s' % self.path)
class GDAL_HttpServer(HTTPServer):
def __init__(self, server_address, handlerClass):
HTTPServer.__init__(self, server_address, handlerClass)
self.running = False
self.stop_requested = False
def is_running(self):
return self.running
def stop_server(self):
if self.running:
if version_info >= (2,6,0):
self.shutdown()
else:
gdaltest.gdalurlopen("http://127.0.0.1:%d/shutdown" % self.port)
self.running = False
def serve_until_stop_server(self):
self.running = True
if version_info >= (2,6,0):
self.serve_forever(0.25)
else:
while self.running and not self.stop_requested:
self.handle_request()
self.running = False
self.stop_requested = False
class GDAL_ThreadedHttpServer(Thread):
def __init__(self, handlerClass = None):
Thread.__init__(self)
ok = False
self.server = 0
if handlerClass is None:
handlerClass = GDAL_Handler
for port in range(8080,8100):
try:
self.server = GDAL_HttpServer(('', port), handlerClass)
self.server.port = port
ok = True
break
except:
pass
if not ok:
raise Exception('could not start server')
def getPort(self):
return self.server.port
def run(self):
try:
self.server.serve_until_stop_server()
except KeyboardInterrupt:
print('^C received, shutting down server')
self.server.socket.close()
def start_and_wait_ready(self):
if self.server.running:
raise Exception('server already started')
self.start()
while not self.server.running:
time.sleep(1)
def stop(self):
self.server.stop_server()
# Explicitly destroy the object so that the socket is really closed
del self.server
def run_server(self, timeout):
if not self.server.running:
raise Exception('server not started')
count = 0
while (timeout <= 0 or count < timeout) and self.server.running and not self.server.stop_requested:
#print(count)
#print(self.server.is_running())
time.sleep(0.5)
count = count + 0.5
self.stop()
def launch(fork_process = None, handler = None):
if handler is not None:
if fork_process:
raise Exception('fork_process = True incompatible with custom handler')
fork_process = False
else:
fork_process = True
if not fork_process or handler is not None:
try:
if handler is None:
handler = GDAL_Handler
server = GDAL_ThreadedHttpServer(handler)
server.start_and_wait_ready()
return (server, server.getPort())
except:
return (None, 0)
python_exe = sys.executable
if sys.platform == 'win32':
python_exe = python_exe.replace('\\', '/')
(process, process_stdout) = gdaltest.spawn_async(python_exe + ' ../pymod/webserver.py')
if process is None:
return (None, 0)
line = process_stdout.readline()
line = line.decode('ascii')
process_stdout.close()
if line.find('port=') == -1:
return (None, 0)
port = int(line[5:])
if port != 0:
print('HTTP Server started on port %d' % port)
return (process, port)
def server_stop(process, port):
if isinstance(process, GDAL_ThreadedHttpServer):
process.stop()
return
gdaltest.gdalurlopen('http://127.0.0.1:%d/shutdown' % port)
gdaltest.wait_process(process)
def main():
try:
server = GDAL_ThreadedHttpServer(GDAL_Handler)
server.start_and_wait_ready()
print('port=%d' % server.getPort())
sys.stdout.flush()
except:
print('port=0')
sys.stdout.flush()
sys.exit(0)
server.run_server(10)
if __name__ == '__main__':
main()
| [
"[email protected]"
]
| |
f86e2edac18dd5a144ac9e4e8e186ac315bc9758 | eb85b501de159dd2c549e4d2433a03592aae5e15 | /evernote_to_sqlite/cli.py | b2c550e3c6c38a26941cd9359960f032d9af4bb7 | [
"Apache-2.0"
]
| permissive | ktaranov/evernote-to-sqlite | f6b3912da78ee74afcf9a43b4b2b2db05eba05c7 | 92254b71075c8806bca258c939e24af8397cdf98 | refs/heads/main | 2023-01-20T04:32:42.877585 | 2020-10-16T20:15:51 | 2020-10-16T20:15:51 | 319,658,620 | 1 | 0 | Apache-2.0 | 2020-12-08T14:11:02 | 2020-12-08T14:11:02 | null | UTF-8 | Python | false | false | 914 | py | import sqlite_utils
import click
import os
from .utils import find_all_tags, save_note, ensure_indexes
@click.group()
@click.version_option()
def cli():
"Tools for converting Evernote content to SQLite"
@cli.command()
@click.argument(
"db_path",
type=click.Path(file_okay=True, dir_okay=False, allow_dash=False),
required=True,
)
@click.argument(
"enex_file",
type=click.Path(exists=True, file_okay=True, dir_okay=False, allow_dash=False),
required=True,
)
def enex(db_path, enex_file):
"Convert Evernote .enex exports to SQLite"
file_length = os.path.getsize(enex_file)
fp = open(enex_file)
db = sqlite_utils.Database(db_path)
with click.progressbar(length=file_length, label="Importing from ENEX") as bar:
for tag, note in find_all_tags(fp, ["note"], progress_callback=bar.update):
save_note(db, note)
fp.close()
ensure_indexes(db)
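# Example invocation (assuming the package's console script is installed under
# the usual name):
#   evernote-to-sqlite enex evernote.db MyNotes.enex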
| [
"[email protected]"
]
| |
886d2201fefef83111d3a14b1220eff983280a4d | 3ab7e700203054e104e6c60295c0a8455bc388b1 | /i_entity_extractor/extractors/annual_reports/annual_reports_extractor.py | c56bf238b41ab2c3c03321e7f2272902cc3de882 | []
| no_license | youfeng243/crawler | e8114ab5ef68bb9fd7e4296452d63b53d3d4080a | 59eaabef94de67444f09cfe5b25d481034d10f29 | refs/heads/master | 2021-07-11T10:12:27.946819 | 2017-10-12T11:35:27 | 2017-10-12T11:35:27 | 106,583,181 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 11,085 | py | # coding=utf-8
# Entity extraction for company annual reports
import copy
import json
import time
from i_entity_extractor.common_parser_lib import toolsutil
from i_entity_extractor.extractors.default.default_extractor import DefaultExtractor
class AnnualReportsExtractor(DefaultExtractor):
def __init__(self, topic_info, log):
DefaultExtractor.__init__(self, topic_info, log)
self.config_path = self.basic_path + "i_entity_extractor/extractors/annual_reports/mapping.conf"
self.mapping_conf = self.read_config()
def read_config(self):
mapping_conf = {}
file = open(self.config_path)
for line in file:
pars = line.strip().split(',')
if len(pars) >= 2:
key = pars[0].encode("utf8")
value = pars[1].encode("utf8")
mapping_conf[key] = value
return mapping_conf
def format_extract_data(self, extract_data, topic_id):
        '''Extract entity fields from the raw extraction data'''
entity_data = {}
if extract_data:
entity_data = copy.deepcopy(extract_data)
if entity_data.has_key("base_info"):
entity_data.pop("base_info")
for item in extract_data["base_info"]:
for key, value in item.items():
base_key = key[:3]
base_value = "value" + key[3:]
if base_key == "key" and item.has_key(base_value):
key = item[key].encode("utf8")
value = item[base_value].encode("utf8")
key_values = [(key, value)]
key_pars = key.split("\t")
value_pars = value.split("\t")
if len(key_pars) > 1 and len(key_pars) == len(value_pars):
index = 0
while index < len(key_pars):
key_values.append((key_pars[index], value_pars[index]))
index += 1
for key, value in key_values:
if key in self.mapping_conf:
entity_data[self.mapping_conf[key]] = value
if self.mapping_conf[key] == 'code':
entity_data.pop("code")
if len(value) == 18:
entity_data["unified_social_credit_code"] = value
else:
entity_data["registered_code"] = value
break
if not entity_data.get("company_name") and entity_data.get("company"):
entity_data["company_name"] = entity_data.get("company")
if entity_data.has_key("edit_change_infos") and entity_data.get("edit_change_infos") != None:
entity_data["edit_change_infos"] = self.deal_time(entity_data.get("edit_change_infos",[]),["change_date"])
if entity_data.has_key("shareholder_information") and entity_data.get("shareholder_information") != None:
entity_data["shareholder_information"] = self.deal_time(entity_data.get("shareholder_information",[]),["subscription_time","paied_time"])
if entity_data.has_key("administrative_licensing_info") and entity_data.get("administrative_licensing_info") != None:
entity_data["administrative_licensing_info"] = self.deal_time(entity_data.get("administrative_licensing_info",[]),["license_period_date"])
if entity_data.has_key("edit_shareholding_change_infos") and entity_data.get("edit_shareholding_change_infos") != None:
entity_data["edit_shareholding_change_infos"] = self.deal_time(entity_data.get("edit_shareholding_change_infos",[]),["change_date"])
return entity_data
def deal_time(self,src_data_list,key_list):
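        # e.g. a millisecond epoch string such as "1204646400000" becomes
        # "2008-03-05 00:00:00" under UTC+8 (the exact result depends on the
        # local timezone)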
new_data_list = []
deal_flag = False
for iter_data in src_data_list:
for key in key_list:
try:
time_value = str(iter_data[key])
ret = toolsutil.re_find_one(u'\d+',time_value)
if len(time_value) > 10 and ret == time_value:
tmp = int(time_value[:-3])
data_value = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(tmp))
iter_data[key] = data_value
deal_flag = True
else:
iter_data[key] = toolsutil.norm_date_time(iter_data[key])
deal_flag = True
except:
pass
new_data_list.append(iter_data)
return new_data_list
# if __name__ == '__main__':
# import sys
# sys.path.append('../../')
# topic_id = 136
# import pytoml
# from conf import get_config
# from bdp.i_crawler.i_extractor.ttypes import BaseInfo, CrawlInfo, ExtractInfo, PageParseInfo
# with open('../../entity.toml', 'rb') as config:
# config = pytoml.load(config)
# conf = get_config(config)
# import common
# from entity_extractor_route import EntityExtractorRoute
# route = EntityExtractorRoute()
# topic_info = route.all_topics.get(topic_id, None)
# obj = AnnualReportsExtractor(topic_info, common.log)
# extract_data = {
# "base_info": [
# {
# "key": "统一社会信用代码/注册号",
# "value": "911400001123101349"
# },
# {
# "key": "企业名称",
# "value": "华晋焦煤有限责任公司"
# },
# {
# "key": "企业通信地址",
# "value": "山西省吕梁市离市区久安路57号"
# },
# {
# "key": "邮政编码",
# "value": "033000"
# },
# {
# "key": "企业联系电话",
# "value": "0358-8296368"
# },
# {
# "key": "企业电子邮箱",
# "value": "[email protected]"
# },
# {
# "key": "从业人数",
# "value": "企业选择不公示"
# },
# {
# "key": "企业经营状态",
# "value": "开业"
# },
# {
# "key": "是否有网站或网店",
# "value": "是"
# },
# {
# "key": "有限责任公司本年度是否发生股东股权转让",
# "value": "否"
# },
# {
# "key": "是否有投资信息或购买其他公司股权",
# "value": "有"
# },
# {
# "key": "对外提供保证担保信息",
# "value": "否"
# }
# ],
# "enterprise_asset_status_information": [
# {
# "key": "资产总额",
# "value": "企业选择不公示"
# },
# {
# "key": "所得者权益合计",
# "value": "企业选择不公示"
# },
# {
# "key": "营业总收入",
# "value": "企业选择不公示"
# },
# {
# "key": "利润总额",
# "value": "企业选择不公示"
# },
# {
# "key": "营业总收入中主营业务收入",
# "value": "企业选择不公示"
# },
# {
# "key": "净利润",
# "value": "企业选择不公示"
# },
# {
# "key": "纳税总额",
# "value": "企业选择不公示"
# },
# {
# "key": "负债总额",
# "value": "企业选择不公示"
# }
# ],
# "invested_companies": [
# {
# "company_name": "华晋煤层气综合利用有限责任公司",
# "registered_code": "140000110106177"
# },
# {
# "company_name": "山西华晋吉宁煤业有限责任公司",
# "registered_code": "140000105974138"
# },
# {
# "company_name": "山西华晋明珠煤业有限责任公司",
# "registered_code": "140000206970138"
# },
# {
# "company_name": "山西焦煤华晋寨圪塔能源有限责任公司",
# "registered_code": "141000000074580"
# },
# {
# "company_name": "石太铁路客运专线有限责任公司",
# "registered_code": "140100103043161"
# },
# {
# "company_name": "山西汾河焦煤股份有限公司",
# "registered_code": "140000100099469"
# },
# {
# "company_name": "山西焦煤集团汾河物业管理有限公司",
# "registered_code": "140100103047124"
# },
# {
# "company_name": "山西焦煤集团房地产开发有限公司",
# "registered_code": "140100103020695"
# },
# {
# "company_name": "山西焦煤交通能源投资有限公司",
# "registered_code": "140000110111179"
# }
# ],
# "province": "山西",
# "shareholder_information": [
# {
# "paied_amount": "42354.798018",
# "paied_time": "1204646400000",
# "paied_type": "货币",
# "shareholder_name": "山西焦煤集团有限责任公司",
# "subscription_amount": "42354.798018",
# "subscription_time": "1204646400000",
# "subscription_type": "货币"
# },
# {
# "paied_amount": "40693.825547",
# "paied_time": "2017年6月4日",
# "paied_type": "货币",
# "shareholder_name": "中国中煤能源股份有限公司",
# "subscription_amount": "40693.825547",
# "subscription_time": "1204646400000",
# "subscription_type": "货币"
# }
# ],
# "websites": [
# {
# "name": "华晋焦煤有限责任公司",
# "site": "http://www.sx.xinhuanet.com/qyzx/hjjm/",
# "type": "网站"
# }
# ],
# "year": "2015年度"
# }
# src_url = "www.baidu.com"
# data = json.dumps(extract_data)
# extract_info = ExtractInfo(ex_status=2, extract_data=data)
# base_info = BaseInfo(url=src_url)
# parser_info = PageParseInfo(base_info=base_info, extract_info=extract_info)
# entity_data = obj.entity_extract(parser_info, extract_data)
# #data = obj.after_extract(src_url, entity_data, extract_data)
# print data
# for key, value in entity_data.items():
# if isinstance(value, list):
# for i in value:
# print key, ":", i
# elif isinstance(value, dict):
# for key2, value2 in value.items():
# print key2, ":", value2
# else:
# print key, ":", value | [
"[email protected]"
]
| |
1fb23b2832fa8ad3d8b5f3b2757274ad1463a27e | 02862f0b86638cd4e252bfd6bb92be931c10d569 | /algorithms/arrays/next_permutation/next_permutation.py | 1ad5e3cd64ea4b8252e014e068282398962daa08 | []
| no_license | Himstar8/Algorithm-Enthusiasts | ceb65df893d668a59018cbda278c3a03622a6311 | d3634daa7676e5a06646e0dbfc4ed30dac18ca9d | refs/heads/master | 2020-07-09T16:44:50.520423 | 2019-03-21T18:20:10 | 2019-03-21T18:20:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 837 | py | def next_permutation(nums):
    # rearranges nums in place into the lexicographically next permutation
    def find_min_larger_index(idx, n):
        # in the (non-increasing) suffix, find the rightmost element still
        # greater than n, i.e. the smallest element larger than n
        while idx < len(nums) and nums[idx] > n:
            idx += 1
        return idx - 1
    # find the pivot: first index (from the right) where nums[i-1] < nums[i]
    i = len(nums) - 1
    while i > 0 and nums[i] <= nums[i-1]:
        i -= 1
    if i == 0:
        # nums is the last permutation: wrap around to the first (sorted) one
        nums.reverse()
    else:
        # swap the pivot with the smallest suffix element larger than it ...
        idx = find_min_larger_index(i, nums[i-1])
        nums[idx], nums[i-1] = nums[i-1], nums[idx]
        # ... then reverse the suffix so it becomes ascending
        start = i
        end = len(nums) - 1
        while start < end:
            nums[start], nums[end] = nums[end], nums[start]
            start += 1
            end -= 1
if __name__ == '__main__':
nums = [5, 1, 1]
next_permutation(nums)
assert(nums == [1, 1, 5])
nums = [2, 1, 4, 3]
next_permutation(nums)
assert(nums == [2, 3, 1, 4])
nums = [1, 5, 1]
next_permutation(nums)
assert(nums == [5, 1, 1])
| [
"[email protected]"
]
| |
d3d1c831f63c2c3134ede70ab7c0b02b62d99b41 | c863a1349cde0217459fde44d969df7f04c8e57d | /tb/test_axis_eth_fcs_check_64.py | f05d1d677984fed35bdf5b4532f0129b23460834 | [
"MIT"
]
| permissive | hermixy/verilog-ethernet | 5c09e4cb94590bc858a716ef764fd3776aad693d | b3f50ac2c724763c1c30ed9c33a3489517b7d457 | refs/heads/master | 2020-04-04T20:01:52.758794 | 2018-11-02T07:40:15 | 2018-11-02T07:40:15 | 156,231,015 | 1 | 0 | null | 2018-11-05T14:29:18 | 2018-11-05T14:29:17 | null | UTF-8 | Python | false | false | 12,125 | py | #!/usr/bin/env python
"""
Copyright (c) 2015-2018 Alex Forencich
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from myhdl import *
import os
import struct
import zlib
import axis_ep
import eth_ep
module = 'axis_eth_fcs_check_64'
testbench = 'test_%s' % module
srcs = []
srcs.append("../rtl/%s.v" % module)
srcs.append("../rtl/lfsr.v")
srcs.append("%s.v" % testbench)
src = ' '.join(srcs)
build_cmd = "iverilog -o %s.vvp %s" % (testbench, src)
def bench():
# Parameters
# Inputs
clk = Signal(bool(0))
rst = Signal(bool(0))
current_test = Signal(intbv(0)[8:])
input_axis_tdata = Signal(intbv(0)[64:])
input_axis_tkeep = Signal(intbv(0)[8:])
input_axis_tvalid = Signal(bool(0))
input_axis_tlast = Signal(bool(0))
input_axis_tuser = Signal(bool(0))
output_axis_tready = Signal(bool(0))
# Outputs
input_axis_tready = Signal(bool(0))
output_axis_tdata = Signal(intbv(0)[64:])
output_axis_tkeep = Signal(intbv(0)[8:])
output_axis_tvalid = Signal(bool(0))
output_axis_tlast = Signal(bool(0))
output_axis_tuser = Signal(bool(0))
busy = Signal(bool(0))
error_bad_fcs = Signal(bool(0))
# sources and sinks
source_pause = Signal(bool(0))
sink_pause = Signal(bool(0))
source = axis_ep.AXIStreamSource()
source_logic = source.create_logic(
clk,
rst,
tdata=input_axis_tdata,
tkeep=input_axis_tkeep,
tvalid=input_axis_tvalid,
tready=input_axis_tready,
tlast=input_axis_tlast,
tuser=input_axis_tuser,
pause=source_pause,
name='source'
)
sink = axis_ep.AXIStreamSink()
sink_logic = sink.create_logic(
clk,
rst,
tdata=output_axis_tdata,
tkeep=output_axis_tkeep,
tvalid=output_axis_tvalid,
tready=output_axis_tready,
tlast=output_axis_tlast,
tuser=output_axis_tuser,
pause=sink_pause,
name='sink'
)
# DUT
if os.system(build_cmd):
raise Exception("Error running build command")
dut = Cosimulation(
"vvp -m myhdl %s.vvp -lxt2" % testbench,
clk=clk,
rst=rst,
current_test=current_test,
input_axis_tdata=input_axis_tdata,
input_axis_tkeep=input_axis_tkeep,
input_axis_tvalid=input_axis_tvalid,
input_axis_tready=input_axis_tready,
input_axis_tlast=input_axis_tlast,
input_axis_tuser=input_axis_tuser,
output_axis_tdata=output_axis_tdata,
output_axis_tkeep=output_axis_tkeep,
output_axis_tvalid=output_axis_tvalid,
output_axis_tready=output_axis_tready,
output_axis_tlast=output_axis_tlast,
output_axis_tuser=output_axis_tuser,
busy=busy,
error_bad_fcs=error_bad_fcs
)
@always(delay(4))
def clkgen():
clk.next = not clk
    error_bad_fcs_asserted = Signal(bool(0))

    @always(clk.posedge)
    def monitor():
        # Latch any pulse on error_bad_fcs so the checks below can still
        # see it after the corrupted frame has been received.
        if error_bad_fcs:
            error_bad_fcs_asserted.next = 1
def wait_normal():
while input_axis_tvalid or output_axis_tvalid:
yield clk.posedge
def wait_pause_source():
while input_axis_tvalid or output_axis_tvalid:
source_pause.next = True
yield clk.posedge
yield clk.posedge
yield clk.posedge
source_pause.next = False
yield clk.posedge
def wait_pause_sink():
while input_axis_tvalid or output_axis_tvalid:
sink_pause.next = True
yield clk.posedge
yield clk.posedge
yield clk.posedge
sink_pause.next = False
yield clk.posedge
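
    # Added commentary: the three wait helpers above replay each stimulus
    # with no stalls, with source-side stalls, and with sink-side stalls,
    # so every test below exercises the DUT's tvalid/tready handshaking
    # under backpressure on both interfaces.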
@instance
def check():
yield delay(100)
yield clk.posedge
rst.next = 1
yield clk.posedge
rst.next = 0
yield clk.posedge
yield delay(100)
yield clk.posedge
# testbench stimulus
for payload_len in list(range(1,18))+list(range(40,58)):
yield clk.posedge
print("test 1: test packet, length %d" % payload_len)
current_test.next = 1
test_frame = eth_ep.EthFrame()
test_frame.eth_dest_mac = 0xDAD1D2D3D4D5
test_frame.eth_src_mac = 0x5A5152535455
test_frame.eth_type = 0x8000
test_frame.payload = bytearray(range(payload_len))
test_frame.update_fcs()
axis_frame = test_frame.build_axis_fcs()
for wait in wait_normal, wait_pause_source, wait_pause_sink:
source.send(axis_frame)
yield clk.posedge
yield clk.posedge
yield wait()
yield sink.wait()
rx_frame = sink.recv()
eth_frame = eth_ep.EthFrame()
eth_frame.parse_axis(rx_frame)
eth_frame.update_fcs()
assert eth_frame == test_frame
assert not rx_frame.user[-1]
assert sink.empty()
yield delay(100)
yield clk.posedge
print("test 2: back-to-back packets, length %d" % payload_len)
current_test.next = 2
test_frame1 = eth_ep.EthFrame()
test_frame1.eth_dest_mac = 0xDAD1D2D3D4D5
test_frame1.eth_src_mac = 0x5A5152535455
test_frame1.eth_type = 0x8000
test_frame1.payload = bytearray(range(payload_len))
test_frame1.update_fcs()
test_frame2 = eth_ep.EthFrame()
test_frame2.eth_dest_mac = 0xDAD1D2D3D4D5
test_frame2.eth_src_mac = 0x5A5152535455
test_frame2.eth_type = 0x8000
test_frame2.payload = bytearray(range(payload_len))
test_frame2.update_fcs()
axis_frame1 = test_frame1.build_axis_fcs()
axis_frame2 = test_frame2.build_axis_fcs()
for wait in wait_normal, wait_pause_source, wait_pause_sink:
source.send(axis_frame1)
source.send(axis_frame2)
yield clk.posedge
yield clk.posedge
yield wait()
yield sink.wait()
rx_frame = sink.recv()
eth_frame = eth_ep.EthFrame()
eth_frame.parse_axis(rx_frame)
eth_frame.update_fcs()
assert eth_frame == test_frame1
assert not rx_frame.user[-1]
yield sink.wait()
rx_frame = sink.recv()
eth_frame = eth_ep.EthFrame()
eth_frame.parse_axis(rx_frame)
eth_frame.update_fcs()
assert eth_frame == test_frame2
assert not rx_frame.user[-1]
assert sink.empty()
yield delay(100)
yield clk.posedge
print("test 3: tuser assert, length %d" % payload_len)
current_test.next = 3
test_frame1 = eth_ep.EthFrame()
test_frame1.eth_dest_mac = 0xDAD1D2D3D4D5
test_frame1.eth_src_mac = 0x5A5152535455
test_frame1.eth_type = 0x8000
test_frame1.payload = bytearray(range(payload_len))
test_frame1.update_fcs()
test_frame2 = eth_ep.EthFrame()
test_frame2.eth_dest_mac = 0xDAD1D2D3D4D5
test_frame2.eth_src_mac = 0x5A5152535455
test_frame2.eth_type = 0x8000
test_frame2.payload = bytearray(range(payload_len))
test_frame2.update_fcs()
axis_frame1 = test_frame1.build_axis_fcs()
axis_frame2 = test_frame2.build_axis_fcs()
            axis_frame1.user = 1  # assert tuser on the first frame at the source
for wait in wait_normal, wait_pause_source, wait_pause_sink:
source.send(axis_frame1)
source.send(axis_frame2)
yield clk.posedge
yield clk.posedge
yield wait()
yield sink.wait()
rx_frame = sink.recv()
assert rx_frame.user[-1]
yield sink.wait()
rx_frame = sink.recv()
eth_frame = eth_ep.EthFrame()
eth_frame.parse_axis(rx_frame)
eth_frame.update_fcs()
assert eth_frame == test_frame2
assert not rx_frame.user[-1]
assert sink.empty()
yield delay(100)
yield clk.posedge
print("test 4: bad FCS, length %d" % payload_len)
current_test.next = 4
test_frame1 = eth_ep.EthFrame()
test_frame1.eth_dest_mac = 0xDAD1D2D3D4D5
test_frame1.eth_src_mac = 0x5A5152535455
test_frame1.eth_type = 0x8000
test_frame1.payload = bytearray(range(payload_len))
test_frame1.update_fcs()
test_frame2 = eth_ep.EthFrame()
test_frame2.eth_dest_mac = 0xDAD1D2D3D4D5
test_frame2.eth_src_mac = 0x5A5152535455
test_frame2.eth_type = 0x8000
test_frame2.payload = bytearray(range(payload_len))
test_frame2.update_fcs()
axis_frame1 = test_frame1.build_axis_fcs()
axis_frame2 = test_frame2.build_axis_fcs()
            axis_frame1.data[-1] ^= 0xff  # corrupt the last FCS byte to force a CRC mismatch
for wait in wait_normal, wait_pause_source, wait_pause_sink:
error_bad_fcs_asserted.next = 0
source.send(axis_frame1)
source.send(axis_frame2)
yield clk.posedge
yield clk.posedge
yield wait()
yield sink.wait()
rx_frame = sink.recv()
assert error_bad_fcs_asserted
assert rx_frame.user[-1]
yield sink.wait()
rx_frame = sink.recv()
eth_frame = eth_ep.EthFrame()
eth_frame.parse_axis(rx_frame)
eth_frame.update_fcs()
assert eth_frame == test_frame2
assert not rx_frame.user[-1]
assert sink.empty()
yield delay(100)
for payload_len in list(range(1,18)):
yield clk.posedge
print("test 5: test short packet, length %d" % payload_len)
current_test.next = 5
            # Raw short frame (no Ethernet header) with the CRC-32 FCS
            # appended little-endian, as it appears on the wire.
            test_frame = bytearray(range(payload_len))
            fcs = zlib.crc32(bytes(test_frame)) & 0xffffffff
            test_frame_fcs = test_frame + struct.pack('<L', fcs)
for wait in wait_normal, wait_pause_source, wait_pause_sink:
source.send(test_frame_fcs)
yield clk.posedge
yield clk.posedge
yield wait()
yield sink.wait()
rx_frame = sink.recv()
assert test_frame == bytearray(rx_frame)
assert sink.empty()
yield delay(100)
raise StopSimulation
return instances()
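

# Illustrative software model (added, not part of the original testbench):
# the DUT strips the trailing 4-byte FCS and flags tuser on a mismatch; an
# equivalent pure-Python check recomputes CRC-32 over the stripped frame
# and compares it against the trailing little-endian bytes.
def software_fcs_check(frame_with_fcs):
    payload = bytes(frame_with_fcs[:-4])
    expected = struct.pack('<L', zlib.crc32(payload) & 0xffffffff)
    return payload, bytes(frame_with_fcs[-4:]) == expected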
def test_bench():
sim = Simulation(bench())
sim.run()
if __name__ == '__main__':
print("Running test...")
test_bench()
| [
"[email protected]"
]
| |
27436bf65203665f1e775cd08464696bf984e191 | 67612c27c6d79ae180a5bc266833899abfefe9f5 | /面试题64. 求1+2+…+n LCOF.py | d42954a5bf567328df88e46091a401b867c5b820 | []
| no_license | Katherinaxxx/leetcode | 7e9d0bd7dc613a824116f1247f42bfc33e485ff3 | dcebf49d1e024b9e69c4d9606c8afb32b9d07029 | refs/heads/master | 2023-01-27T20:14:09.459296 | 2023-01-08T07:01:53 | 2023-01-08T07:01:53 | 215,688,672 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 423 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Time : 2020/6/2 11:20 AM
@Author : Catherinexxx
@Site :
@File : 面试题64. 求1+2+…+n LCOF.py
@Software: PyCharm
"""
"""
求 1+2+...+n ,要求不能使用乘除法、for、while、if、else、switch、case等关键字及条件判断语句(A?B:C)。
"""
# Closed form (1+n)*n//2, O(1) time. Note that this uses * and //, which
# the problem statement forbids; see the constraint-compliant sketch above.
class Solution:
def sumNums(self, n: int) -> int:
return (1+n)*n//2 | [
"[email protected]"
]
| |
96940db9386ddb1089016400e3a545dda5a13801 | f3b233e5053e28fa95c549017bd75a30456eb50c | /ptp1b_input/Lbr/br-66_MD_NVT_rerun/set_1ns_equi_1.py | 4a79ed1108775404ea3880b83c80a1b28ee355a5 | []
| no_license | AnguseZhang/Input_TI | ddf2ed40ff1c0aa24eea3275b83d4d405b50b820 | 50ada0833890be9e261c967d00948f998313cb60 | refs/heads/master | 2021-05-25T15:02:38.858785 | 2020-02-18T16:57:04 | 2020-02-18T16:57:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 926 | py | import os
# Base directory for this TI window; input templates live under files/.
base_dir = '/mnt/scratch/songlin3/run/ptp1b/Lbr/MD_NVT_rerun/ti_one-step/br_66/'
filesdir = base_dir + 'files/'
temp_equiin = filesdir + 'temp_equi_1.in'
temp_pbs = filesdir + 'temp_1ns_equi_1.pbs'

# Lambda windows for the thermodynamic-integration run.
lambd = [0.00922, 0.04794, 0.11505, 0.20634, 0.31608, 0.43738, 0.56262, 0.68392, 0.79366, 0.88495, 0.95206, 0.99078]
for j in lambd:
os.system("rm -r %6.5f" %(j))
os.system("mkdir %6.5f" %(j))
os.chdir("%6.5f" %(j))
os.system("rm *")
    workdir = base_dir + "%6.5f" %(j) + '/'
#equiin
eqin = workdir + "%6.5f_equi_1.in" %(j)
os.system("cp %s %s" %(temp_equiin, eqin))
os.system("sed -i 's/XXX/%6.5f/g' %s" %(j, eqin))
#PBS
pbs = workdir + "%6.5f_1ns_equi_1.pbs" %(j)
os.system("cp %s %s" %(temp_pbs, pbs))
os.system("sed -i 's/XXX/%6.5f/g' %s" %(j, pbs))
#top
os.system("cp ../br-66_merged.prmtop .")
os.system("cp ../0.5_equi_0.rst .")
#submit pbs
os.system("qsub %s" %(pbs))
    os.chdir(base_dir)
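

# Illustrative helper (added, not part of the original workflow): each
# cp + sed pair above can be done in pure Python, avoiding one shell
# invocation per file; "XXX" is the placeholder used in the templates.
def instantiate_template(template_path, out_path, lambda_value):
    with open(template_path) as src:
        text = src.read().replace("XXX", "%6.5f" % lambda_value)
    with open(out_path, "w") as dst:
        dst.write(text)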
| [
"[email protected]"
]
| |
0c6b797449a22309a265e557ebd1dadf4115400b | 8afb5afd38548c631f6f9536846039ef6cb297b9 | /MY_REPOS/Lambda-Resource-Static-Assets/2-resources/BLOG/Data-Structures/1-Python/Python-master/linear_algebra/src/rayleigh_quotient.py | 69bbbac119e80d48a3cd1670171c31e6020d8d95 | [
"MIT"
]
| permissive | bgoonz/UsefulResourceRepo2.0 | d87588ffd668bb498f7787b896cc7b20d83ce0ad | 2cb4b45dd14a230aa0e800042e893f8dfb23beda | refs/heads/master | 2023-03-17T01:22:05.254751 | 2022-08-11T03:18:22 | 2022-08-11T03:18:22 | 382,628,698 | 10 | 12 | MIT | 2022-10-10T14:13:54 | 2021-07-03T13:58:52 | null | UTF-8 | Python | false | false | 1,451 | py | """
https://en.wikipedia.org/wiki/Rayleigh_quotient
"""
import numpy as np
def is_hermitian(matrix: np.ndarray) -> bool:
"""
Checks if a matrix is Hermitian.
>>> import numpy as np
>>> A = np.array([
... [2, 2+1j, 4],
... [2-1j, 3, 1j],
... [4, -1j, 1]])
>>> is_hermitian(A)
True
>>> A = np.array([
... [2, 2+1j, 4+1j],
... [2-1j, 3, 1j],
... [4, -1j, 1]])
>>> is_hermitian(A)
False
"""
return np.array_equal(matrix, matrix.conjugate().T)
def rayleigh_quotient(A: np.ndarray, v: np.ndarray) -> np.ndarray:
"""
Returns the Rayleigh quotient of a Hermitian matrix A and
vector v.
>>> import numpy as np
>>> A = np.array([
... [1, 2, 4],
... [2, 3, -1],
... [4, -1, 1]
... ])
>>> v = np.array([
... [1],
... [2],
... [3]
... ])
>>> rayleigh_quotient(A, v)
array([[3.]])
"""
v_star = v.conjugate().T
return (v_star.dot(A).dot(v)) / (v_star.dot(v))
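

# Illustrative check (added, not part of the original module): for a
# Hermitian matrix, the Rayleigh quotient of an eigenvector recovers the
# corresponding eigenvalue, which is what makes the quotient useful in
# power iteration and eigenvalue refinement.
def eigenvector_demo() -> None:
    A = np.array([[2.0, 1.0], [1.0, 2.0]])
    eigenvalues, eigenvectors = np.linalg.eigh(A)
    v = eigenvectors[:, [0]]  # column eigenvector for eigenvalues[0]
    assert np.isclose(rayleigh_quotient(A, v), eigenvalues[0])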
def tests() -> None:
A = np.array([[2, 2 + 1j, 4], [2 - 1j, 3, 1j], [4, -1j, 1]])
v = np.array([[1], [2], [3]])
assert is_hermitian(A), f"{A} is not hermitian."
print(rayleigh_quotient(A, v))
A = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]])
assert is_hermitian(A), f"{A} is not hermitian."
assert rayleigh_quotient(A, v) == float(3)
if __name__ == "__main__":
import doctest
doctest.testmod()
tests()
| [
"[email protected]"
]
| |
f66363574209a1b21f420d8afe431fd62465530f | 3d7039903da398ae128e43c7d8c9662fda77fbdf | /database/CSS/juejin_1165.py | 2b72076f6d4f211f941f1ba475b380432f75338d | []
| no_license | ChenYongChang1/spider_study | a9aa22e6ed986193bf546bb567712876c7be5e15 | fe5fbc1a5562ff19c70351303997d3df3af690db | refs/heads/master | 2023-08-05T10:43:11.019178 | 2021-09-18T01:30:22 | 2021-09-18T01:30:22 | 406,727,214 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 65,321 | py | {"err_no": 0, "err_msg": "success", "data": [{"article_id": "6957521363326205982", "article_info": {"article_id": "6957521363326205982", "user_id": "616164205017336", "category_id": "6809637767543259144", "tag_ids": [6809640394175971342], "visible_level": 0, "link_url": "", "cover_image": "", "is_gfw": 0, "title": "CSS实战笔记(十二) 图片等比例缩放", "brief_content": "在网页展示图片是一个很常见的需求,大多数情况下,展示区域的大小是固定的,原图片的大小也是固定的 如果展示区域的宽高和原图片的宽高不等比例,那么在默认情况下很可能会压缩或拉伸图片以适应区域大小", "is_english": 0, "is_original": 1, "user_index": 0, "original_type": 0, "original_author": "", "content": "", "ctime": "1619924179", "mtime": "1620293441", "rtime": "1620293441", "draft_id": "6957521346335080462", "view_count": 513, "collect_count": 7, "digg_count": 3, "comment_count": 0, "hot_index": 28, "is_hot": 0, "rank_index": 0.00078748, "status": 2, "verify_status": 1, "audit_status": 2, "mark_content": ""}, "author_user_info": {"user_id": "616164205017336", "user_name": "半虹", "company": "", "job_title": "", "avatar_large": "https://sf6-ttcdn-tos.pstatp.com/img/user-avatar/2e093cb5175ee6b3bd2882e8ac887972~300x300.image", "level": 2, "description": "愿你我都能成为闪闪发光的人,公众号「半虹小站」", "followee_count": 0, "follower_count": 20, "post_article_count": 159, "digg_article_count": 0, "got_digg_count": 32, "got_view_count": 11126, "post_shortmsg_count": 0, "digg_shortmsg_count": 0, "isfollowed": false, "favorable_author": 0, "power": 143, "study_point": 0, "university": {"university_id": "0", "name": "", "logo": ""}, "major": {"major_id": "0", "parent_id": "0", "name": ""}, "student_status": 0, "select_event_count": 0, "select_online_course_count": 0, "identity": 0, "is_select_annual": false, "select_annual_rank": 0, "annual_list_type": 0, "extraMap": {}, "is_logout": 0}, "category": {"category_id": "6809637767543259144", "category_name": "前端", "category_url": "frontend", "rank": 2, "back_ground": "https://lc-mhke0kuv.cn-n1.lcfile.com/8c95587526f346c0.png", "icon": "https://lc-mhke0kuv.cn-n1.lcfile.com/1c40f5eaba561e32.png", "ctime": 1457483942, "mtime": 1432503190, "show_type": 3, "item_type": 2, "promote_tag_cap": 4, "promote_priority": 2}, "tags": [{"id": 2546516, "tag_id": "6809640394175971342", "tag_name": "CSS", "color": "#244DE4", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/66de0c4eb9d10130d5bf.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1432239426, "mtime": 1631688735, "id_type": 9, "tag_alias": "", "post_article_count": 14981, "concern_user_count": 297034}], "user_interact": {"id": 6957521363326205982, "omitempty": 2, "user_id": 0, "is_digg": false, "is_follow": false, "is_collect": false}, "org": {"org_info": null, "org_user": null, "is_followed": false}, "req_id": "202109151603280102121640460500544B"}, {"article_id": "6867054761741549576", "article_info": {"article_id": "6867054761741549576", "user_id": "4424090519347790", "category_id": "6809637767543259144", "tag_ids": [6809640614175604744, 6809640394175971342], "visible_level": 0, "link_url": "", "cover_image": "https://p6-juejin.byteimg.com/tos-cn-i-k3u1fbpfcp/a45578fdbfc6462ba06deface7930ce5~tplv-k3u1fbpfcp-zoom-1.image", "is_gfw": 0, "title": "[译] Javascript 应用中引入 CSS 的几种方式", "brief_content": 
"欢迎你踏上了一条在前端世界中饱含争议的道路!相信大部分读者会在关于如何在 JavaScript 应用中处理 CSS 这一话题上产生共鸣。 文章伊始,先声明一句:无论是在基于 Vue、Angular 还是 React 构建的应用,针对如何处理 CSS,世界上并没有任何放之四海而皆…", "is_english": 0, "is_original": 1, "user_index": 10.646441555376368, "original_type": 0, "original_author": "", "content": "", "ctime": "1598860889", "mtime": "1599106329", "rtime": "1598943699", "draft_id": "6867054460959457293", "view_count": 1786, "collect_count": 12, "digg_count": 11, "comment_count": 0, "hot_index": 100, "is_hot": 0, "rank_index": 0.0007902, "status": 2, "verify_status": 1, "audit_status": 2, "mark_content": ""}, "author_user_info": {"user_id": "4424090519347790", "user_name": "lsvih", "company": "", "job_title": "", "avatar_large": "https://sf6-ttcdn-tos.pstatp.com/img/user-avatar/3d27ae06c6cd88523d3875d3ad203b83~300x300.image", "level": 4, "description": "noob", "followee_count": 13, "follower_count": 425, "post_article_count": 60, "digg_article_count": 371, "got_digg_count": 2753, "got_view_count": 302472, "post_shortmsg_count": 4, "digg_shortmsg_count": 18, "isfollowed": false, "favorable_author": 1, "power": 5777, "study_point": 0, "university": {"university_id": "0", "name": "", "logo": ""}, "major": {"major_id": "0", "parent_id": "0", "name": ""}, "student_status": 0, "select_event_count": 0, "select_online_course_count": 0, "identity": 0, "is_select_annual": false, "select_annual_rank": 0, "annual_list_type": 0, "extraMap": {}, "is_logout": 0}, "category": {"category_id": "6809637767543259144", "category_name": "前端", "category_url": "frontend", "rank": 2, "back_ground": "https://lc-mhke0kuv.cn-n1.lcfile.com/8c95587526f346c0.png", "icon": "https://lc-mhke0kuv.cn-n1.lcfile.com/1c40f5eaba561e32.png", "ctime": 1457483942, "mtime": 1432503190, "show_type": 3, "item_type": 2, "promote_tag_cap": 4, "promote_priority": 2}, "tags": [{"id": 2546676, "tag_id": "6809640614175604744", "tag_name": "掘金翻译计划", "color": "#0081ff", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/95f7e8be776556ab8d82.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1454716787, "mtime": 1631689800, "id_type": 9, "tag_alias": "", "post_article_count": 2502, "concern_user_count": 42848}, {"id": 2546516, "tag_id": "6809640394175971342", "tag_name": "CSS", "color": "#244DE4", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/66de0c4eb9d10130d5bf.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1432239426, "mtime": 1631688735, "id_type": 9, "tag_alias": "", "post_article_count": 14981, "concern_user_count": 297034}], "user_interact": {"id": 6867054761741549576, "omitempty": 2, "user_id": 0, "is_digg": false, "is_follow": false, "is_collect": false}, "org": {"org_info": null, "org_user": null, "is_followed": false}, "req_id": "202109151603280102121640460500544B"}, {"article_id": "6844903811400597512", "article_info": {"article_id": "6844903811400597512", "user_id": "3333374985382749", "category_id": "6809637767543259144", "tag_ids": [6809640394175971342], "visible_level": 0, "link_url": "https://juejin.im/post/6844903811400597512", "cover_image": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/gold-user-assets/2019/3/21/169a08350ee2d4c5~tplv-t2oaga2asx-image.image", "is_gfw": 0, "title": "一些有趣的 CSS 魔法和布局(下)(结尾有岗位内推哦~)", "brief_content": "上一篇 一些有趣的 CSS 魔法和布局(上) 中,我们聊了一些有趣且实用的布局。今天,将呈现一些 CSS 带来的魔法特效,有部分特效可以帮我们省去不少工作量。 在以前遇到这个需求的时候,我们可能会想到用 JS 来操作内容的显式与否。现在,CSS3 的 transition 可以…", "is_english": 0, "is_original": 1, "user_index": 0, "original_type": 0, 
"original_author": "", "content": "", "ctime": "1554163956", "mtime": "1598497574", "rtime": "1554171402", "draft_id": "6845076219604500487", "view_count": 4323, "collect_count": 123, "digg_count": 96, "comment_count": 27, "hot_index": 339, "is_hot": 0, "rank_index": 0.0007895, "status": 2, "verify_status": 1, "audit_status": 2, "mark_content": ""}, "author_user_info": {"user_id": "3333374985382749", "user_name": "Micherwa", "company": "上海", "job_title": "前端小学生", "avatar_large": "https://sf1-ttcdn-tos.pstatp.com/img/user-avatar/164f6c495bc116c0671c7efcbe048213~300x300.image", "level": 3, "description": "朋友都叫我超哥,专注前端开发8年。 目前在莉莉丝游戏的广告技术中心担", "followee_count": 17, "follower_count": 1593, "post_article_count": 28, "digg_article_count": 88, "got_digg_count": 1839, "got_view_count": 115354, "post_shortmsg_count": 2, "digg_shortmsg_count": 2, "isfollowed": false, "favorable_author": 0, "power": 2992, "study_point": 0, "university": {"university_id": "0", "name": "", "logo": ""}, "major": {"major_id": "0", "parent_id": "0", "name": ""}, "student_status": 0, "select_event_count": 0, "select_online_course_count": 0, "identity": 0, "is_select_annual": false, "select_annual_rank": 0, "annual_list_type": 0, "extraMap": {}, "is_logout": 0}, "category": {"category_id": "6809637767543259144", "category_name": "前端", "category_url": "frontend", "rank": 2, "back_ground": "https://lc-mhke0kuv.cn-n1.lcfile.com/8c95587526f346c0.png", "icon": "https://lc-mhke0kuv.cn-n1.lcfile.com/1c40f5eaba561e32.png", "ctime": 1457483942, "mtime": 1432503190, "show_type": 3, "item_type": 2, "promote_tag_cap": 4, "promote_priority": 2}, "tags": [{"id": 2546516, "tag_id": "6809640394175971342", "tag_name": "CSS", "color": "#244DE4", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/66de0c4eb9d10130d5bf.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1432239426, "mtime": 1631688735, "id_type": 9, "tag_alias": "", "post_article_count": 14981, "concern_user_count": 297034}], "user_interact": {"id": 6844903811400597512, "omitempty": 2, "user_id": 0, "is_digg": false, "is_follow": false, "is_collect": false}, "org": {"org_info": null, "org_user": null, "is_followed": false}, "req_id": "202109151603280102121640460500544B"}, {"article_id": "6994352704189366302", "article_info": {"article_id": "6994352704189366302", "user_id": "3210229685964583", "category_id": "6809637767543259144", "tag_ids": [6809640394175971342, 6809640407484334093], "visible_level": 0, "link_url": "", "cover_image": "", "is_gfw": 0, "title": "CSS之linear-gradient(渐变、切角)", "brief_content": "linear-gradient有什么用?颜色渐变。从这个例子可以看出该属性最直接的效果是让容器渐变颜色。那么它接受的参数有什么呢?又有什么技巧呢?切角。切角的思路其实很简单,将其中一个颜色变为透明即可", "is_english": 0, "is_original": 1, "user_index": 0.359369544425812, "original_type": 0, "original_author": "", "content": "", "ctime": "1628499677", "mtime": "1628500800", "rtime": "1628500800", "draft_id": "6994327798689038367", "view_count": 80, "collect_count": 1, "digg_count": 2, "comment_count": 0, "hot_index": 6, "is_hot": 0, "rank_index": 0.00093526, "status": 2, "verify_status": 1, "audit_status": 2, "mark_content": ""}, "author_user_info": {"user_id": "3210229685964583", "user_name": "缺钱从零开始学前端", "company": "", "job_title": "", "avatar_large": "https://sf3-ttcdn-tos.pstatp.com/img/user-avatar/653ef7ec91f4201fc97ddd1dcf3ae05d~300x300.image", "level": 1, "description": "", "followee_count": 9, "follower_count": 1, "post_article_count": 52, "digg_article_count": 13, "got_digg_count": 15, "got_view_count": 5710, 
"post_shortmsg_count": 1, "digg_shortmsg_count": 0, "isfollowed": false, "favorable_author": 0, "power": 72, "study_point": 0, "university": {"university_id": "0", "name": "", "logo": ""}, "major": {"major_id": "0", "parent_id": "0", "name": ""}, "student_status": 0, "select_event_count": 0, "select_online_course_count": 0, "identity": 0, "is_select_annual": false, "select_annual_rank": 0, "annual_list_type": 0, "extraMap": {}, "is_logout": 0}, "category": {"category_id": "6809637767543259144", "category_name": "前端", "category_url": "frontend", "rank": 2, "back_ground": "https://lc-mhke0kuv.cn-n1.lcfile.com/8c95587526f346c0.png", "icon": "https://lc-mhke0kuv.cn-n1.lcfile.com/1c40f5eaba561e32.png", "ctime": 1457483942, "mtime": 1432503190, "show_type": 3, "item_type": 2, "promote_tag_cap": 4, "promote_priority": 2}, "tags": [{"id": 2546516, "tag_id": "6809640394175971342", "tag_name": "CSS", "color": "#244DE4", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/66de0c4eb9d10130d5bf.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1432239426, "mtime": 1631688735, "id_type": 9, "tag_alias": "", "post_article_count": 14981, "concern_user_count": 297034}, {"id": 2546526, "tag_id": "6809640407484334093", "tag_name": "前端", "color": "#60ADFF", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/bac28828a49181c34110.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 1, "ctime": 1435971546, "mtime": 1631692835, "id_type": 9, "tag_alias": "", "post_article_count": 88828, "concern_user_count": 527704}], "user_interact": {"id": 6994352704189366302, "omitempty": 2, "user_id": 0, "is_digg": false, "is_follow": false, "is_collect": false}, "org": {"org_info": null, "org_user": null, "is_followed": false}, "req_id": "202109151603280102121640460500544B"}, {"article_id": "6973177746096979975", "article_info": {"article_id": "6973177746096979975", "user_id": "3773179639634423", "category_id": "6809637767543259144", "tag_ids": [6809640407484334093, 6809640394175971342], "visible_level": 0, "link_url": "", "cover_image": "https://p6-juejin.byteimg.com/tos-cn-i-k3u1fbpfcp/b011cc90c1cc4be78217af31949ae799~tplv-k3u1fbpfcp-watermark.image", "is_gfw": 0, "title": "浅析CSS样式权重和优先级", "brief_content": "一、什么是优先级: 即通过优先级来判断如何在页面上显示这些样式。优先级是基于不同种类的选择器组成的匹配规则。 关于CSS的选择器可以看这篇《一次性搞懂CSS选择器》,这里就不重复讲啦。 二、优先级是怎", "is_english": 0, "is_original": 1, "user_index": 0, "original_type": 0, "original_author": "", "content": "", "ctime": "1623570178", "mtime": "1623570730", "rtime": "1623570730", "draft_id": "6972900853061681165", "view_count": 286, "collect_count": 0, "digg_count": 2, "comment_count": 2, "hot_index": 18, "is_hot": 0, "rank_index": 0.00078658, "status": 2, "verify_status": 1, "audit_status": 2, "mark_content": ""}, "author_user_info": {"user_id": "3773179639634423", "user_name": "啊巴啊巴", "company": "", "job_title": "前端", "avatar_large": "https://sf3-ttcdn-tos.pstatp.com/img/user-avatar/54561364cc9901203cd9b5f3ac4cacaa~300x300.image", "level": 2, "description": "跬步积千里,滴水汇长河", "followee_count": 12, "follower_count": 9, "post_article_count": 43, "digg_article_count": 57, "got_digg_count": 155, "got_view_count": 7975, "post_shortmsg_count": 0, "digg_shortmsg_count": 0, "isfollowed": false, "favorable_author": 0, "power": 234, "study_point": 0, "university": {"university_id": "0", "name": "", "logo": ""}, "major": {"major_id": "0", "parent_id": "0", "name": ""}, "student_status": 0, "select_event_count": 0, "select_online_course_count": 0, 
"identity": 0, "is_select_annual": false, "select_annual_rank": 0, "annual_list_type": 0, "extraMap": {}, "is_logout": 0}, "category": {"category_id": "6809637767543259144", "category_name": "前端", "category_url": "frontend", "rank": 2, "back_ground": "https://lc-mhke0kuv.cn-n1.lcfile.com/8c95587526f346c0.png", "icon": "https://lc-mhke0kuv.cn-n1.lcfile.com/1c40f5eaba561e32.png", "ctime": 1457483942, "mtime": 1432503190, "show_type": 3, "item_type": 2, "promote_tag_cap": 4, "promote_priority": 2}, "tags": [{"id": 2546526, "tag_id": "6809640407484334093", "tag_name": "前端", "color": "#60ADFF", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/bac28828a49181c34110.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 1, "ctime": 1435971546, "mtime": 1631692835, "id_type": 9, "tag_alias": "", "post_article_count": 88828, "concern_user_count": 527704}, {"id": 2546516, "tag_id": "6809640394175971342", "tag_name": "CSS", "color": "#244DE4", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/66de0c4eb9d10130d5bf.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1432239426, "mtime": 1631688735, "id_type": 9, "tag_alias": "", "post_article_count": 14981, "concern_user_count": 297034}], "user_interact": {"id": 6973177746096979975, "omitempty": 2, "user_id": 0, "is_digg": false, "is_follow": false, "is_collect": false}, "org": {"org_info": null, "org_user": null, "is_followed": false}, "req_id": "202109151603280102121640460500544B"}, {"article_id": "6859890348089409544", "article_info": {"article_id": "6859890348089409544", "user_id": "3051900008147176", "category_id": "6809637767543259144", "tag_ids": [6809640394175971342], "visible_level": 0, "link_url": "", "cover_image": "", "is_gfw": 0, "title": "CSS到底会不会阻塞页面渲染", "brief_content": "可能大家都知道,js执行会阻塞DOM树的解析和渲染,那么css加载会阻塞DOM树的解析和渲染吗?接下来,我们就一起来分析一下。 那么为什么会出现上面的现象呢?我们从浏览器的渲染过程来解析下。 不同的浏览器使用的内核不同,所以他们的渲染过程也是不一样的。目前主要有两个 根据Ren…", "is_english": 0, "is_original": 1, "user_index": 0, "original_type": 0, "original_author": "", "content": "", "ctime": "1597192816", "mtime": "1597367175", "rtime": "1597212797", "draft_id": "6859886280168177678", "view_count": 1852, "collect_count": 16, "digg_count": 24, "comment_count": 2, "hot_index": 118, "is_hot": 0, "rank_index": 0.00078771, "status": 2, "verify_status": 1, "audit_status": 2, "mark_content": ""}, "author_user_info": {"user_id": "3051900008147176", "user_name": "米亚流年", "company": "\n", "job_title": "公众号: 前端逗逗飞", "avatar_large": "https://sf3-ttcdn-tos.pstatp.com/img/user-avatar/ae73095ad1b02a06d4c6b1056924ff5c~300x300.image", "level": 2, "description": "前端+iOS移动端、画画、狼人杀", "followee_count": 108, "follower_count": 98, "post_article_count": 26, "digg_article_count": 7, "got_digg_count": 151, "got_view_count": 20811, "post_shortmsg_count": 1, "digg_shortmsg_count": 0, "isfollowed": false, "favorable_author": 0, "power": 359, "study_point": 0, "university": {"university_id": "0", "name": "", "logo": ""}, "major": {"major_id": "0", "parent_id": "0", "name": ""}, "student_status": 0, "select_event_count": 0, "select_online_course_count": 0, "identity": 0, "is_select_annual": false, "select_annual_rank": 0, "annual_list_type": 0, "extraMap": {}, "is_logout": 0}, "category": {"category_id": "6809637767543259144", "category_name": "前端", "category_url": "frontend", "rank": 2, "back_ground": "https://lc-mhke0kuv.cn-n1.lcfile.com/8c95587526f346c0.png", "icon": "https://lc-mhke0kuv.cn-n1.lcfile.com/1c40f5eaba561e32.png", "ctime": 1457483942, 
"mtime": 1432503190, "show_type": 3, "item_type": 2, "promote_tag_cap": 4, "promote_priority": 2}, "tags": [{"id": 2546516, "tag_id": "6809640394175971342", "tag_name": "CSS", "color": "#244DE4", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/66de0c4eb9d10130d5bf.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1432239426, "mtime": 1631688735, "id_type": 9, "tag_alias": "", "post_article_count": 14981, "concern_user_count": 297034}], "user_interact": {"id": 6859890348089409544, "omitempty": 2, "user_id": 0, "is_digg": false, "is_follow": false, "is_collect": false}, "org": {"org_info": null, "org_user": null, "is_followed": false}, "req_id": "202109151603280102121640460500544B"}, {"article_id": "6844903890161238030", "article_info": {"article_id": "6844903890161238030", "user_id": "1099167359045501", "category_id": "6809637767543259144", "tag_ids": [6809640394175971342], "visible_level": 0, "link_url": "https://juejin.im/post/6844903890161238030", "cover_image": "", "is_gfw": 0, "title": "温故而知我不懂的CSS", "brief_content": "流:“流”实际上是 CSS世界中的一种基本的定位和布局机制,可以理解为现实世界的一套物理规则,“流”跟现实世界的“水流”有异曲同工的表现。 特殊布局与流的破坏。如果全部都是以默认的“流”来渲染,我们只能实现类似 W3C 那样的文档网页,但是,实际的网页是有很多复杂的布局的,怎么…", "is_english": 0, "is_original": 1, "user_index": 5.4400125260981, "original_type": 0, "original_author": "", "content": "", "ctime": "1563196331", "mtime": "1599995318", "rtime": "1563245046", "draft_id": "6845076375600693261", "view_count": 3559, "collect_count": 69, "digg_count": 90, "comment_count": 15, "hot_index": 282, "is_hot": 0, "rank_index": 0.00078706, "status": 2, "verify_status": 1, "audit_status": 2, "mark_content": ""}, "author_user_info": {"user_id": "1099167359045501", "user_name": "落落落洛克", "company": "前端壹栈", "job_title": "前端开发工程师", "avatar_large": "https://sf1-ttcdn-tos.pstatp.com/img/user-avatar/b3935c1f74d448033838cb423c94fb86~300x300.image", "level": 3, "description": "高级切图仔", "followee_count": 79, "follower_count": 953, "post_article_count": 20, "digg_article_count": 262, "got_digg_count": 1068, "got_view_count": 47180, "post_shortmsg_count": 11, "digg_shortmsg_count": 173, "isfollowed": false, "favorable_author": 0, "power": 1539, "study_point": 0, "university": {"university_id": "0", "name": "", "logo": ""}, "major": {"major_id": "0", "parent_id": "0", "name": ""}, "student_status": 0, "select_event_count": 0, "select_online_course_count": 0, "identity": 0, "is_select_annual": false, "select_annual_rank": 0, "annual_list_type": 0, "extraMap": {}, "is_logout": 0}, "category": {"category_id": "6809637767543259144", "category_name": "前端", "category_url": "frontend", "rank": 2, "back_ground": "https://lc-mhke0kuv.cn-n1.lcfile.com/8c95587526f346c0.png", "icon": "https://lc-mhke0kuv.cn-n1.lcfile.com/1c40f5eaba561e32.png", "ctime": 1457483942, "mtime": 1432503190, "show_type": 3, "item_type": 2, "promote_tag_cap": 4, "promote_priority": 2}, "tags": [{"id": 2546516, "tag_id": "6809640394175971342", "tag_name": "CSS", "color": "#244DE4", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/66de0c4eb9d10130d5bf.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1432239426, "mtime": 1631688735, "id_type": 9, "tag_alias": "", "post_article_count": 14981, "concern_user_count": 297034}], "user_interact": {"id": 6844903890161238030, "omitempty": 2, "user_id": 0, "is_digg": false, "is_follow": false, "is_collect": false}, "org": {"org_info": null, "org_user": null, "is_followed": false}, "req_id": 
"202109151603280102121640460500544B"}, {"article_id": "6985541311298273317", "article_info": {"article_id": "6985541311298273317", "user_id": "3887474562304903", "category_id": "6809637767543259144", "tag_ids": [6809640394175971342], "visible_level": 0, "link_url": "", "cover_image": "", "is_gfw": 0, "title": "《图解CSS》CSS简介", "brief_content": "在学习一门新的知识之前,我们需要先知道,它是什么,为什么使用,和怎么使用,也就是常说的3W(what, why,how)。 CSS是什么? CSS全称为Cascading Style Sheets,中", "is_english": 0, "is_original": 1, "user_index": 1, "original_type": 0, "original_author": "", "content": "", "ctime": "1626448103", "mtime": "1626508274", "rtime": "1626508274", "draft_id": "6985540635235188750", "view_count": 129, "collect_count": 0, "digg_count": 3, "comment_count": 0, "hot_index": 9, "is_hot": 0, "rank_index": 0.00078638, "status": 2, "verify_status": 1, "audit_status": 2, "mark_content": ""}, "author_user_info": {"user_id": "3887474562304903", "user_name": "张中华", "company": "", "job_title": "开发", "avatar_large": "https://sf1-ttcdn-tos.pstatp.com/img/user-avatar/73d0a4010e0f51cf2d340b1cbbc3eefb~300x300.image", "level": 2, "description": "", "followee_count": 2, "follower_count": 8, "post_article_count": 73, "digg_article_count": 0, "got_digg_count": 69, "got_view_count": 4478, "post_shortmsg_count": 0, "digg_shortmsg_count": 0, "isfollowed": false, "favorable_author": 0, "power": 113, "study_point": 0, "university": {"university_id": "0", "name": "", "logo": ""}, "major": {"major_id": "0", "parent_id": "0", "name": ""}, "student_status": 0, "select_event_count": 0, "select_online_course_count": 0, "identity": 0, "is_select_annual": false, "select_annual_rank": 0, "annual_list_type": 0, "extraMap": {}, "is_logout": 0}, "category": {"category_id": "6809637767543259144", "category_name": "前端", "category_url": "frontend", "rank": 2, "back_ground": "https://lc-mhke0kuv.cn-n1.lcfile.com/8c95587526f346c0.png", "icon": "https://lc-mhke0kuv.cn-n1.lcfile.com/1c40f5eaba561e32.png", "ctime": 1457483942, "mtime": 1432503190, "show_type": 3, "item_type": 2, "promote_tag_cap": 4, "promote_priority": 2}, "tags": [{"id": 2546516, "tag_id": "6809640394175971342", "tag_name": "CSS", "color": "#244DE4", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/66de0c4eb9d10130d5bf.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1432239426, "mtime": 1631688735, "id_type": 9, "tag_alias": "", "post_article_count": 14981, "concern_user_count": 297034}], "user_interact": {"id": 6985541311298273317, "omitempty": 2, "user_id": 0, "is_digg": false, "is_follow": false, "is_collect": false}, "org": {"org_info": null, "org_user": null, "is_followed": false}, "req_id": "202109151603280102121640460500544B"}, {"article_id": "6850037281828143117", "article_info": {"article_id": "6850037281828143117", "user_id": "2330620350435501", "category_id": "6809637767543259144", "tag_ids": [6809640394175971342], "visible_level": 0, "link_url": "", "cover_image": "", "is_gfw": 0, "title": "Web 技术:CSS最小和最大(宽度/高度)知识点及优缺点", "brief_content": "大家都说简历没项目写,我就帮大家找了一个项目,还附赠【搭建教程】。 通常,我们希望限制元素相对于其父元素的宽度,同时使其具有动态性。因此,有一个基础宽度或高度的能力,使其扩展的基础上,可用的空间。比如说,我们有一个按钮,它的宽度应该是最小的,不应该低于它的宽度。这就是最大和最小…", "is_english": 0, "is_original": 1, "user_index": 0, "original_type": 0, "original_author": "", "content": "", "ctime": "1594857738", "mtime": "1603274545", "rtime": "1594868508", "draft_id": "6850424826134429710", "view_count": 2103, "collect_count": 24, "digg_count": 23, "comment_count": 0, "hot_index": 128, "is_hot": 0, "rank_index": 0.00078415, "status": 
2, "verify_status": 1, "audit_status": 2, "mark_content": ""}, "author_user_info": {"user_id": "2330620350435501", "user_name": "前端小智", "company": "B站@前端小智", "job_title": "公众号 @ 大迁世界", "avatar_large": "https://sf1-ttcdn-tos.pstatp.com/img/user-avatar/d52a642432be7f35a15d84b18c86d9ab~300x300.image", "level": 7, "description": "个人微信:qq449245884\n加刷碗智为好友,我会第一时间和你分享前端行业趋势,学习途径,搞怪趣事,生活中的另一面幽默等等。", "followee_count": 28, "follower_count": 27589, "post_article_count": 282, "digg_article_count": 110, "got_digg_count": 33329, "got_view_count": 1880605, "post_shortmsg_count": 5, "digg_shortmsg_count": 0, "isfollowed": false, "favorable_author": 0, "power": 52547, "study_point": 0, "university": {"university_id": "0", "name": "", "logo": ""}, "major": {"major_id": "0", "parent_id": "0", "name": ""}, "student_status": 0, "select_event_count": 0, "select_online_course_count": 0, "identity": 0, "is_select_annual": false, "select_annual_rank": 0, "annual_list_type": 0, "extraMap": {}, "is_logout": 0}, "category": {"category_id": "6809637767543259144", "category_name": "前端", "category_url": "frontend", "rank": 2, "back_ground": "https://lc-mhke0kuv.cn-n1.lcfile.com/8c95587526f346c0.png", "icon": "https://lc-mhke0kuv.cn-n1.lcfile.com/1c40f5eaba561e32.png", "ctime": 1457483942, "mtime": 1432503190, "show_type": 3, "item_type": 2, "promote_tag_cap": 4, "promote_priority": 2}, "tags": [{"id": 2546516, "tag_id": "6809640394175971342", "tag_name": "CSS", "color": "#244DE4", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/66de0c4eb9d10130d5bf.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1432239426, "mtime": 1631688735, "id_type": 9, "tag_alias": "", "post_article_count": 14981, "concern_user_count": 297034}], "user_interact": {"id": 6850037281828143117, "omitempty": 2, "user_id": 0, "is_digg": false, "is_follow": false, "is_collect": false}, "org": {"org_info": null, "org_user": null, "is_followed": false}, "req_id": "202109151603280102121640460500544B"}, {"article_id": "6844903854660665358", "article_info": {"article_id": "6844903854660665358", "user_id": "1134351730353207", "category_id": "6809637767543259144", "tag_ids": [6809640614175604744, 6809640407484334093, 6809640394175971342, 6809640398105870343, 6809640402103042061], "visible_level": 0, "link_url": "https://juejin.im/post/6844903854660665358", "cover_image": "", "is_gfw": 0, "title": "[译] 从原型图到成品:步步深入 CSS 布局", "brief_content": "对很多人来说,创建布局是前端开发领域中最难啃的骨头之一。 你肯定经历过耗费数个小时,换着花样地尝试所有可能起作用的 CSS 属性、一遍遍地从 Stack Overflow 上复制粘贴代码,寄希望于误打误撞地赌中那个能实现预期效果的魔幻组合。 如果你的惯用策略就是按部就班地组合布…", "is_english": 0, "is_original": 1, "user_index": 8.7685672340047, "original_type": 0, "original_author": "", "content": "", "ctime": "1558951228", "mtime": "1599913778", "rtime": "1558971406", "draft_id": "6845076322968928270", "view_count": 4003, "collect_count": 109, "digg_count": 91, "comment_count": 10, "hot_index": 301, "is_hot": 0, "rank_index": 0.00078396, "status": 2, "verify_status": 1, "audit_status": 2, "mark_content": ""}, "author_user_info": {"user_id": "1134351730353207", "user_name": "👊Badd", "company": "北京卡路里信息技术有限公司", "job_title": "前端工程师", "avatar_large": "https://sf3-ttcdn-tos.pstatp.com/img/user-avatar/066f5e1f1f83c428c809dfe5bbb1e214~300x300.image", "level": 3, "description": "Don't be good, be great.", "followee_count": 41, "follower_count": 435, "post_article_count": 39, "digg_article_count": 20, "got_digg_count": 1645, "got_view_count": 139381, "post_shortmsg_count": 15, "digg_shortmsg_count": 346, 
"isfollowed": false, "favorable_author": 0, "power": 3038, "study_point": 0, "university": {"university_id": "0", "name": "", "logo": ""}, "major": {"major_id": "0", "parent_id": "0", "name": ""}, "student_status": 0, "select_event_count": 0, "select_online_course_count": 0, "identity": 0, "is_select_annual": false, "select_annual_rank": 0, "annual_list_type": 0, "extraMap": {}, "is_logout": 0}, "category": {"category_id": "6809637767543259144", "category_name": "前端", "category_url": "frontend", "rank": 2, "back_ground": "https://lc-mhke0kuv.cn-n1.lcfile.com/8c95587526f346c0.png", "icon": "https://lc-mhke0kuv.cn-n1.lcfile.com/1c40f5eaba561e32.png", "ctime": 1457483942, "mtime": 1432503190, "show_type": 3, "item_type": 2, "promote_tag_cap": 4, "promote_priority": 2}, "tags": [{"id": 2546676, "tag_id": "6809640614175604744", "tag_name": "掘金翻译计划", "color": "#0081ff", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/95f7e8be776556ab8d82.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1454716787, "mtime": 1631689800, "id_type": 9, "tag_alias": "", "post_article_count": 2502, "concern_user_count": 42848}, {"id": 2546526, "tag_id": "6809640407484334093", "tag_name": "前端", "color": "#60ADFF", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/bac28828a49181c34110.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 1, "ctime": 1435971546, "mtime": 1631692835, "id_type": 9, "tag_alias": "", "post_article_count": 88828, "concern_user_count": 527704}, {"id": 2546516, "tag_id": "6809640394175971342", "tag_name": "CSS", "color": "#244DE4", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/66de0c4eb9d10130d5bf.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1432239426, "mtime": 1631688735, "id_type": 9, "tag_alias": "", "post_article_count": 14981, "concern_user_count": 297034}, {"id": 2546519, "tag_id": "6809640398105870343", "tag_name": "JavaScript", "color": "#616161", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/5d70fd6af940df373834.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1435884803, "mtime": 1631692583, "id_type": 9, "tag_alias": "", "post_article_count": 67405, "concern_user_count": 398956}, {"id": 2546522, "tag_id": "6809640402103042061", "tag_name": "前端框架", "color": "#F2AB5B", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/f7a198f1e1aeb6d79878.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1435964339, "mtime": 1631690383, "id_type": 9, "tag_alias": "", "post_article_count": 4037, "concern_user_count": 256973}], "user_interact": {"id": 6844903854660665358, "omitempty": 2, "user_id": 0, "is_digg": false, "is_follow": false, "is_collect": false}, "org": {"org_info": null, "org_user": null, "is_followed": false}, "req_id": "202109151603280102121640460500544B"}, {"article_id": "6844904019689734151", "article_info": {"article_id": "6844904019689734151", "user_id": "2594503171249997", "category_id": "6809637767543259144", "tag_ids": [6809640394175971342], "visible_level": 0, "link_url": "https://juejin.im/post/6844904019689734151", "cover_image": "", "is_gfw": 0, "title": "移动端长按保存、取消长按保存图片", "brief_content": "有时候会遇到在移动端长按保存图片的功能,微信打开h5页面可以实现,那么普通浏览器可能效果不一。 safari浏览器在打开的时候效果不尽如人意,比如做一个保存二维码功能,我的iphone 11长按下图毫无反应。 -webkit-touch-callout。 可以看到safari可…", "is_english": 0, "is_original": 1, "user_index": 0.23605734014348, "original_type": 0, 
"original_author": "", "content": "", "ctime": "1576206969", "mtime": "1598539319", "rtime": "1576249288", "draft_id": "6845076574937563149", "view_count": 4119, "collect_count": 11, "digg_count": 12, "comment_count": 0, "hot_index": 217, "is_hot": 0, "rank_index": 0.00078177, "status": 2, "verify_status": 1, "audit_status": 2, "mark_content": ""}, "author_user_info": {"user_id": "2594503171249997", "user_name": "Keely40285", "company": "", "job_title": "前端开发", "avatar_large": "https://sf1-ttcdn-tos.pstatp.com/img/user-avatar/7018ce69f9f4d6119349c34977cc7003~300x300.image", "level": 3, "description": "铲屎官", "followee_count": 21, "follower_count": 106, "post_article_count": 13, "digg_article_count": 58, "got_digg_count": 1442, "got_view_count": 74691, "post_shortmsg_count": 0, "digg_shortmsg_count": 0, "isfollowed": false, "favorable_author": 0, "power": 2188, "study_point": 0, "university": {"university_id": "0", "name": "", "logo": ""}, "major": {"major_id": "0", "parent_id": "0", "name": ""}, "student_status": 0, "select_event_count": 0, "select_online_course_count": 0, "identity": 0, "is_select_annual": false, "select_annual_rank": 0, "annual_list_type": 0, "extraMap": {}, "is_logout": 0}, "category": {"category_id": "6809637767543259144", "category_name": "前端", "category_url": "frontend", "rank": 2, "back_ground": "https://lc-mhke0kuv.cn-n1.lcfile.com/8c95587526f346c0.png", "icon": "https://lc-mhke0kuv.cn-n1.lcfile.com/1c40f5eaba561e32.png", "ctime": 1457483942, "mtime": 1432503190, "show_type": 3, "item_type": 2, "promote_tag_cap": 4, "promote_priority": 2}, "tags": [{"id": 2546516, "tag_id": "6809640394175971342", "tag_name": "CSS", "color": "#244DE4", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/66de0c4eb9d10130d5bf.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1432239426, "mtime": 1631688735, "id_type": 9, "tag_alias": "", "post_article_count": 14981, "concern_user_count": 297034}], "user_interact": {"id": 6844904019689734151, "omitempty": 2, "user_id": 0, "is_digg": false, "is_follow": false, "is_collect": false}, "org": {"org_info": null, "org_user": null, "is_followed": false}, "req_id": "202109151603280102121640460500544B"}, {"article_id": "6844904082226987021", "article_info": {"article_id": "6844904082226987021", "user_id": "2277843822444958", "category_id": "6809637767543259144", "tag_ids": [6809640394175971342], "visible_level": 0, "link_url": "https://juejin.im/post/6844904082226987021", "cover_image": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/gold-user-assets/2020/3/15/170dd0239516d260~tplv-t2oaga2asx-image.image", "is_gfw": 0, "title": "全面分析总结BFC原理及实践", "brief_content": "经常在面试中被问到“如何清除浮动?”、“为什么 overflow: hidden 可以清除浮动?”等等比较基础的问题。虽然这些题目案在各种写面试题的文章中都有提供答案,但这种教科书式的问答肯定不是我们的目的,与其记住答案不如彻底掌握其核心原理——块级格式化上下文(BFC)。 这…", "is_english": 0, "is_original": 1, "user_index": 10.552445067798, "original_type": 0, "original_author": "", "content": "", "ctime": "1583399748", "mtime": "1598850028", "rtime": "1583402010", "draft_id": "6845076660425850888", "view_count": 2674, "collect_count": 34, "digg_count": 30, "comment_count": 8, "hot_index": 171, "is_hot": 0, "rank_index": 0.000782, "status": 2, "verify_status": 1, "audit_status": 2, "mark_content": ""}, "author_user_info": {"user_id": "2277843822444958", "user_name": "dino小恐龙", "company": "", "job_title": "前端研发 @ 公众号「迪诺笔记」", "avatar_large": "https://sf3-ttcdn-tos.pstatp.com/img/user-avatar/5c3e1007f771d435255c167e7c2e3a7e~300x300.image", "level": 2, "description": 
"2020,愿世界没有灾难,一切皆如春暖花开", "followee_count": 48, "follower_count": 216, "post_article_count": 18, "digg_article_count": 206, "got_digg_count": 620, "got_view_count": 32365, "post_shortmsg_count": 16, "digg_shortmsg_count": 37, "isfollowed": false, "favorable_author": 0, "power": 986, "study_point": 0, "university": {"university_id": "0", "name": "", "logo": ""}, "major": {"major_id": "0", "parent_id": "0", "name": ""}, "student_status": 0, "select_event_count": 0, "select_online_course_count": 0, "identity": 0, "is_select_annual": false, "select_annual_rank": 0, "annual_list_type": 0, "extraMap": {}, "is_logout": 0}, "category": {"category_id": "6809637767543259144", "category_name": "前端", "category_url": "frontend", "rank": 2, "back_ground": "https://lc-mhke0kuv.cn-n1.lcfile.com/8c95587526f346c0.png", "icon": "https://lc-mhke0kuv.cn-n1.lcfile.com/1c40f5eaba561e32.png", "ctime": 1457483942, "mtime": 1432503190, "show_type": 3, "item_type": 2, "promote_tag_cap": 4, "promote_priority": 2}, "tags": [{"id": 2546516, "tag_id": "6809640394175971342", "tag_name": "CSS", "color": "#244DE4", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/66de0c4eb9d10130d5bf.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1432239426, "mtime": 1631688735, "id_type": 9, "tag_alias": "", "post_article_count": 14981, "concern_user_count": 297034}], "user_interact": {"id": 6844904082226987021, "omitempty": 2, "user_id": 0, "is_digg": false, "is_follow": false, "is_collect": false}, "org": {"org_info": null, "org_user": null, "is_followed": false}, "req_id": "202109151603280102121640460500544B"}, {"article_id": "6956481119361433614", "article_info": {"article_id": "6956481119361433614", "user_id": "1732486055606670", "category_id": "6809637767543259144", "tag_ids": [6809640394175971342], "visible_level": 0, "link_url": "", "cover_image": "", "is_gfw": 0, "title": "CSS In JS 的几种方式和设置项目主题等基操实操", "brief_content": "CSS In JS 的几种方式和设置项目主题等基操实操 以下将从迄今主要CSS In JS库主流的三个库进行介绍 styled-components 33.4k star 主要 radium 7.4k", "is_english": 0, "is_original": 1, "user_index": 2.380182355385648, "original_type": 0, "original_author": "", "content": "", "ctime": "1619682058", "mtime": "1619766955", "rtime": "1619766955", "draft_id": "6956477045488484366", "view_count": 451, "collect_count": 2, "digg_count": 3, "comment_count": 2, "hot_index": 27, "is_hot": 0, "rank_index": 0.00077933, "status": 2, "verify_status": 1, "audit_status": 2, "mark_content": ""}, "author_user_info": {"user_id": "1732486055606670", "user_name": "禾小沐", "company": "", "job_title": "公众号: 禾小沐的技术与生活", "avatar_large": "https://sf1-ttcdn-tos.pstatp.com/img/user-avatar/35bd71e157607ce1c87595859b3a24d9~300x300.image", "level": 1, "description": "Vue React Node.js Deno Python", "followee_count": 37, "follower_count": 2, "post_article_count": 12, "digg_article_count": 171, "got_digg_count": 31, "got_view_count": 1754, "post_shortmsg_count": 0, "digg_shortmsg_count": 1, "isfollowed": false, "favorable_author": 0, "power": 48, "study_point": 0, "university": {"university_id": "0", "name": "", "logo": ""}, "major": {"major_id": "0", "parent_id": "0", "name": ""}, "student_status": 0, "select_event_count": 0, "select_online_course_count": 0, "identity": 0, "is_select_annual": false, "select_annual_rank": 0, "annual_list_type": 0, "extraMap": {}, "is_logout": 0}, "category": {"category_id": "6809637767543259144", "category_name": "前端", "category_url": "frontend", "rank": 2, "back_ground": 
"https://lc-mhke0kuv.cn-n1.lcfile.com/8c95587526f346c0.png", "icon": "https://lc-mhke0kuv.cn-n1.lcfile.com/1c40f5eaba561e32.png", "ctime": 1457483942, "mtime": 1432503190, "show_type": 3, "item_type": 2, "promote_tag_cap": 4, "promote_priority": 2}, "tags": [{"id": 2546516, "tag_id": "6809640394175971342", "tag_name": "CSS", "color": "#244DE4", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/66de0c4eb9d10130d5bf.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1432239426, "mtime": 1631688735, "id_type": 9, "tag_alias": "", "post_article_count": 14981, "concern_user_count": 297034}], "user_interact": {"id": 6956481119361433614, "omitempty": 2, "user_id": 0, "is_digg": false, "is_follow": false, "is_collect": false}, "org": {"org_info": null, "org_user": null, "is_followed": false}, "req_id": "202109151603280102121640460500544B"}, {"article_id": "6996891176599027720", "article_info": {"article_id": "6996891176599027720", "user_id": "1566165107740503", "category_id": "6809637767543259144", "tag_ids": [6809640394175971342], "visible_level": 0, "link_url": "", "cover_image": "", "is_gfw": 0, "title": "Css 第三天", "brief_content": "这是我参与8月更文挑战的第5天,活动详情查看: 8月更文挑战 五、背景 1.背景颜色 2.背景图片 3.背景图片的平铺 4.背景图片的定位 5.背景图片的尺寸 6.背景的简写方式 六、渐变 1.线性渐", "is_english": 0, "is_original": 1, "user_index": 1, "original_type": 0, "original_author": "", "content": "", "ctime": "1629090714", "mtime": "1629095639", "rtime": "1629095639", "draft_id": "6996890837292597255", "view_count": 77, "collect_count": 0, "digg_count": 0, "comment_count": 0, "hot_index": 3, "is_hot": 0, "rank_index": 0.00078095, "status": 2, "verify_status": 1, "audit_status": 2, "mark_content": ""}, "author_user_info": {"user_id": "1566165107740503", "user_name": "iRoot", "company": "", "job_title": "前端开发", "avatar_large": "https://sf6-ttcdn-tos.pstatp.com/img/user-avatar/7d259510bbdc3e34943037f27b0499a2~300x300.image", "level": 1, "description": "", "followee_count": 1, "follower_count": 0, "post_article_count": 10, "digg_article_count": 0, "got_digg_count": 6, "got_view_count": 598, "post_shortmsg_count": 0, "digg_shortmsg_count": 1, "isfollowed": false, "favorable_author": 0, "power": 11, "study_point": 0, "university": {"university_id": "0", "name": "", "logo": ""}, "major": {"major_id": "0", "parent_id": "0", "name": ""}, "student_status": 0, "select_event_count": 0, "select_online_course_count": 0, "identity": 0, "is_select_annual": false, "select_annual_rank": 0, "annual_list_type": 0, "extraMap": {}, "is_logout": 0}, "category": {"category_id": "6809637767543259144", "category_name": "前端", "category_url": "frontend", "rank": 2, "back_ground": "https://lc-mhke0kuv.cn-n1.lcfile.com/8c95587526f346c0.png", "icon": "https://lc-mhke0kuv.cn-n1.lcfile.com/1c40f5eaba561e32.png", "ctime": 1457483942, "mtime": 1432503190, "show_type": 3, "item_type": 2, "promote_tag_cap": 4, "promote_priority": 2}, "tags": [{"id": 2546516, "tag_id": "6809640394175971342", "tag_name": "CSS", "color": "#244DE4", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/66de0c4eb9d10130d5bf.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1432239426, "mtime": 1631688735, "id_type": 9, "tag_alias": "", "post_article_count": 14981, "concern_user_count": 297034}], "user_interact": {"id": 6996891176599027720, "omitempty": 2, "user_id": 0, "is_digg": false, "is_follow": false, "is_collect": false}, "org": {"org_info": null, "org_user": null, "is_followed": false}, "req_id": 
"202109151603280102121640460500544B"}, {"article_id": "6844903777888108557", "article_info": {"article_id": "6844903777888108557", "user_id": "4089838986069310", "category_id": "6809637767543259144", "tag_ids": [6809640394175971342], "visible_level": 0, "link_url": "https://juejin.im/post/6844903777888108557", "cover_image": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/gold-user-assets/2019/4/15/16a1e93e4c7a89e7~tplv-t2oaga2asx-image.image", "is_gfw": 0, "title": "【CSS】Grid 布局总结", "brief_content": "1. 前言 grid 布局是 css 中的一种新的布局方式,对盒子和盒子内容的位置及尺寸有很强的控制能力。与 flex 不同,flex 着重于单轴,而 grid 适应于多轴,下面就来做个简单的介绍。 2. 基本概念 设置 display: grid; 的元素称为容器,它的直接子…", "is_english": 0, "is_original": 1, "user_index": 0, "original_type": 0, "original_author": "", "content": "", "ctime": "1550309334", "mtime": "1598491406", "rtime": "1550447867", "draft_id": "6845076177476927501", "view_count": 4935, "collect_count": 128, "digg_count": 99, "comment_count": 11, "hot_index": 356, "is_hot": 0, "rank_index": 0.00078001, "status": 2, "verify_status": 1, "audit_status": 2, "mark_content": ""}, "author_user_info": {"user_id": "4089838986069310", "user_name": "上九天吃瓜", "company": "", "job_title": "前端工程师", "avatar_large": "https://sf6-ttcdn-tos.pstatp.com/img/user-avatar/f3ee8f104aede002e780a8f96c93bbe9~300x300.image", "level": 2, "description": "", "followee_count": 29, "follower_count": 140, "post_article_count": 17, "digg_article_count": 40, "got_digg_count": 478, "got_view_count": 36629, "post_shortmsg_count": 21, "digg_shortmsg_count": 22, "isfollowed": false, "favorable_author": 0, "power": 844, "study_point": 0, "university": {"university_id": "0", "name": "", "logo": ""}, "major": {"major_id": "0", "parent_id": "0", "name": ""}, "student_status": 0, "select_event_count": 0, "select_online_course_count": 0, "identity": 0, "is_select_annual": false, "select_annual_rank": 0, "annual_list_type": 0, "extraMap": {}, "is_logout": 0}, "category": {"category_id": "6809637767543259144", "category_name": "前端", "category_url": "frontend", "rank": 2, "back_ground": "https://lc-mhke0kuv.cn-n1.lcfile.com/8c95587526f346c0.png", "icon": "https://lc-mhke0kuv.cn-n1.lcfile.com/1c40f5eaba561e32.png", "ctime": 1457483942, "mtime": 1432503190, "show_type": 3, "item_type": 2, "promote_tag_cap": 4, "promote_priority": 2}, "tags": [{"id": 2546516, "tag_id": "6809640394175971342", "tag_name": "CSS", "color": "#244DE4", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/66de0c4eb9d10130d5bf.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1432239426, "mtime": 1631688735, "id_type": 9, "tag_alias": "", "post_article_count": 14981, "concern_user_count": 297034}], "user_interact": {"id": 6844903777888108557, "omitempty": 2, "user_id": 0, "is_digg": false, "is_follow": false, "is_collect": false}, "org": {"org_info": null, "org_user": null, "is_followed": false}, "req_id": "202109151603280102121640460500544B"}, {"article_id": "6963991572702560287", "article_info": {"article_id": "6963991572702560287", "user_id": "3747558609661213", "category_id": "6809637767543259144", "tag_ids": [6809640394175971342], "visible_level": 0, "link_url": "", "cover_image": "", "is_gfw": 0, "title": "【必知必会-经典CSS布局系列】=> 让一个元素水平垂直居中,到底有多少种方案?", "brief_content": "一、让一个元素水平垂直居中,到底有多少种方案? 
水平居中 对于行内元素: text-align: center; 块级元素 对于确定宽度的块级元素: width和margin实现。margin: 0 ", "is_english": 0, "is_original": 1, "user_index": 0, "original_type": 0, "original_author": "", "content": "", "ctime": "1621430674", "mtime": "1621522475", "rtime": "1621480599", "draft_id": "6963991456373866527", "view_count": 281, "collect_count": 5, "digg_count": 9, "comment_count": 1, "hot_index": 24, "is_hot": 0, "rank_index": 0.00077934, "status": 2, "verify_status": 1, "audit_status": 2, "mark_content": ""}, "author_user_info": {"user_id": "3747558609661213", "user_name": "小只前端攻城狮", "company": "滴滴 | 前端研发工程师", "job_title": "公众号:小攻城狮学前端", "avatar_large": "https://sf1-ttcdn-tos.pstatp.com/img/user-avatar/4533378875c883604f33d3a3a2e9de5c~300x300.image", "level": 3, "description": "Web全栈开发、持续学习者,关注公众号第一时间接收最新文章,也经常分享一些好用的工具", "followee_count": 40, "follower_count": 114, "post_article_count": 92, "digg_article_count": 323, "got_digg_count": 1575, "got_view_count": 28792, "post_shortmsg_count": 5, "digg_shortmsg_count": 1, "isfollowed": false, "favorable_author": 0, "power": 1862, "study_point": 0, "university": {"university_id": "0", "name": "", "logo": ""}, "major": {"major_id": "0", "parent_id": "0", "name": ""}, "student_status": 0, "select_event_count": 0, "select_online_course_count": 0, "identity": 0, "is_select_annual": false, "select_annual_rank": 0, "annual_list_type": 0, "extraMap": {}, "is_logout": 0}, "category": {"category_id": "6809637767543259144", "category_name": "前端", "category_url": "frontend", "rank": 2, "back_ground": "https://lc-mhke0kuv.cn-n1.lcfile.com/8c95587526f346c0.png", "icon": "https://lc-mhke0kuv.cn-n1.lcfile.com/1c40f5eaba561e32.png", "ctime": 1457483942, "mtime": 1432503190, "show_type": 3, "item_type": 2, "promote_tag_cap": 4, "promote_priority": 2}, "tags": [{"id": 2546516, "tag_id": "6809640394175971342", "tag_name": "CSS", "color": "#244DE4", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/66de0c4eb9d10130d5bf.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1432239426, "mtime": 1631688735, "id_type": 9, "tag_alias": "", "post_article_count": 14981, "concern_user_count": 297034}], "user_interact": {"id": 6963991572702560287, "omitempty": 2, "user_id": 0, "is_digg": false, "is_follow": false, "is_collect": false}, "org": {"org_info": null, "org_user": null, "is_followed": false}, "req_id": "202109151603280102121640460500544B"}, {"article_id": "6844904030469095432", "article_info": {"article_id": "6844904030469095432", "user_id": "3579665587900030", "category_id": "6809637767543259144", "tag_ids": [6809640394175971342], "visible_level": 0, "link_url": "https://juejin.im/post/6844904030469095432", "cover_image": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/gold-user-assets/2019/12/24/16f36d394c0156f0~tplv-t2oaga2asx-image.image", "is_gfw": 0, "title": "探索CSS单行文字居中,多行文字居左的实现方式", "brief_content": "琢磨了一下,当时我还真按照产品经理的逻辑,通过js判断一下文字的高度,如果超过一行,就添加一个类名,而且这样的文字很多地方都有,所以还做了遍历,还有最重要的一点是关于方法执行的时机,有可能刚加载的时候高度还获取不到(当时好像还用了定时器,还造成了先居中随后居左跳动的现象)...…", "is_english": 0, "is_original": 1, "user_index": 5.7452355315677, "original_type": 0, "original_author": "", "content": "", "ctime": "1577173180", "mtime": "1598794840", "rtime": "1577178357", "draft_id": "6845076587814060045", "view_count": 2332, "collect_count": 89, "digg_count": 66, "comment_count": 24, "hot_index": 206, "is_hot": 0, "rank_index": 0.0007791, "status": 2, "verify_status": 1, "audit_status": 2, "mark_content": ""}, "author_user_info": {"user_id": "3579665587900030", 
"user_name": "XboxYan", "company": "阅文集团", "job_title": "前端UI设计开发#¥#&%工程师", "avatar_large": "https://sf1-ttcdn-tos.pstatp.com/img/user-avatar/e9911b97587d858f2626bcc7a0e3de9e~300x300.image", "level": 3, "description": "", "followee_count": 50, "follower_count": 298, "post_article_count": 25, "digg_article_count": 193, "got_digg_count": 927, "got_view_count": 39758, "post_shortmsg_count": 33, "digg_shortmsg_count": 30, "isfollowed": false, "favorable_author": 0, "power": 1324, "study_point": 0, "university": {"university_id": "0", "name": "", "logo": ""}, "major": {"major_id": "0", "parent_id": "0", "name": ""}, "student_status": 0, "select_event_count": 0, "select_online_course_count": 0, "identity": 0, "is_select_annual": false, "select_annual_rank": 0, "annual_list_type": 0, "extraMap": {}, "is_logout": 0}, "category": {"category_id": "6809637767543259144", "category_name": "前端", "category_url": "frontend", "rank": 2, "back_ground": "https://lc-mhke0kuv.cn-n1.lcfile.com/8c95587526f346c0.png", "icon": "https://lc-mhke0kuv.cn-n1.lcfile.com/1c40f5eaba561e32.png", "ctime": 1457483942, "mtime": 1432503190, "show_type": 3, "item_type": 2, "promote_tag_cap": 4, "promote_priority": 2}, "tags": [{"id": 2546516, "tag_id": "6809640394175971342", "tag_name": "CSS", "color": "#244DE4", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/66de0c4eb9d10130d5bf.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1432239426, "mtime": 1631688735, "id_type": 9, "tag_alias": "", "post_article_count": 14981, "concern_user_count": 297034}], "user_interact": {"id": 6844904030469095432, "omitempty": 2, "user_id": 0, "is_digg": false, "is_follow": false, "is_collect": false}, "org": {"org_info": null, "org_user": null, "is_followed": false}, "req_id": "202109151603280102121640460500544B"}, {"article_id": "6989059501596868615", "article_info": {"article_id": "6989059501596868615", "user_id": "2313028196644621", "category_id": "6809637767543259144", "tag_ids": [6809640394175971342, 6809640407484334093], "visible_level": 0, "link_url": "", "cover_image": "", "is_gfw": 0, "title": "纯css实现三级导航栏", "brief_content": "纯css实现三级导航栏纯css实现三级导航栏纯css实现三级导航栏纯css实现三级导航栏纯css实现三级导航栏纯css实现三级导航栏纯css实现三级导航栏纯css实现三级导航栏纯css实现三级导航栏纯", "is_english": 0, "is_original": 1, "user_index": 0, "original_type": 0, "original_author": "", "content": "", "ctime": "1627267264", "mtime": "1627276674", "rtime": "1627276674", "draft_id": "6989057764349706253", "view_count": 129, "collect_count": 0, "digg_count": 2, "comment_count": 0, "hot_index": 8, "is_hot": 0, "rank_index": 0.00077872, "status": 2, "verify_status": 1, "audit_status": 2, "mark_content": ""}, "author_user_info": {"user_id": "2313028196644621", "user_name": "slc6666", "company": "新大陆支付", "job_title": "div工程师", "avatar_large": "https://sf1-ttcdn-tos.pstatp.com/img/user-avatar/3bf188e9e23d86a9a1716b7f0c43df06~300x300.image", "level": 1, "description": "", "followee_count": 11, "follower_count": 5, "post_article_count": 20, "digg_article_count": 19, "got_digg_count": 18, "got_view_count": 2043, "post_shortmsg_count": 16, "digg_shortmsg_count": 110, "isfollowed": false, "favorable_author": 0, "power": 38, "study_point": 0, "university": {"university_id": "0", "name": "", "logo": ""}, "major": {"major_id": "0", "parent_id": "0", "name": ""}, "student_status": 0, "select_event_count": 0, "select_online_course_count": 0, "identity": 0, "is_select_annual": false, "select_annual_rank": 0, "annual_list_type": 0, "extraMap": {}, "is_logout": 0}, 
"category": {"category_id": "6809637767543259144", "category_name": "前端", "category_url": "frontend", "rank": 2, "back_ground": "https://lc-mhke0kuv.cn-n1.lcfile.com/8c95587526f346c0.png", "icon": "https://lc-mhke0kuv.cn-n1.lcfile.com/1c40f5eaba561e32.png", "ctime": 1457483942, "mtime": 1432503190, "show_type": 3, "item_type": 2, "promote_tag_cap": 4, "promote_priority": 2}, "tags": [{"id": 2546516, "tag_id": "6809640394175971342", "tag_name": "CSS", "color": "#244DE4", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/66de0c4eb9d10130d5bf.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1432239426, "mtime": 1631688735, "id_type": 9, "tag_alias": "", "post_article_count": 14981, "concern_user_count": 297034}, {"id": 2546526, "tag_id": "6809640407484334093", "tag_name": "前端", "color": "#60ADFF", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/bac28828a49181c34110.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 1, "ctime": 1435971546, "mtime": 1631692835, "id_type": 9, "tag_alias": "", "post_article_count": 88828, "concern_user_count": 527704}], "user_interact": {"id": 6989059501596868615, "omitempty": 2, "user_id": 0, "is_digg": false, "is_follow": false, "is_collect": false}, "org": {"org_info": null, "org_user": null, "is_followed": false}, "req_id": "202109151603280102121640460500544B"}, {"article_id": "6978860907908562951", "article_info": {"article_id": "6978860907908562951", "user_id": "4406498333033918", "category_id": "6809637767543259144", "tag_ids": [6809640394175971342, 6809640407484334093], "visible_level": 0, "link_url": "", "cover_image": "https://p1-juejin.byteimg.com/tos-cn-i-k3u1fbpfcp/7c5f4e6fc9e54cc9b6a4887523572613~tplv-k3u1fbpfcp-watermark.image", "is_gfw": 0, "title": "纯CSS实现随机翻牌效果之 steps 妙用", "brief_content": "翻牌是大家很熟悉的一个互动效果,通常在抽奖活动中出现。那么不借助 JavaScript 是否能够实现随机翻牌效果?翻牌效果肯定是没有问题,CSS 没有随机函数,今天就来分享一个另类的交互实现思路。", "is_english": 0, "is_original": 1, "user_index": 0, "original_type": 0, "original_author": "", "content": "", "ctime": "1624892782", "mtime": "1624938058", "rtime": "1624938058", "draft_id": "6978860408811552799", "view_count": 232, "collect_count": 0, "digg_count": 3, "comment_count": 0, "hot_index": 14, "is_hot": 0, "rank_index": 0.00077758, "status": 2, "verify_status": 1, "audit_status": 2, "mark_content": ""}, "author_user_info": {"user_id": "4406498333033918", "user_name": "天行无忌", "company": "DevPoint", "job_title": "全栈开发", "avatar_large": "https://sf6-ttcdn-tos.pstatp.com/img/user-avatar/ee5b3d33c959244bf7b70b28bb3a4d07~300x300.image", "level": 3, "description": "技术改变生活、研发构建未来、细节铸造品质", "followee_count": 59, "follower_count": 174, "post_article_count": 139, "digg_article_count": 118, "got_digg_count": 1096, "got_view_count": 186739, "post_shortmsg_count": 84, "digg_shortmsg_count": 42, "isfollowed": false, "favorable_author": 0, "power": 2963, "study_point": 0, "university": {"university_id": "0", "name": "", "logo": ""}, "major": {"major_id": "0", "parent_id": "0", "name": ""}, "student_status": 0, "select_event_count": 0, "select_online_course_count": 0, "identity": 0, "is_select_annual": false, "select_annual_rank": 0, "annual_list_type": 0, "extraMap": {}, "is_logout": 0}, "category": {"category_id": "6809637767543259144", "category_name": "前端", "category_url": "frontend", "rank": 2, "back_ground": "https://lc-mhke0kuv.cn-n1.lcfile.com/8c95587526f346c0.png", "icon": "https://lc-mhke0kuv.cn-n1.lcfile.com/1c40f5eaba561e32.png", "ctime": 1457483942, "mtime": 
1432503190, "show_type": 3, "item_type": 2, "promote_tag_cap": 4, "promote_priority": 2}, "tags": [{"id": 2546516, "tag_id": "6809640394175971342", "tag_name": "CSS", "color": "#244DE4", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/66de0c4eb9d10130d5bf.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1432239426, "mtime": 1631688735, "id_type": 9, "tag_alias": "", "post_article_count": 14981, "concern_user_count": 297034}, {"id": 2546526, "tag_id": "6809640407484334093", "tag_name": "前端", "color": "#60ADFF", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/bac28828a49181c34110.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 1, "ctime": 1435971546, "mtime": 1631692835, "id_type": 9, "tag_alias": "", "post_article_count": 88828, "concern_user_count": 527704}], "user_interact": {"id": 6978860907908562951, "omitempty": 2, "user_id": 0, "is_digg": false, "is_follow": false, "is_collect": false}, "org": {"org_info": null, "org_user": null, "is_followed": false}, "req_id": "202109151603280102121640460500544B"}, {"article_id": "6961060761976832008", "article_info": {"article_id": "6961060761976832008", "user_id": "1953184231197982", "category_id": "6809637767543259144", "tag_ids": [6809640394175971342], "visible_level": 0, "link_url": "", "cover_image": "", "is_gfw": 0, "title": "五.块元素、行内元素、行内块元素", "brief_content": "1.块元素的特性block 独占一行; 宽度缺省是它父级元素的100%,除非设定一个宽度; 高度、行高、外边距、内边距都可以设置 可以容纳其他内联元素或者其他块元素,一般用于配合css完成网页的基本布", "is_english": 0, "is_original": 1, "user_index": 0, "original_type": 0, "original_author": "", "content": "", "ctime": "1620749220", "mtime": "1620788927", "rtime": "1620788927", "draft_id": "6961054886368641054", "view_count": 441, "collect_count": 2, "digg_count": 4, "comment_count": 0, "hot_index": 26, "is_hot": 0, "rank_index": 0.00077607, "status": 2, "verify_status": 1, "audit_status": 2, "mark_content": ""}, "author_user_info": {"user_id": "1953184231197982", "user_name": "前端_阿珂", "company": "", "job_title": "前端工程师", "avatar_large": "https://sf3-ttcdn-tos.pstatp.com/img/user-avatar/d569f52bde8b976244f37e00222f4d3e~300x300.image", "level": 2, "description": "没有最好,只有更好", "followee_count": 32, "follower_count": 83, "post_article_count": 35, "digg_article_count": 56, "got_digg_count": 157, "got_view_count": 9967, "post_shortmsg_count": 0, "digg_shortmsg_count": 0, "isfollowed": false, "favorable_author": 0, "power": 256, "study_point": 0, "university": {"university_id": "0", "name": "", "logo": ""}, "major": {"major_id": "0", "parent_id": "0", "name": ""}, "student_status": 0, "select_event_count": 0, "select_online_course_count": 0, "identity": 0, "is_select_annual": false, "select_annual_rank": 0, "annual_list_type": 0, "extraMap": {}, "is_logout": 0}, "category": {"category_id": "6809637767543259144", "category_name": "前端", "category_url": "frontend", "rank": 2, "back_ground": "https://lc-mhke0kuv.cn-n1.lcfile.com/8c95587526f346c0.png", "icon": "https://lc-mhke0kuv.cn-n1.lcfile.com/1c40f5eaba561e32.png", "ctime": 1457483942, "mtime": 1432503190, "show_type": 3, "item_type": 2, "promote_tag_cap": 4, "promote_priority": 2}, "tags": [{"id": 2546516, "tag_id": "6809640394175971342", "tag_name": "CSS", "color": "#244DE4", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/66de0c4eb9d10130d5bf.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1432239426, "mtime": 1631688735, "id_type": 9, "tag_alias": "", "post_article_count": 14981, 
"concern_user_count": 297034}], "user_interact": {"id": 6961060761976832008, "omitempty": 2, "user_id": 0, "is_digg": false, "is_follow": false, "is_collect": false}, "org": {"org_info": null, "org_user": null, "is_followed": false}, "req_id": "202109151603280102121640460500544B"}], "cursor": "eyJ2IjoiNzAwNzgwMzIxNDc1ODE1MDE3NSIsImkiOjE4MjB9", "count": 4601, "has_more": true} | [
"[email protected]"
]
| |
dd9814f4339308678dd3b49ec818c02f22cb4071 | 32174f2b74b286a52a2f3b0bfd120a0711bfc6dc | /sample/web/app/blogs/views.py | 1eed5546a6795b31d89b50bb7d975bad02c62a42 | [
"MIT"
]
| permissive | hdknr/django-mediafiles | d13172162506cba2abdab0d85bc2815e2e24b6e6 | 7526e35eb7f532e36c95e7aa76290bb95a9ac41a | refs/heads/master | 2020-06-04T03:16:28.824865 | 2014-10-30T04:10:40 | 2014-10-30T04:10:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,787 | py | # -*- coding: utf-8 -*-
from django import template
from django.http import HttpResponse,HttpResponseRedirect,Http404
from django.shortcuts import render_to_response
#
import uuid
#
from models import Blog
from forms import BlogForm
from mediafiles.models import MediaFile
from mediafiles.forms import MediaFileForm,media_formset
#
def media(request,id):
m = MediaFile.objects.get(id=id )
return m.response( HttpResponse )
def media_preview(request,id):
return render_to_response('blogs/media/preview.html',
{'media': MediaFile.objects.get(id=id) },
context_instance=template.RequestContext(request))
def blog_edit_simple(request,id):
blog = Blog.objects.get(id=id)
if request.method == "GET":
form = BlogForm(instance=blog,prefix='blog')
media_form = MediaFileForm(prefix='media')
else:
form = BlogForm(request.POST,instance=blog,prefix='blog')
media_form = MediaFileForm(
request.POST,request.FILES,prefix='media')
if form.is_valid() :
form.save()
if media_form.is_valid():
media_form.instance.user = request.user
media_form.instance.slug = uuid.uuid1().hex
media_form.save()
blog.medias.add(media_form.instance)
media_form = MediaFileForm(prefix='media')
return render_to_response('blogs/blog/edit_simple.html',
{'form': form,'media_form':media_form, },
context_instance=template.RequestContext(request))
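#
# Formset-based editing (sketch of the flow below): one sub-form per attached
# MediaFile plus blank extras; on POST, forms flagged "removing" are detached
# from the blog, while valid new uploads are stamped with the request user
# and linked in.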
def blog_edit_formset(request,id):
blog = Blog.objects.get(id=id)
form = BlogForm(request.POST if request.method =="POST" else None ,
instance=blog,prefix='blog')
medias = media_formset(request,blog.medias.all())
if request.method == "POST":
if form.is_valid() :
form.save()
if medias.is_valid():
for media in medias.forms:
if media.is_valid() and media.instance.data:
if media.cleaned_data.get('removing',False):
blog.medias.remove(media.instance)
else:
media.instance.user = request.user if request.user.is_authenticated() else None
media.save()
blog.medias.add(media.instance)
else:
            #: error handling
            print(medias.errors)
#: for increase medias.extra_forms after adding new mediafile
medias = media_formset(None,blog.medias.all())
return render_to_response('blogs/blog/edit_formset.html',
{'form': form,'medias':medias, },
context_instance=template.RequestContext(request))
def blog_edit(request,id):
return blog_edit_formset(request,id)
| [
"[email protected]"
]
| |
85be01f3a0b032f92796e0019322f26133fe9c4c | 2b42b40ae2e84b438146003bf231532973f1081d | /spec/mgm4456378.3.spec | ebf34658cbfb5bf804c2d9a8c460a0edce625a5e | []
| no_license | MG-RAST/mtf | 0ea0ebd0c0eb18ec6711e30de7cc336bdae7215a | e2ddb3b145068f22808ef43e2bbbbaeec7abccff | refs/heads/master | 2020-05-20T15:32:04.334532 | 2012-03-05T09:51:49 | 2012-03-05T09:51:49 | 3,625,755 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 14,303 | spec | {
"id": "mgm4456378.3",
"metadata": {
"mgm4456378.3.metadata.json": {
"format": "json",
"provider": "metagenomics.anl.gov"
}
},
"providers": {
"metagenomics.anl.gov": {
"files": {
"100.preprocess.info": {
"compression": null,
"description": null,
"size": 736,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4456378.3/file/100.preprocess.info"
},
"100.preprocess.passed.fna.gz": {
"compression": "gzip",
"description": null,
"size": 52391,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4456378.3/file/100.preprocess.passed.fna.gz"
},
"100.preprocess.passed.fna.stats": {
"compression": null,
"description": null,
"size": 308,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4456378.3/file/100.preprocess.passed.fna.stats"
},
"100.preprocess.removed.fna.gz": {
"compression": "gzip",
"description": null,
"size": 1683,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4456378.3/file/100.preprocess.removed.fna.gz"
},
"100.preprocess.removed.fna.stats": {
"compression": null,
"description": null,
"size": 306,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4456378.3/file/100.preprocess.removed.fna.stats"
},
"205.screen.h_sapiens_asm.info": {
"compression": null,
"description": null,
"size": 448,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4456378.3/file/205.screen.h_sapiens_asm.info"
},
"299.screen.info": {
"compression": null,
"description": null,
"size": 410,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4456378.3/file/299.screen.info"
},
"299.screen.passed.fna.gcs": {
"compression": null,
"description": null,
"size": 1020,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4456378.3/file/299.screen.passed.fna.gcs"
},
"299.screen.passed.fna.gz": {
"compression": "gzip",
"description": null,
"size": 49523,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4456378.3/file/299.screen.passed.fna.gz"
},
"299.screen.passed.fna.lens": {
"compression": null,
"description": null,
"size": 318,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4456378.3/file/299.screen.passed.fna.lens"
},
"299.screen.passed.fna.stats": {
"compression": null,
"description": null,
"size": 308,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4456378.3/file/299.screen.passed.fna.stats"
},
"440.cluster.rna97.fna.gz": {
"compression": "gzip",
"description": null,
"size": 8671,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4456378.3/file/440.cluster.rna97.fna.gz"
},
"440.cluster.rna97.fna.stats": {
"compression": null,
"description": null,
"size": 307,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4456378.3/file/440.cluster.rna97.fna.stats"
},
"440.cluster.rna97.info": {
"compression": null,
"description": null,
"size": 947,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4456378.3/file/440.cluster.rna97.info"
},
"440.cluster.rna97.mapping": {
"compression": null,
"description": null,
"size": 69513,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4456378.3/file/440.cluster.rna97.mapping"
},
"440.cluster.rna97.mapping.stats": {
"compression": null,
"description": null,
"size": 48,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4456378.3/file/440.cluster.rna97.mapping.stats"
},
"450.rna.expand.lca.gz": {
"compression": "gzip",
"description": null,
"size": 60746,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4456378.3/file/450.rna.expand.lca.gz"
},
"450.rna.expand.rna.gz": {
"compression": "gzip",
"description": null,
"size": 14041,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4456378.3/file/450.rna.expand.rna.gz"
},
"450.rna.sims.filter.gz": {
"compression": "gzip",
"description": null,
"size": 10035,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4456378.3/file/450.rna.sims.filter.gz"
},
"450.rna.sims.gz": {
"compression": "gzip",
"description": null,
"size": 102039,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4456378.3/file/450.rna.sims.gz"
},
"900.abundance.function.gz": {
"compression": "gzip",
"description": null,
"size": 5425,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4456378.3/file/900.abundance.function.gz"
},
"900.abundance.lca.gz": {
"compression": "gzip",
"description": null,
"size": 5160,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4456378.3/file/900.abundance.lca.gz"
},
"900.abundance.md5.gz": {
"compression": "gzip",
"description": null,
"size": 8174,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4456378.3/file/900.abundance.md5.gz"
},
"900.abundance.ontology.gz": {
"compression": "gzip",
"description": null,
"size": 43,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4456378.3/file/900.abundance.ontology.gz"
},
"900.abundance.organism.gz": {
"compression": "gzip",
"description": null,
"size": 12003,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4456378.3/file/900.abundance.organism.gz"
},
"900.loadDB.sims.filter.seq": {
"compression": null,
"description": null,
"size": 1091462,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4456378.3/file/900.loadDB.sims.filter.seq"
},
"900.loadDB.source.stats": {
"compression": null,
"description": null,
"size": 94,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4456378.3/file/900.loadDB.source.stats"
},
"999.done.COG.stats": {
"compression": null,
"description": null,
"size": 1,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4456378.3/file/999.done.COG.stats"
},
"999.done.KO.stats": {
"compression": null,
"description": null,
"size": 1,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4456378.3/file/999.done.KO.stats"
},
"999.done.NOG.stats": {
"compression": null,
"description": null,
"size": 1,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4456378.3/file/999.done.NOG.stats"
},
"999.done.Subsystems.stats": {
"compression": null,
"description": null,
"size": 1,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4456378.3/file/999.done.Subsystems.stats"
},
"999.done.class.stats": {
"compression": null,
"description": null,
"size": 357,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4456378.3/file/999.done.class.stats"
},
"999.done.domain.stats": {
"compression": null,
"description": null,
"size": 27,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4456378.3/file/999.done.domain.stats"
},
"999.done.family.stats": {
"compression": null,
"description": null,
"size": 1349,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4456378.3/file/999.done.family.stats"
},
"999.done.genus.stats": {
"compression": null,
"description": null,
"size": 1434,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4456378.3/file/999.done.genus.stats"
},
"999.done.order.stats": {
"compression": null,
"description": null,
"size": 572,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4456378.3/file/999.done.order.stats"
},
"999.done.phylum.stats": {
"compression": null,
"description": null,
"size": 150,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4456378.3/file/999.done.phylum.stats"
},
"999.done.rarefaction.stats": {
"compression": null,
"description": null,
"size": 22039,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4456378.3/file/999.done.rarefaction.stats"
},
"999.done.sims.stats": {
"compression": null,
"description": null,
"size": 79,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4456378.3/file/999.done.sims.stats"
},
"999.done.species.stats": {
"compression": null,
"description": null,
"size": 4144,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4456378.3/file/999.done.species.stats"
}
},
"id": "mgm4456378.3",
"provider": "metagenomics.anl.gov",
"providerId": "mgm4456378.3"
}
},
"raw": {
"mgm4456378.3.fna.gz": {
"compression": "gzip",
"format": "fasta",
"provider": "metagenomics.anl.gov",
"url": "http://api.metagenomics.anl.gov/reads/mgm4456378.3"
}
}
} | [
"[email protected]"
]
| |
d8d457d18bf39b70f109b94084363f9d7ad6a62d | c7d124bbd7ab66ad7acd50765a0d5c11e7925d16 | /generate_test.py | 402d03607f30aeb012c03c5f3127892e4e05fff1 | []
| no_license | lambdaloop/scheduling | aa240c568eb974d57d7fc93af3cd866293c1f417 | fd111594d2c5a652b42796027195a352db3a9fce | refs/heads/master | 2021-06-03T19:26:16.300509 | 2019-04-14T21:26:28 | 2019-04-14T21:26:28 | 29,323,677 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 633 | py | #!/usr/bin/env python2
import csv
import random
f = open('test.csv', 'w')
fieldnames = ['name', 'preferences', 'num_slots']
writer = csv.DictWriter(f, fieldnames)
writer.writeheader()
n_people = 20
n_slots = 9
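# each person asks for 1-3 slots and lists at least that many preferred
# slot indices (up to 3 extra), sampled without replacement, sorted ascending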
for i in range(n_people):
#name = chr(65 + i)
name = str(i)
slots = random.randint(1, 3)
pref_num = min(slots + random.randint(0, 3), n_slots)
# if random.random() < 0.2:
# pref_num -= 1
pref = sorted(random.sample(range(n_slots), pref_num))
writer.writerow({
'name': name,
'num_slots': slots,
'preferences': ' '.join(map(str, pref))
})
f.close()
| [
"[email protected]"
]
| |
39de8218dbf4b99aaa6290c59d2f7556322db935 | 371fe9a1fdeb62ad1142b34d732bde06f3ce21a0 | /scripts/extract_loops_seq_approx.py | 36d5e97c42658a9a6bd31287b6fd2f7138e26ba2 | []
| no_license | maickrau/rdna_resolution | 971f3b7e803565c9432be69b8e2a2852f55b8b79 | aab42310c31e655cbbc318331082fa3436d69075 | refs/heads/master | 2023-03-03T05:14:33.966930 | 2021-02-17T20:45:20 | 2021-02-17T20:45:20 | 339,851,442 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,706 | py | #!/usr/bin/python
import sys
import re
loop_middle = sys.argv[1] # format ACGTAGA...
# loop ends from sys.argv[2]
# loop ends from sys.argv[3]
max_edits = int(sys.argv[4])
# fasta from stdin
# loops to stdout
def revcomp(s):
comp = {'A': 'T', 'T': 'A', 'C': 'G', 'G': 'C'}
return "".join(comp[c] for c in s[::-1])
loop_ends = set()
for v in sys.argv[2:4]:
loop_ends.add(v) # format ACGACT...
loop_ends.add(revcomp(v))
assert len(v) == len(loop_middle)
rev_loop_middle = revcomp(loop_middle)
def find_seq_positions(seq, query, mismatches):
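	# Semi-global edit-distance scan: slides `query` along `seq` with free
	# start/end gaps in seq, allowing up to `mismatches` combined
	# substitutions/indels; returns (start, end) spans of the surviving hits.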
dp_column = [0]
new_dp_column = [0]
start_index = [0]
new_start_index = [0]
for i in range(0, len(query)):
dp_column.append(i+1)
new_dp_column.append(i+1)
start_index.append(0)
new_start_index.append(0)
result = []
last_score = 0
last_valid = 0
for i in range(0, len(seq)):
new_dp_column[0] = 0
new_start_index[0] = i
new_last_valid = 0
for j in range(0, len(query)):
new_dp_column[j+1] = new_dp_column[j] + 1
new_start_index[j+1] = new_start_index[j]
match_score = 0 if query[j] == seq[i] else 1
if dp_column[j] + match_score < new_dp_column[j+1]:
new_dp_column[j+1] = dp_column[j] + match_score
new_start_index[j+1] = start_index[j]
if dp_column[j+1] + 1 < new_dp_column[j+1]:
				new_dp_column[j+1] = dp_column[j+1] + 1  # deletion costs 1, matching the guard above
new_start_index[j+1] = start_index[j+1]
if new_dp_column[j+1] <= mismatches: new_last_valid = j+1
if new_dp_column[j+1] > mismatches and j+1 > last_valid+1 and j+1 > new_last_valid+1:
new_dp_column[-1] = mismatches+1
break
last_valid = new_last_valid
if new_dp_column[-1] <= mismatches:
skip = False
if len(result) > 0:
if result[-1][0] == new_start_index[-1]:
if last_score <= new_dp_column[-1]:
skip = True
else:
result = result[:-1]
if not skip:
result.append((new_start_index[-1], i))
last_score = new_dp_column[-1]
(dp_column, new_dp_column) = (new_dp_column, dp_column)
(start_index, new_start_index) = (new_start_index, start_index)
return result
def output_loops(name, seq, mismatches):
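	# Orients the read so the loop-middle anchor matches forward, then cuts it
	# into loop units between consecutive middle-anchor hits, plus flanking
	# units bounded by a loop-end anchor when one is present.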
seq = seq.replace('a', 'A').replace('c', 'C').replace('g', 'G').replace('t', 'T')
fw = True
seq_poses = find_seq_positions(seq, loop_middle, mismatches)
if len(seq_poses) == 0:
seq = revcomp(seq)
seq_poses = find_seq_positions(seq, loop_middle, mismatches)
if len(seq_poses) == 0: return
assert len(seq_poses) > 0
loop_start_end_poses = []
for end in loop_ends:
loop_start_end_poses += find_seq_positions(seq, end, mismatches)
if len(loop_start_end_poses) > 2:
print(seq_poses)
print(loop_start_end_poses)
print(name)
assert len(loop_start_end_poses) <= 2
loop_start_end_poses.sort()
loop_middle_poses = [p[0] for p in seq_poses]
loop_start_end_poses = [p[0] for p in loop_start_end_poses]
if len(loop_middle_poses) + len(loop_start_end_poses) == 1: return
if len(loop_start_end_poses) == 1:
if not (loop_start_end_poses[0] < loop_middle_poses[0] or loop_start_end_poses[0] > loop_middle_poses[-1]):
print(name)
print(seq)
print(loop_start_end_poses)
print(loop_middle_poses)
assert loop_start_end_poses[0] < loop_middle_poses[0] or loop_start_end_poses[0] > loop_middle_poses[-1]
if len(loop_start_end_poses) == 2:
if not (loop_start_end_poses[0] < loop_middle_poses[0] and loop_start_end_poses[1] > loop_middle_poses[-1]):
print(name)
print(seq)
print(loop_start_end_poses)
print(loop_middle_poses)
assert loop_start_end_poses[0] < loop_middle_poses[0] and loop_start_end_poses[1] > loop_middle_poses[-1]
loop_seqs = []
if (len(loop_start_end_poses) == 1 and loop_start_end_poses[0] < loop_middle_poses[0]) or len(loop_start_end_poses) == 2:
assert loop_start_end_poses[0] < loop_middle_poses[0]
loop_seqs.append(seq[loop_start_end_poses[0]:loop_middle_poses[0]+len(loop_middle)])
for i in range(1, len(loop_middle_poses)):
loop_seqs.append(seq[loop_middle_poses[i-1]:loop_middle_poses[i]+len(loop_middle)])
if (len(loop_start_end_poses) == 1 and loop_start_end_poses[0] > loop_middle_poses[-1]) or len(loop_start_end_poses) == 2:
assert loop_start_end_poses[-1] > loop_middle_poses[-1]
loop_seqs.append(seq[loop_middle_poses[-1]:loop_start_end_poses[-1]+len(loop_middle)])
assert len(loop_seqs) > 0
for i in range(0, len(loop_seqs)):
print(name + "_loop_" + str(i) + "\t" + loop_seqs[i])
current_seq = ""
current_name = ""
for l in sys.stdin:
if l[0] == '>':
if len(current_seq) > 0:
output_loops(current_name, current_seq, max_edits)
current_name = l[1:].strip().split(' ')[0].strip()
current_seq = ""
else:
current_seq += l.strip()
if len(current_seq) > 0:
output_loops(current_name, current_seq, max_edits)
| [
"[email protected]"
]
| |
966016a6c669ca24a56fb831863b51dfbec863e3 | f07a42f652f46106dee4749277d41c302e2b7406 | /Data Set/bug-fixing-1/434e9e1d8f62ba49daa2f720956e048e336b3c9c-<clone>-bug.py | 3c358f1301ecb059826eef335a56b1bc630c3ce2 | []
| no_license | wsgan001/PyFPattern | e0fe06341cc5d51b3ad0fe29b84098d140ed54d1 | cc347e32745f99c0cd95e79a18ddacc4574d7faa | refs/heads/main | 2023-08-25T23:48:26.112133 | 2021-10-23T14:11:22 | 2021-10-23T14:11:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,482 | py |
def clone(git_path, module, repo, dest, remote, depth, version, bare, reference, refspec, verify_commit):
' makes a new git repo if it does not already exist '
dest_dirname = os.path.dirname(dest)
try:
os.makedirs(dest_dirname)
except:
pass
cmd = [git_path, 'clone']
if bare:
cmd.append('--bare')
else:
cmd.extend(['--origin', remote])
if depth:
if ((version == 'HEAD') or refspec):
cmd.extend(['--depth', str(depth)])
elif (is_remote_branch(git_path, module, dest, repo, version) or is_remote_tag(git_path, module, dest, repo, version)):
cmd.extend(['--depth', str(depth)])
cmd.extend(['--branch', version])
else:
module.warn('Ignoring depth argument. Shallow clones are only available for HEAD, branches, tags or in combination with refspec.')
if reference:
cmd.extend(['--reference', str(reference)])
cmd.extend([repo, dest])
module.run_command(cmd, check_rc=True, cwd=dest_dirname)
if bare:
if (remote != 'origin'):
module.run_command([git_path, 'remote', 'add', remote, repo], check_rc=True, cwd=dest)
if refspec:
cmd = [git_path, 'fetch']
if depth:
cmd.extend(['--depth', str(depth)])
cmd.extend([remote, refspec])
module.run_command(cmd, check_rc=True, cwd=dest)
if verify_commit:
verify_commit_sign(git_path, module, dest, version)
| [
"[email protected]"
]
| |
6f5371f76a88a1c3070db0af82abb2248dbd8564 | 456703d469684b99d849bb386707359729af4e1e | /data.py | 50649d1c690eab4c7947b38c8524416fbd32fd2e | []
| no_license | blacklemons/userlist_mongo | 9c05949f4d2a8c4232c1126020b66ad892857bc6 | b341628cc5da8248c39e46f5a0e974807d9986d1 | refs/heads/main | 2023-05-05T11:49:13.272471 | 2021-06-03T06:56:56 | 2021-06-03T06:56:56 | 372,990,127 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 369 | py | def Articles(title,description,author,edit):
articles = {
'title' : title,
'description' : description,
'author' : author,
'edit' : edit,
}
return articles
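# usage sketch (values are illustrative only):
#   article = Articles('Title', 'Body text', 'author-name', 'edit-date')
#   user = Users('Ann', '[email protected]', 'ann', 'hashed-password')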
def Users(name, email, username, password):
users = {
'name' : name,
'email' : email,
'username' : username,
'password': password
}
return users | [
"[email protected]"
]
| |
67d9459b98c02585b18afecaf8c3df0f88840736 | 95e9ec4b3b0d86063da53a0e62e138cf794cce3a | /python/20190430/模块/demo09.py | b8805798af569441c98514893091d564e1240813 | []
| no_license | wjl626nice/1902 | c3d350d91925a01628c9402cbceb32ebf812e43c | 5a1a6dd59cdd903563389fa7c73a283e8657d731 | refs/heads/master | 2023-01-05T23:51:47.667675 | 2019-08-19T06:42:09 | 2019-08-19T06:42:09 | 180,686,044 | 4 | 1 | null | 2023-01-04T07:35:24 | 2019-04-11T00:46:43 | Python | UTF-8 | Python | false | false | 369 | py | # pillow
from PIL import Image
# open an image file from the current directory:
im = Image.open('1.png')
# get the image size:
w, h = im.size
print('Original image size: %sx%s' % (w, h))
# scale down to 50% (thumbnail() resizes in place, preserving aspect ratio):
im.thumbnail((w // 2, h // 2))
print('Resize image to: %sx%s' % (w // 2, h // 2))
# save the resized image in PNG format:
im.save('thumbnail.png', 'png')
| [
"[email protected]"
]
| |
a3cddb14f4568125e0ea0d932da937365312500e | ae7cb8543a98b7d65295a422c7971e7a37f921cd | /minerals/models.py | 02bcb451cf3a5a60d68581cbcd838dfa627fcbad | []
| no_license | frankRose1/mineral-catalog | a74386278073d1b9e92fe44e1cc348a1b498380e | 88f91a55105532fe197c84d050a5d5bd67167a9d | refs/heads/master | 2020-04-16T12:49:23.715372 | 2019-03-14T00:23:27 | 2019-03-14T00:23:27 | 165,597,686 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,699 | py | from django.db import models
# Create your models here.
class Mineral(models.Model):
"""
Not all entries will have every field as shown in the json file in "mineral_data/mineral.json"
"""
name = models.CharField(max_length=255, blank=True, default='')
image_filename = models.CharField(max_length=255, blank=True, default='')
image_caption = models.TextField(blank=True, default='')
category = models.CharField(max_length=255, blank=True, default='')
formula = models.CharField(max_length=255, blank=True, default='')
strunz_classification = models.CharField(max_length=255, blank=True, default='')
color = models.CharField(max_length=255, blank=True, default='')
crystal_system = models.CharField(max_length=255, blank=True, default='')
unit_cell = models.CharField(max_length=255, blank=True, default='')
crystal_symmetry = models.CharField(max_length=255, blank=True, default='')
cleavage = models.CharField(max_length=255, blank=True, default='')
mohs_scale_hardness = models.CharField(max_length=255, blank=True, default='')
luster = models.CharField(max_length=255, blank=True, default='')
streak = models.CharField(max_length=255, blank=True, default='')
diaphaneity = models.CharField(max_length=255, blank=True, default='')
optical_properties = models.CharField(max_length=255, blank=True, default='')
refractive_index = models.CharField(max_length=255, blank=True, default='')
crystal_habit = models.CharField(max_length=255, blank=True, default='')
specific_gravity = models.CharField(max_length=255, blank=True, default='')
group = models.CharField(max_length=255, blank=True, default='') | [
"[email protected]"
]
| |
94099f7539dd29af5d9baf1b7e65aae919dc5eb1 | b8e9dd6fd8f8b691cba5a3af2388467bcf6c90bb | /samples/openapi3/client/3_0_3_unit_test/python-experimental/unit_test_api/paths/response_body_post_enum_with1_does_not_match_true_response_body_for_content_types/post.py | 6c03a7fcfbbe3990bd37e435d7afe3c07aa49f1e | [
"Apache-2.0"
]
| permissive | FallenRiteMonk/openapi-generator | f8b98940219eecf14dc76dced4b0fbd394522aa3 | b6576d11733ecad6fa4a0a616e1a06d502a771b7 | refs/heads/master | 2023-03-16T05:23:36.501909 | 2022-09-02T01:46:56 | 2022-09-02T01:46:56 | 164,609,299 | 0 | 0 | Apache-2.0 | 2019-01-08T09:08:56 | 2019-01-08T09:08:56 | null | UTF-8 | Python | false | false | 4,699 | py | # coding: utf-8
"""
Generated by: https://openapi-generator.tech
"""
from dataclasses import dataclass
import urllib3
from urllib3._collections import HTTPHeaderDict
from unit_test_api import api_client, exceptions
from datetime import date, datetime # noqa: F401
import decimal # noqa: F401
import functools # noqa: F401
import io # noqa: F401
import re # noqa: F401
import typing # noqa: F401
import uuid # noqa: F401
import frozendict # noqa: F401
from unit_test_api import schemas # noqa: F401
from unit_test_api.model.enum_with1_does_not_match_true import EnumWith1DoesNotMatchTrue
from . import path
SchemaFor200ResponseBodyApplicationJson = EnumWith1DoesNotMatchTrue
@dataclass
class ApiResponseFor200(api_client.ApiResponse):
response: urllib3.HTTPResponse
body: typing.Union[
SchemaFor200ResponseBodyApplicationJson,
]
headers: schemas.Unset = schemas.unset
_response_for_200 = api_client.OpenApiResponse(
response_cls=ApiResponseFor200,
content={
'application/json': api_client.MediaType(
schema=SchemaFor200ResponseBodyApplicationJson),
},
)
_status_code_to_response = {
'200': _response_for_200,
}
_all_accept_content_types = (
'application/json',
)
class BaseApi(api_client.Api):
def _post_enum_with1_does_not_match_true_response_body_for_content_types_oapg(
self: api_client.Api,
accept_content_types: typing.Tuple[str] = _all_accept_content_types,
stream: bool = False,
timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
skip_deserialization: bool = False,
) -> typing.Union[
ApiResponseFor200,
api_client.ApiResponseWithoutDeserialization
]:
"""
:param skip_deserialization: If true then api_response.response will be set but
api_response.body and api_response.headers will not be deserialized into schema
class instances
"""
used_path = path.value
_headers = HTTPHeaderDict()
# TODO add cookie handling
if accept_content_types:
for accept_content_type in accept_content_types:
_headers.add('Accept', accept_content_type)
response = self.api_client.call_api(
resource_path=used_path,
method='post'.upper(),
headers=_headers,
stream=stream,
timeout=timeout,
)
if skip_deserialization:
api_response = api_client.ApiResponseWithoutDeserialization(response=response)
else:
response_for_status = _status_code_to_response.get(str(response.status))
if response_for_status:
api_response = response_for_status.deserialize(response, self.api_client.configuration)
else:
api_response = api_client.ApiResponseWithoutDeserialization(response=response)
if not 200 <= response.status <= 299:
raise exceptions.ApiException(api_response=api_response)
return api_response
class PostEnumWith1DoesNotMatchTrueResponseBodyForContentTypes(BaseApi):
# this class is used by api classes that refer to endpoints with operationId fn names
def post_enum_with1_does_not_match_true_response_body_for_content_types(
self: BaseApi,
accept_content_types: typing.Tuple[str] = _all_accept_content_types,
stream: bool = False,
timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
skip_deserialization: bool = False,
) -> typing.Union[
ApiResponseFor200,
api_client.ApiResponseWithoutDeserialization
]:
return self._post_enum_with1_does_not_match_true_response_body_for_content_types_oapg(
accept_content_types=accept_content_types,
stream=stream,
timeout=timeout,
skip_deserialization=skip_deserialization
)
class ApiForpost(BaseApi):
# this class is used by api classes that refer to endpoints by path and http method names
def post(
self: BaseApi,
accept_content_types: typing.Tuple[str] = _all_accept_content_types,
stream: bool = False,
timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
skip_deserialization: bool = False,
) -> typing.Union[
ApiResponseFor200,
api_client.ApiResponseWithoutDeserialization
]:
return self._post_enum_with1_does_not_match_true_response_body_for_content_types_oapg(
accept_content_types=accept_content_types,
stream=stream,
timeout=timeout,
skip_deserialization=skip_deserialization
)
| [
"[email protected]"
]
| |
533a8674b5a0ea2d97c2032ad2269f7fe0835047 | 818173671edf15d7c6d775ed003bcd35608233f9 | /demos/go/wscript | 13ea4561dd2b6d1e18137a02eb74523dc6ffdb69 | []
| no_license | zsx/waf | a1e87e079e22443ae3ed98e08cefc705b5f73906 | 66d1c6ede4ceda66a98dbbf9dd473f1d5c5752ba | refs/heads/master | 2021-01-13T12:56:12.379186 | 2010-07-12T17:27:13 | 2010-07-12T17:27:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,688 | #!/usr/bin/env python
# encoding: utf-8
# Tom Wambold tom5760 gmail
# Thomas Nagy, 2010 (ita)
"""
if libgmp is present, try building with 'waf --exe'
"""
top = '.'
out = 'build'
def options(opt):
opt.add_option('--exe', action='store_true', default=False, help='Execute the program after it is compiled')
def configure(ctx):
ctx.check_tool('go')
try:
ctx.check_tool('gcc')
ctx.check_cc(fragment='#include <gmp.h>\nint main() {return 0;}\n', uselib_store='GMP', lib='gmp')
except ctx.errors.ConfigurationError:
ctx.env.TRY_CGO = False
else:
ctx.env.TRY_CGO = True
def build(ctx):
ctx(
features = 'go gopackage',
target = 'other',
source = [
'other/a.go',
#'other/b.go', # using two source files for gopack does not seem to work anymore
],
)
ctx(
features = 'go goprogram',
target = 'test',
uselib_local = 'other',
source = 'main.go',
includes = '.',
)
if ctx.env.TRY_CGO:
# see http://code.google.com/p/go/issues/detail?id=533
# so we have to move the files, grrrrr
ctx(name='cgo', rule='${CGO} ${SRC} && mv ${gen.path.abspath()}/${TGT[0].name} ${gen.path.abspath()}/${TGT[1].name} ${TGT[0].parent.abspath()}',
target='gmp.cgo1.go gmp.cgo2.c gmp.cgo2.c _cgo_gotypes.go _cgo_defun.c',
source='gmp.go',
shell=True)
ctx(features='c cshlib', source='gmp.cgo2.c', target=ctx.path.find_or_declare('cgo_gmp.so'), uselib='GMP')
ctx.add_group()
ctx(features='go goprogram', source='pi.go', target='pi')
from waflib import Options, Utils
if ctx.env.TRY_CGO and Options.options.exe:
def exe(bld):
p = Utils.subprocess.Popen('LD_LIBRARY_PATH=build ./build/pi', shell=True)
p.wait()
ctx.add_post_fun(exe)
| [
"tnagy1024@f0382ac9-c320-0410-b3f0-b508d59f5a85"
]
| tnagy1024@f0382ac9-c320-0410-b3f0-b508d59f5a85 |
|
9700f7b32038e32409736e25ab200fda2427f5dd | 46dd1ad6fe93777a4dce84166b64cb9adb679e62 | /test/functional/interface_bitcoin_cli.py | be8c48fd1a8c0abb45f0f9939a095625cc50b098 | [
"MIT"
]
| permissive | aleomartinez/EducaCoin | 14266500dc3c5aabfe8bebf17c8903aecea0af8c | 2282d6affdd2192a79efdce579ddd0d8576d950d | refs/heads/master | 2020-03-31T01:28:49.612215 | 2018-10-05T14:43:36 | 2018-10-05T14:43:36 | 151,783,304 | 0 | 0 | MIT | 2018-10-05T22:11:11 | 2018-10-05T22:11:11 | null | UTF-8 | Python | false | false | 3,669 | py | #!/usr/bin/env python3
# Copyright (c) 2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test educacoin-cli"""
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_equal, assert_raises_process_error, get_auth_cookie
class TestBitcoinCli(BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 1
def run_test(self):
"""Main test logic"""
self.log.info("Compare responses from gewalletinfo RPC and `educacoin-cli getwalletinfo`")
cli_response = self.nodes[0].cli.getwalletinfo()
rpc_response = self.nodes[0].getwalletinfo()
assert_equal(cli_response, rpc_response)
self.log.info("Compare responses from getblockchaininfo RPC and `educacoin-cli getblockchaininfo`")
cli_response = self.nodes[0].cli.getblockchaininfo()
rpc_response = self.nodes[0].getblockchaininfo()
assert_equal(cli_response, rpc_response)
user, password = get_auth_cookie(self.nodes[0].datadir)
self.log.info("Test -stdinrpcpass option")
assert_equal(0, self.nodes[0].cli('-rpcuser=%s' % user, '-stdinrpcpass', input=password).getblockcount())
assert_raises_process_error(1, "incorrect rpcuser or rpcpassword", self.nodes[0].cli('-rpcuser=%s' % user, '-stdinrpcpass', input="foo").echo)
self.log.info("Test -stdin and -stdinrpcpass")
assert_equal(["foo", "bar"], self.nodes[0].cli('-rpcuser=%s' % user, '-stdin', '-stdinrpcpass', input=password + "\nfoo\nbar").echo())
assert_raises_process_error(1, "incorrect rpcuser or rpcpassword", self.nodes[0].cli('-rpcuser=%s' % user, '-stdin', '-stdinrpcpass', input="foo").echo)
self.log.info("Make sure that -getinfo with arguments fails")
assert_raises_process_error(1, "-getinfo takes no arguments", self.nodes[0].cli('-getinfo').help)
self.log.info("Compare responses from `educacoin-cli -getinfo` and the RPCs data is retrieved from.")
cli_get_info = self.nodes[0].cli('-getinfo').send_cli()
wallet_info = self.nodes[0].getwalletinfo()
network_info = self.nodes[0].getnetworkinfo()
blockchain_info = self.nodes[0].getblockchaininfo()
assert_equal(cli_get_info['version'], network_info['version'])
assert_equal(cli_get_info['protocolversion'], network_info['protocolversion'])
assert_equal(cli_get_info['walletversion'], wallet_info['walletversion'])
assert_equal(cli_get_info['balance'], wallet_info['balance'])
assert_equal(cli_get_info['blocks'], blockchain_info['blocks'])
assert_equal(cli_get_info['timeoffset'], network_info['timeoffset'])
assert_equal(cli_get_info['connections'], network_info['connections'])
assert_equal(cli_get_info['proxy'], network_info['networks'][0]['proxy'])
assert_equal(cli_get_info['difficulty'], blockchain_info['difficulty'])
assert_equal(cli_get_info['testnet'], blockchain_info['chain'] == "test")
assert_equal(cli_get_info['balance'], wallet_info['balance'])
assert_equal(cli_get_info['keypoololdest'], wallet_info['keypoololdest'])
assert_equal(cli_get_info['keypoolsize'], wallet_info['keypoolsize'])
assert_equal(cli_get_info['paytxfee'], wallet_info['paytxfee'])
assert_equal(cli_get_info['relayfee'], network_info['relayfee'])
# unlocked_until is not tested because the wallet is not encrypted
if __name__ == '__main__':
TestBitcoinCli().main()
| [
"[email protected]"
]
| |
14b4d0089b4e2b8e65d9e95bec9a45f3a2d224f8 | e3365bc8fa7da2753c248c2b8a5c5e16aef84d9f | /indices/dabbler.py | 1ea1b9cd70fbca21dc35207bee1746ae0a696f43 | []
| no_license | psdh/WhatsintheVector | e8aabacc054a88b4cb25303548980af9a10c12a8 | a24168d068d9c69dc7a0fd13f606c080ae82e2a6 | refs/heads/master | 2021-01-25T10:34:22.651619 | 2015-09-23T11:54:06 | 2015-09-23T11:54:06 | 42,749,205 | 2 | 3 | null | 2015-09-23T11:54:07 | 2015-09-18T22:06:38 | Python | UTF-8 | Python | false | false | 62 | py | ii = [('CrokTPS.py', 1), ('ThomGLG.py', 1), ('BeckWRE.py', 1)] | [
"[email protected]"
]
| |
82cbf15cb149b4e72ab5811f65c9fee2d676ee8d | 09e57dd1374713f06b70d7b37a580130d9bbab0d | /data/cirq_new/cirq_program/startCirq_noisy959.py | a9b2211c27ba41c3a2a043b44c15010fd25e7f71 | [
"BSD-3-Clause"
]
| permissive | UCLA-SEAL/QDiff | ad53650034897abb5941e74539e3aee8edb600ab | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | refs/heads/main | 2023-08-05T04:52:24.961998 | 2021-09-19T02:56:16 | 2021-09-19T02:56:16 | 405,159,939 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,480 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 5/15/20 4:49 PM
# @File : grover.py
# qubit number=4
# total number=23
import cirq
import cirq.google as cg
from typing import Optional
import sys
from math import log2
import numpy as np
#thatsNoCode
def make_circuit(n: int, input_qubit):
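    # 4-qubit circuit: Hadamard layer, CZ gates conjugated by Hadamards, a Y
    # gate, and repeated SWAP pairs, followed by a measurement of all qubits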
c = cirq.Circuit() # circuit begin
c.append(cirq.H.on(input_qubit[0])) # number=1
c.append(cirq.H.on(input_qubit[1])) # number=2
c.append(cirq.H.on(input_qubit[1])) # number=7
c.append(cirq.H.on(input_qubit[2])) # number=3
c.append(cirq.H.on(input_qubit[3])) # number=4
c.append(cirq.H.on(input_qubit[0])) # number=14
c.append(cirq.CZ.on(input_qubit[3],input_qubit[0])) # number=15
c.append(cirq.H.on(input_qubit[0])) # number=16
c.append(cirq.H.on(input_qubit[0])) # number=20
c.append(cirq.CZ.on(input_qubit[3],input_qubit[0])) # number=21
c.append(cirq.H.on(input_qubit[0])) # number=22
c.append(cirq.SWAP.on(input_qubit[1],input_qubit[0])) # number=8
c.append(cirq.SWAP.on(input_qubit[1],input_qubit[0])) # number=9
c.append(cirq.Y.on(input_qubit[2])) # number=19
c.append(cirq.SWAP.on(input_qubit[1],input_qubit[0])) # number=10
c.append(cirq.SWAP.on(input_qubit[1],input_qubit[0])) # number=11
c.append(cirq.SWAP.on(input_qubit[1],input_qubit[0])) # number=12
c.append(cirq.SWAP.on(input_qubit[1],input_qubit[0])) # number=13
c.append(cirq.SWAP.on(input_qubit[3],input_qubit[0])) # number=17
c.append(cirq.SWAP.on(input_qubit[3],input_qubit[0])) # number=18
# circuit end
c.append(cirq.measure(*input_qubit, key='result'))
return c
def bitstring(bits):
return ''.join(str(int(b)) for b in bits)
if __name__ == '__main__':
qubit_count = 4
input_qubits = [cirq.GridQubit(i, 0) for i in range(qubit_count)]
circuit = make_circuit(qubit_count,input_qubits)
circuit = cg.optimized_for_sycamore(circuit, optimizer_type='sqrt_iswap')
circuit_sample_count =2820
circuit = circuit.with_noise(cirq.depolarize(p=0.01))
simulator = cirq.Simulator()
result = simulator.run(circuit, repetitions=circuit_sample_count)
frequencies = result.histogram(key='result', fold_func=bitstring)
writefile = open("../data/startCirq_noisy959.csv","w+")
print(format(frequencies),file=writefile)
print("results end", file=writefile)
print(circuit.__len__(), file=writefile)
print(circuit,file=writefile)
writefile.close() | [
"[email protected]"
]
| |
624f9ac70390eb2b0177c09ce025a3b00a2bd5ec | 87a66fbed380353955cc6160c8fffe56dee785be | /bert2bert.py | 3ff4c7f15e16f72c1b0e7a7ec1fec8f221d33179 | [
"Apache-2.0"
]
| permissive | JngHyun/2021-BOAZ-bert2bert | ca84e8f4ad444f27a31ac8f74469826adefe3a19 | 63e95cc87b231ebf344950df80a43abc1139cb7d | refs/heads/main | 2023-03-27T01:13:35.747955 | 2021-03-25T07:17:39 | 2021-03-25T07:17:39 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,735 | py | from typing import List
from transformers import (
EncoderDecoderModel,
BertConfig,
EncoderDecoderConfig,
BertModel, BertTokenizer,
)
from transformers.modeling_bart import shift_tokens_right
from kobert_transformers import get_tokenizer
from lightning_base import LightningBase
import torch
class Bert2Bert(LightningBase):
def __init__(
self,
model_save_path: str,
batch_size: int,
num_gpus: int,
max_len: int = 128,
lr: float = 3e-5,
weight_decay: float = 1e-4,
save_step_interval: int = 1000,
accelerator: str = "ddp",
precision: int = 16,
use_amp: bool = True,
) -> None:
super(Bert2Bert, self).__init__(
model_save_path=model_save_path,
max_len=max_len,
batch_size=batch_size,
num_gpus=num_gpus,
lr=lr,
weight_decay=weight_decay,
save_step_interval=save_step_interval,
accelerator=accelerator,
precision=precision,
use_amp=use_amp,
)
encoder_config = BertConfig.from_pretrained("monologg/kobert")
decoder_config = BertConfig.from_pretrained("monologg/kobert")
config = EncoderDecoderConfig.from_encoder_decoder_configs(
encoder_config, decoder_config
)
self.model = EncoderDecoderModel(config)
self.tokenizer = KoBertTokenizer()
state_dict = BertModel.from_pretrained("monologg/kobert").state_dict()
self.model.encoder.load_state_dict(state_dict)
self.model.decoder.bert.load_state_dict(state_dict, strict=False)
        # the cross-attention layers and LM head are trained from scratch
def training_step(self, batch, batch_idx):
src, tgt = batch[0], batch[1]
src_input = self.tokenizer.encode_batch(src, max_length=self.max_len)
tgt_input = self.tokenizer.encode_batch(tgt, max_length=self.max_len)
input_ids = src_input["input_ids"].to(self.device)
attention_mask = src_input["attention_mask"].to(self.device)
labels = tgt_input["input_ids"].to(self.device)
decoder_input_ids = shift_tokens_right(
labels, self.tokenizer.token2idx["[PAD]"]
)
outputs = self.model(
input_ids,
attention_mask=attention_mask,
decoder_input_ids=decoder_input_ids,
)
lm_logits = outputs[0]
loss_fn = torch.nn.CrossEntropyLoss(
ignore_index=self.tokenizer.token2idx["[PAD]"]
)
lm_loss = loss_fn(lm_logits.view(-1, lm_logits.shape[-1]), labels.view(-1))
self.save_model()
return {"loss": lm_loss}
class KoBertTokenizer(object):
def __init__(self):
self.tokenizer = get_tokenizer()
self.token2idx = self.tokenizer.token2idx
self.idx2token = {v: k for k, v in self.token2idx.items()}
def encode_batch(self, x: List[str], max_length):
max_len = 0
result_tokenization = []
for i in x:
tokens = self.tokenizer.encode(i, max_length=max_length, truncation=True)
result_tokenization.append(tokens)
if len(tokens) > max_len:
max_len = len(tokens)
padded_tokens = []
for tokens in result_tokenization:
padding = (torch.ones(max_len) * self.token2idx["[PAD]"]).long()
padding[: len(tokens)] = torch.tensor(tokens).long()
padded_tokens.append(padding.unsqueeze(0))
padded_tokens = torch.cat(padded_tokens, dim=0).long()
mask_tensor = torch.ones(padded_tokens.size()).long()
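        # three chained where() calls: real tokens end up 1, [PAD] positions 0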
attention_mask = torch.where(
padded_tokens == self.token2idx["[PAD]"], padded_tokens, mask_tensor * -1
).long()
attention_mask = torch.where(
attention_mask == -1, attention_mask, mask_tensor * 0
).long()
attention_mask = torch.where(
attention_mask != -1, attention_mask, mask_tensor
).long()
return {
"input_ids": padded_tokens.long(),
"attention_mask": attention_mask.long(),
}
def decode(self, tokens):
# remove special tokens
# unk, pad, cls, sep, mask
tokens = [token for token in tokens
if token not in [0, 1, 2, 3, 4]]
decoded = [self.idx2token[token] for token in tokens]
if "▁" in decoded[0] and "▁" in decoded[1]:
# fix decoding bugs
tokens = tokens[1:]
return self.tokenizer.decode(tokens)
def decode_batch(self, list_of_tokens):
return [self.decode(tokens) for tokens in list_of_tokens]
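# usage sketch (arguments are illustrative; assumes the monologg/kobert
# weights are downloadable and the LightningBase training loop drives it):
#   model = Bert2Bert(model_save_path="ckpt/", batch_size=16, num_gpus=1)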
| [
"[email protected]"
]
| |
0042ca91b39b2b86022ac9bc913b347ab174bb78 | 5966449d2e29c9b64351895db2932f94f9de42da | /catkin_ws/build/polly_speech/catkin_generated/generate_cached_setup.py | 5e0cad45e5c1737bb42a7d7059d9ff24b74321b6 | []
| no_license | godaeseong/GoHriProject | 8cbce6934485b8ba3253fc7b6c5b5b59397b4518 | 425e70b7c91b6215f5477fc2250d2b0ac96577be | refs/heads/master | 2021-05-11T22:11:56.099580 | 2018-01-15T02:20:43 | 2018-01-15T02:20:43 | 117,484,817 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,329 | py | # -*- coding: utf-8 -*-
from __future__ import print_function
import argparse
import os
import stat
import sys
# find the import for catkin's python package - either from source space or from an installed underlay
if os.path.exists(os.path.join('/opt/ros/kinetic/share/catkin/cmake', 'catkinConfig.cmake.in')):
sys.path.insert(0, os.path.join('/opt/ros/kinetic/share/catkin/cmake', '..', 'python'))
try:
from catkin.environment_cache import generate_environment_script
except ImportError:
# search for catkin package in all workspaces and prepend to path
for workspace in "/home/hri/catkin_ws/devel;/opt/ros/kinetic".split(';'):
python_path = os.path.join(workspace, 'lib/python2.7/dist-packages')
if os.path.isdir(os.path.join(python_path, 'catkin')):
sys.path.insert(0, python_path)
break
from catkin.environment_cache import generate_environment_script
code = generate_environment_script('/home/hri/catkin_ws/devel/.private/polly_speech/env.sh')
output_filename = '/home/hri/catkin_ws/build/polly_speech/catkin_generated/setup_cached.sh'
with open(output_filename, 'w') as f:
#print('Generate script for cached setup "%s"' % output_filename)
f.write('\n'.join(code))
mode = os.stat(output_filename).st_mode
os.chmod(output_filename, mode | stat.S_IXUSR)
| [
"[email protected]"
]
| |
63bb8cfeb317f62af40c54d6d6ba604724dc264e | 691d3f3e04d354e11772335064f33245e1ed8c28 | /lib/galaxy/datatypes/indexers/coverage.py | 2bfec91c25fb05f513831206a3471b71db5749ea | [
"CC-BY-2.5",
"MIT"
]
| permissive | dbcls/dbcls-galaxy | 934a27cc13663549d5208158fc0b2821609399a8 | 6142165ef27f6a02aee42f26e0b94fed67ecc896 | refs/heads/master | 2016-09-05T22:53:27.553419 | 2009-09-09T06:35:28 | 2009-09-09T06:35:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,442 | py | #!/usr/bin/env python
"""
Read a chromosome of coverage data, and write it as a npy array, as
well as averages over regions of progressively larger size in powers of 10
"""
from __future__ import division
import sys
from galaxy import eggs
import pkg_resources; pkg_resources.require( "bx-python" )
import bx.wiggle
from bx.cookbook import doc_optparse
from bx import misc
max2 = max
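# keep a reference to the builtin max(); the name "max" is shadowed by a
# parameter of write_chrom() and by a local variable in main() below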
pkg_resources.require("numpy>=1.2.1")
from numpy import *
import tempfile
import os
def write_chrom(max, out_base, instream):
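    # start from an all-NaN array (zeros * nan == nan); observed positions
    # are overwritten with real scores below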
scores = zeros( max, float32 ) * nan
# Fill array from wiggle
max_value = 0
min_value = 0
for line in instream:
line = line.rstrip("\n\r")
(chrom, pos, val) = line.split("\t")
pos, val = int(pos), float(val)
scores[pos] = val
# Write ra
fname = "%s_%d" % ( out_base, 1 )
save( fname, scores )
os.rename( fname+".npy", fname )
# Write average
for window in 10, 100, 1000, 10000, 100000:
input = scores.copy()
size = len( input )
        input.resize( ( ( size // window ), window ) )
masked = ma.masked_array( input, isnan( input ) )
averaged = mean( masked, 1 )
averaged.set_fill_value( nan )
fname = "%s_%d" % ( out_base, window )
save( fname, averaged.filled() )
del masked, averaged
os.rename( fname+".npy", fname )
def main():
max = int( 512*1024*1024 )
# get chroms and lengths
chroms = {}
LEN = {}
for line in open(sys.argv[1],"r"):
line = line.rstrip("\r\n")
fields = line.split("\t")
(chrom, pos, forward) = fields[0:3]
reverse = 0
if len(fields) == 4: reverse = int(fields[3])
forward = int(forward)+reverse
pos = int(pos)
chrom_file = chroms.get(chrom, None)
if not chrom_file:
chrom_file = chroms[chrom] = tempfile.NamedTemporaryFile()
chrom_file.write("%s\t%s\t%s\n" % (chrom,pos,forward))
LEN[chrom] = max2( LEN.get(chrom,0), pos+1 )
for chrom, stream in chroms.items():
stream.seek(0)
prefix = os.path.join(sys.argv[2], chrom)
write_chrom( LEN[chrom], prefix, stream )
manifest_file = open( os.path.join( sys.argv[2], "manifest.tab" ),"w" )
for key, value in LEN.items():
print >> manifest_file, "%s\t%s" % (key, value)
manifest_file.close()
if __name__ == "__main__": main()
| [
"[email protected]"
]
| |
cad859d418ce2e7f4caf5b06ea68e3865b327913 | 199145122c35976fbfc22f2d709458bf67772f95 | /apps/hosts/urls.py | 8d52a9078d69ec7982ad7b9679e2e9841d265abf | [
"Apache-2.0"
]
| permissive | yhgnice/toolsvb | 6109adbce89dd645da342d619acbcaca31b11efa | 35f9d27ee2439d134cab160a7cf930ea13a31d26 | refs/heads/master | 2020-05-24T05:15:40.112999 | 2017-03-14T06:32:23 | 2017-03-14T06:32:23 | 84,824,825 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 253 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Created by Nice... on '2017/3/9 20:32'
from django.conf.urls import url
from .views import RebootServices
urlpatterns = [
    # host tools
url(r'^tools/$', RebootServices.as_view(), name="tools"),
]
| [
"123"
]
| 123 |
607c473b42712149c56ebae6342712cfda2c7ff2 | a289ad8df7840045a659db4f0f936b09494243b3 | /gruvi/error.py | d0483517992c9c2b8de1ea8e0e33f2443658ca33 | [
"MIT"
]
| permissive | tijko/gruvi | a29414bc757f9b19cfc457df36e270c5fefef183 | 558faa181390dfac83cd42fdcafb1850008e4ac5 | refs/heads/master | 2020-12-11T05:34:23.897520 | 2014-01-04T06:59:29 | 2014-01-04T06:59:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,008 | py | # This file is part of Gruvi. Gruvi is free software available under the
# terms of the MIT license. See the file "LICENSE" that was provided
# together with this source file for the licensing terms.
#
# Copyright (c) 2012-2013 the Gruvi authors. See the file "AUTHORS" for a
# complete list.
from __future__ import absolute_import, print_function
import sys
from pyuv.error import UVError
__all__ = ['Error', 'Timeout', 'Cancelled']
Error = UVError
class Timeout(Error):
"""A timeout has occurred."""
class Cancelled(Error):
"""A fiber or calback was cancelled."""
# The following is a pretty bad hack.. We want to use Sphinx's "automodule" to
# document most of our modules in the API reference section, and we want it to
# show inherited members. The result is that it shows an ugly "with_traceback"
# method for gruvi.Error. We fix that by setting that method to None if and
# only if we are running under Sphinx.
if hasattr(sys, 'running_under_sphinx'):
Error.with_traceback = None
| [
"[email protected]"
]
| |
4f0ed701e5f9fc81b15e8550d01102e7412e4ae4 | 3665e5e6946fd825bb03b3bcb79be96262ab6d68 | /jc/parsers/df.py | 817f0a21bbaf233bfe3351eeefdabda7f96d9361 | [
"MIT",
"BSD-3-Clause"
]
| permissive | philippeitis/jc | a28b84cff7fb2852a374a7f0f41151b103288f26 | d96b3a65a98bc135d21d4feafc0a43317b5a11fa | refs/heads/master | 2021-02-16T05:03:03.022601 | 2020-03-04T16:30:52 | 2020-03-04T16:30:52 | 244,969,097 | 0 | 0 | MIT | 2020-03-08T21:10:36 | 2020-03-04T18:01:38 | null | UTF-8 | Python | false | false | 5,176 | py | """jc - JSON CLI output utility df Parser
Usage:
specify --df as the first argument if the piped input is coming from df
Compatibility:
'linux', 'darwin'
Examples:
$ df | jc --df -p
[
{
"filesystem": "devtmpfs",
"1k_blocks": 1918820,
"used": 0,
"available": 1918820,
"use_percent": 0,
"mounted_on": "/dev"
},
{
"filesystem": "tmpfs",
"1k_blocks": 1930668,
"used": 0,
"available": 1930668,
"use_percent": 0,
"mounted_on": "/dev/shm"
},
{
"filesystem": "tmpfs",
"1k_blocks": 1930668,
"used": 11800,
"available": 1918868,
"use_percent": 1,
"mounted_on": "/run"
},
...
]
$ df | jc --df -p -r
[
{
"filesystem": "devtmpfs",
"1k_blocks": "1918820",
"used": "0",
"available": "1918820",
"use_percent": "0%",
"mounted_on": "/dev"
},
{
"filesystem": "tmpfs",
"1k_blocks": "1930668",
"used": "0",
"available": "1930668",
"use_percent": "0%",
"mounted_on": "/dev/shm"
},
{
"filesystem": "tmpfs",
"1k_blocks": "1930668",
"used": "11800",
"available": "1918868",
"use_percent": "1%",
"mounted_on": "/run"
},
...
]
"""
import jc.utils
import jc.parsers.universal
class info():
version = '1.1'
description = 'df command parser'
author = 'Kelly Brazil'
author_email = '[email protected]'
# compatible options: linux, darwin, cygwin, win32, aix, freebsd
compatible = ['linux', 'darwin']
magic_commands = ['df']
__version__ = info.version
def process(proc_data):
"""
Final processing to conform to the schema.
Parameters:
proc_data: (dictionary) raw structured data to process
Returns:
List of dictionaries. Structured data with the following schema:
[
{
"filesystem": string,
"size": string,
"1k_blocks": integer,
"512_blocks": integer,
"used": integer,
"available": integer,
"capacity_percent": integer,
"ifree": integer,
"iused": integer,
"use_percent": integer,
"iused_percent": integer,
"mounted_on": string
}
]
"""
for entry in proc_data:
# change 'avail' to 'available'
if 'avail' in entry:
entry['available'] = entry.pop('avail')
# change 'use%' to 'use_percent'
if 'use%' in entry:
entry['use_percent'] = entry.pop('use%')
# change 'capacity' to 'capacity_percent'
if 'capacity' in entry:
entry['capacity_percent'] = entry.pop('capacity')
# change '%iused' to 'iused_percent'
if '%iused' in entry:
entry['iused_percent'] = entry.pop('%iused')
# change any entry for key with '_blocks' in the name to int
for k in entry:
if str(k).find('_blocks') != -1:
try:
blocks_int = int(entry[k])
entry[k] = blocks_int
                except ValueError:
entry[k] = None
# remove percent sign from 'use_percent', 'capacity_percent', and 'iused_percent'
if 'use_percent' in entry:
entry['use_percent'] = entry['use_percent'].rstrip('%')
if 'capacity_percent' in entry:
entry['capacity_percent'] = entry['capacity_percent'].rstrip('%')
if 'iused_percent' in entry:
entry['iused_percent'] = entry['iused_percent'].rstrip('%')
# change used, available, use_percent, capacity_percent, ifree, iused, iused_percent to int
int_list = ['used', 'available', 'use_percent', 'capacity_percent', 'ifree', 'iused', 'iused_percent']
for key in int_list:
if key in entry:
try:
key_int = int(entry[key])
entry[key] = key_int
            except ValueError:
entry[key] = None
return proc_data
def parse(data, raw=False, quiet=False):
"""
Main text parsing function
Parameters:
data: (string) text data to parse
raw: (boolean) output preprocessed JSON if True
quiet: (boolean) suppress warning messages if True
Returns:
List of dictionaries. Raw or processed structured data.
"""
if not quiet:
jc.utils.compatibility(__name__, info.compatible)
cleandata = data.splitlines()
# fix headers
cleandata[0] = cleandata[0].lower()
cleandata[0] = cleandata[0].replace('-', '_')
cleandata[0] = cleandata[0].replace('mounted on', 'mounted_on')
# parse the data
raw_output = jc.parsers.universal.sparse_table_parse(cleandata)
if raw:
return raw_output
else:
return process(raw_output)
| [
"[email protected]"
]
| |
b4d795b37c2d38a9e60a530baac9859a0f9c353c | 032369c2fb0d441234955fa05ae578bf37d9cc2f | /Final_Exam/6.py | c471bd4299388112fa0e2250ecea9b174134e436 | []
| no_license | duochen/Python-Beginner | 87b68ca5cd4dde6299174e6702ac775e51adaa93 | 8585b8065841b1465b23e46b504da681ab136926 | refs/heads/master | 2023-04-29T16:38:03.047585 | 2022-08-22T12:45:04 | 2022-08-22T12:45:04 | 163,430,002 | 1 | 2 | null | 2023-04-21T20:36:09 | 2018-12-28T16:35:46 | HTML | UTF-8 | Python | false | false | 20 | py | print((1,2) + (3,4)) | [
"[email protected]"
]
| |
75b7d13d54b027208ae7bf83b28a23721fbfcc21 | d44c989d1082ec91ae420569f79e39105ec9adf0 | /Convert Binary Number in a Linked List to Integer.py | d8732b80748c73d1c4539378955d8afbfe29849a | []
| no_license | hsinhuibiga/Nov | c5a79e265a6afcfd03f04e23914d3924129c6389 | 15b2f37f96ded183ab3507a95985900a9d5d3ddc | refs/heads/main | 2023-01-14T13:11:02.568683 | 2020-11-22T13:40:36 | 2020-11-22T13:40:36 | 309,392,018 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 486 | py | #Convert Binary Number in a Linked List to Integer
# Definition for singly-linked list.
# class ListNode(object):
# def __init__(self, val=0, next=None):
# self.val = val
# self.next = next
class Solution(object):
def getDecimalValue(self, head):
"""
:type head: ListNode
:rtype: int
"""
binary = ''
while head != None:
binary += str(head.val)
head = head.next
return int(binary,2) | [
"[email protected]"
]
| |
36c519d580be12825f3bf43d17406404d77ab275 | aba2dd3ed154e1307e3ffb360d22c400bc8e17ab | /bib2json.py | 2688d16b53929bd3986a2b8509fc9d5acfecb38d | [
"MIT"
]
| permissive | SujiKim6/rebiber | 3938406df01e5aa61a9d9cf24bb74bd4fed82787 | 6617792dbfdb860d96f15027210381215c63685d | refs/heads/main | 2023-02-21T08:22:52.610870 | 2021-01-26T09:41:06 | 2021-01-26T09:57:02 | 332,980,291 | 0 | 0 | MIT | 2021-01-26T05:19:34 | 2021-01-26T05:19:33 | null | UTF-8 | Python | false | false | 1,802 | py | import json
import re
import sys
import bibtexparser
import argparse
from tqdm import tqdm
def normalize_title(title_str):
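    # strip every non-letter and lowercase, e.g.
    # "Attention Is All You Need!" -> "attentionisallyouneed"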
title_str = re.sub(r'[^a-zA-Z]',r'', title_str)
return title_str.lower().replace(" ", "").strip()
def load_bib_file(bibpath="acl.bib"):
all_bib_entries = []
with open(bibpath) as f:
bib_entry_buffer = []
for line in f.readlines():
# line = line.strip()
bib_entry_buffer.append(line)
if line == "}\n":
all_bib_entries.append(bib_entry_buffer)
bib_entry_buffer = []
return all_bib_entries
def build_json(all_bib_entries):
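    """Parse raw bib entries into a dict keyed by normalized title.

    "month" fields are dropped before parsing; entries that bibtexparser
    cannot parse are printed and skipped.
    """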
all_bib_dict = {}
    num_exceptions = 0
for bib_entry in tqdm(all_bib_entries[:]):
bib_entry_str = " ".join([line for line in bib_entry if "month" not in line.lower()]).lower()
try:
bib_entry_parsed = bibtexparser.loads(bib_entry_str)
bib_key = normalize_title(bib_entry_parsed.entries[0]["title"])
all_bib_dict[bib_key] = bib_entry
except Exception as e:
print(bib_entry)
print(e)
            num_exceptions += 1
return all_bib_dict
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("-i", "--input_bib", default="data/acl.bib",
type=str, help="The input bib file")
parser.add_argument("-o", "--output_json", default="data/acl.json",
type=str, help="The output bib file")
args = parser.parse_args()
all_bib_entries = load_bib_file(args.input_bib)
    all_bib_dict = build_json(all_bib_entries)
with open(args.output_json, "w") as f:
json.dump(all_bib_dict, f, indent=2)
| [
"[email protected]"
]
| |
e0e468f23e653c4f37ed38f070b78741ba8bdf07 | 5ca4a0d91f5bd119e80478b5bd3d43ed30133a42 | /film20/config/urls/pl.py | 65a13447c224a832a15c19ca2748822deec504d0 | []
| no_license | thuvh/filmmaster | 1fc81377feef5a9e13f792b329ef90f840404ec5 | dd6a2ee5a4951b2397170d5086c000169bf91350 | refs/heads/master | 2021-01-17T16:10:54.682908 | 2012-04-29T18:19:52 | 2012-04-29T18:19:52 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,941 | py | #-------------------------------------------------------------------------------
# Filmaster - a social web network and recommendation engine
# Copyright (c) 2009 Filmaster (Borys Musielak, Adam Zielinski).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#-------------------------------------------------------------------------------
# Project
from film20.config.templates import templates
success = "sukces"
error = "error"
def full_url(key):
try:
from django.conf import settings
DOMAIN = settings.DOMAIN
FULL_DOMAIN = settings.FULL_DOMAIN
except:
DOMAIN = ''
FULL_DOMAIN = ''
return (FULL_DOMAIN or DOMAIN) + '/'+urls[key]+'/'
urls = {
### LEGACY STUFF FOR COMPATIBILITY: FLM-1185 ###
"BLOG_POST_OLD": "notka",
"SHORT_REVIEW_OLD": "krotka-recenzja",
### PUBLIC ###
"FIRST_TIME_INFO": "pierwsze-kroki",
"FAQ": "faq",
"MAIN": "",
"ADMIN": "admin",
"FILM": "film",
"RATE_FILMS": "oceniaj-filmy",
"RATE_FILMS_FAST_FORWARD": "oceniarka",
"RATE_NEXT": "ocen-nastepny",
"PERSON": "osoba",
"SEARCH": "szukaj",
"SEARCH_FILM": "szukaj-filmu",
"SEARCH_PERSON": "szukaj-osoby",
"BLOG": "blog",
"ARTICLE": "artykul",
"CHECKIN": "checkin",
"ARTICLES":"artykuly",
"ARTICLES_OLD":"notki",
"PLANET": "planeta",
"RECENT_ANSWERS": "odpowiedzi",
"PLANET_FOLLOWED": "planeta/obserwowani",
"POSTER": "plakat",
# kokpit
"DASHBOARD": "kokpit",
    # public profiles
"SHOW_PROFILE": "profil",
"LIST_PROFILES": "lista-profili", #TODO: is this required?
"USER_ARTICLES": "artykuly-filmastera", #TODO: is this required?
"FILMS": "filmy",
"AGENDA": "agenda",
    # generic entries, reused in links
"RATING": "ocena",
"EDIT": "edytuj",
"PREVIOUS": "poprzedni",
"NEXT": "nastepny",
"FEED": "feed",
"FILMS_FOR_TAG": "filmy",
"RANKING": "rankingi",
"RATINGS": "oceny",
"RECOMMENDATIONS": "rekomendacje",
"COMPUTE": "przelicz",
"TOP_USERS": "filmasterzy",
"FOLLOWED": "obserwowani",
"FOLLOWERS": "obserwujacy",
"SIMILAR_USERS": "podobni-uzytkownicy",
"SIMILAR_USERS_FOLLOWING": "podobni-uzytkownicy-obserwowani",
"SIMILAR_USERS_FOLLOWED": "podobni-uzytkownicy-obserwujacy",
# "COMPUTE_PROBABLE_SCORES": "wylicz-rekomendacje",
"FILMBASKET": "koszyk",
"OWNED": "kolekcja",
"WISHLIST": "wishlista",
"SHITLIST": "shitlista",
"TAG": "tag",
"SHOW_TAG_PAGE": "tag",
"ADD_TO_BASKET": "dodaj-do-koszyka",
"REGIONAL_INFO": "informacje-regionalne",
"AJAX": "ajax",
    # static pages (TODO: replace with flatpages?)
"TERMS": "regulamin",
"PRIVACY": "prywatnosc",
"LICENSE": "licencja",
"CONTACT": "kontakt",
"ABOUT": "redakcja",
"COOPERATION": "wspolpraca",
"BANNERS": "banery",
"ADVERTISEMENT": "reklama",
"AVATAR_HOWTO": "awatar-howto",
"FORMATTING_POSTS": "formatowanie",
### PRIVATE ###
    # login and registration
"ACCOUNT": "dashboard",
"OPENID_ASSOCIATIONS": "openid/associations",
"ASSIGN_FACEBOOK":"fb/assign_facebook",
"EDIT_FACEBOOK":"fb/edit",
"LOGIN": "konto/login",
"LOGOUT": "konto/logout",
"CHANGE_PASSWORD": "zmien-haslo",
"RESET_PASSWORD": "konto/reset-hasla",
"RESET_PASSWORD_CONFIRM": "konto/reset-hasla/potwierdzenie",
"RESET_PASSWORD_COMPLETE": "konto/reset-hasla/koniec",
"RESET_PASSWORD_DONE": "konto/reset-hasla/sukces",
"REGISTRATION": "konto/rejestracja",
"REGISTRATION_ACTIVATE": "konto/rejestracja/aktywacja",
"REGISTRATION_COMPLETE": "konto/rejestracja/koniec",
"ASSOCIATIONS": "edytuj-powiazane-serwisy",
"OAUTH_LOGIN": "konto/oauth-login",
"OAUTH_LOGIN_CB": "konto/oauth-login-cb",
"OAUTH_NEW_USER": "konto/oauth/nowy",
# friends and invitations
"MANAGE_INVITATIONS": "konto/zaproszenia",
"ACCEPT_INVITATION": "konto/akceptuj-zaproszenie",
"REFUSE_INVITATION": "konto/odrzuc-zaproszenie",
# old notifications - TODO: remove
"NOTIFICATIONS": "konto/powiadomienia",
"NOTIFICATION": "konto/powiadomienia/powiadomienie",
"MARK_NOTIFICATIONS_AS_READ": "konto/powiadomienia/oznacz-jako-przeczytane",
"PW": "pw",
"PW_INBOX": "odebrane",
"PW_OUTBOX": "wyslane",
"PW_COMPOSE": "stworz",
"PW_REPLY": "odpowiedz",
"PW_VIEW": "zobacz",
"PW_DELETE": "usun",
"PW_CONV_DELETE": "usun-watek",
"PW_CONV_VIEW": "zobacz-watek",
"PW_UNDELETE": "przywroc",
"PW_TRASH": "kosz",
#export
"EXPORT_RATINGS":"pobierz",
# profiles
"CREATE_PROFILE": "konto/stworz-profil",
"EDIT_PROFILE": "konto/edytuj-profil",
"EDIT_PROFILE_DONE": "konto/edytuj-profil/sukces",
"EDIT_LOCATION": "edytuj-lokalizacje",
"DELETE_PROFILE": "konto/usun-profil",
"DELETE_PROFILE_DONE": "konto/usun-profil/sukces",
"EDIT_AVATAR": "konto/edytuj-awatar",
"CROP_AVATAR": "konto/wytnij-awatar",
"DELETE_AVATAR": "konto/usun-awatar",
# forum
"FORUM": "forum",
"FORUM_FILMASTER": "forum/forum-filmastera",
"FORUM_HYDE_PARK": "forum/hyde-park",
"EDIT_COMMENT": "edytuj-komentarz",
# user activities
"COMMENTS": "komentarze",
"REVIEWS": "recenzje",
"REVIEW": "recenzja",
"SHORT_REVIEWS": "krotkie-recenzje",
"SHORT_REVIEW": "krotka-recenzja",
# default poster
"DEFAULT_POSTER": "/static/img/default_poster.png",
"DEFAULT_ACTOR": "/static/img/default_actor.png",
#rss
"RSS": "rss",
# special events
"SHOW_EVENT": "wydarzenia",
"SHOW_FESTIVAL": "festiwal",
"ORIGINAL_TITLE": "tytul-oryginalny",
# contest
"SHOW_GAME": "mecz",
"SHOW_CONTEST": "plebiscyt",
"CONTEST_VOTE_AJAX": "vote_ajax",
# add films
"ADD_FILMS":"dodaj-film",
"EDIT_CAST":"edytuj-obsade",
#add links
"ADD_LINKS":"dodaj-link",
"REMOVE_LINKS":"usun-link",
"ADD_VIDEO":"dodaj-video",
"LINKS":"linki",
"LINK":"link",
#nudge button
"NUDGE":"szturchnij",
#follow
"FOLLOW":"obserwuj",
#delete comment
"DELETE_COMMENT":"usun-komentarz",
#moderated photos
"POSTER_ADD":"dodaj-plakat",
"PHOTO_ADD":"dodaj-zdjecie",
"MODERATED_PHOTOS": "plakaty-i-zdjecia",
#moderated films
"MODERATED_FILMS": "filmy",
"MOVIES": "filmy",
"GENRE": "gatunek",
#mobile landing page
"MOBILE":"mobile",
#content moderation
"MODERATION": "moderacja",
#wall
"WALL":"wall",
#settings
"SETTINGS": "ustawienia",
"MANAGE_NOTIFICATIONS": "zarzadzaj-powiadomieniami",
"IMPORT_RATINGS":"importuj-oceny",
#dashboard
"NEW_ARTICLE":"nowy-artykul",
"EDIT_ARTICLE":"edytuj-artykul",
"RATED_FILMS":"oceny",
#showtimes
"SHOWTIMES": "rekomendacje",
"SCREENING": "seanse",
"CINEMAS": "kina",
"CINEMA": "kino",
"CHANNEL": "kanal",
"THEATERS": "kina",
"THEATER": "kino",
"TV": "tv",
"TV_CHANNELS": "kanaly-tv",
# applications
"APPLICATIONS": "aplikacje",
"APPLICATION": "aplikacja",
"ADD_APPLICATION": "dodaj-aplikacje",
"REMOVE_ACCESS_TOKEN": "usun-token",
"REMOVE_APPLICATION": "usun-aplikacje",
}
| [
"[email protected]"
]
| |
fc476732d5002a650fe826d5c6d7ec00bb625f4d | 76de4fc4f00a04c8c9acc1e9e4a5fae12cf0c08a | /trunk/pyformex/examples/Isopar.py | 09660af39e5a8ca12a1bdf071f0b7b4a4e9a1b05 | []
| no_license | BackupTheBerlios/pyformex-svn | ec2361b1b9967918be65e892217a691a6f8b145d | f5404809095711334bbb938d9d119a69ad8fc260 | refs/heads/master | 2020-12-24T13:20:47.422165 | 2011-11-15T11:52:23 | 2011-11-15T11:52:23 | 40,749,266 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,202 | py | #!/usr/bin/env pyformex --gui
# $Id$
##
## This file is part of pyFormex 0.8.5 Sun Nov 6 17:27:05 CET 2011
## pyFormex is a tool for generating, manipulating and transforming 3D
## geometrical models by sequences of mathematical operations.
## Home page: http://pyformex.org
## Project page: https://savannah.nongnu.org/projects/pyformex/
## Copyright (C) Benedict Verhegghe ([email protected])
## Distributed under the GNU General Public License version 3 or later.
##
##
## This program is free software: you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation, either version 3 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program. If not, see http://www.gnu.org/licenses/.
##
"""Isopar
level = 'normal'
topics = ['geometry']
techniques = ['dialog', 'color','isopar']
"""
from plugins import isopar
import simple
import elements
wireframe()
ttype = ask("Select type of transformation",['Cancel','1D','2D','3D'])
if not ttype or ttype == 'Cancel':
exit()
tdim = int(ttype[0])
# create a unit quadratic grid in tdim dimensions
x = Coords(simple.regularGrid([0.]*tdim, [1.]*tdim, [2]*tdim)).reshape(-1,3)
x1 = Formex(x)
x2 = x1.copy()
# move a few points
if tdim == 1:
eltype = 'line3'
x2[1] = x2[1].rot(-22.5)
x2[2] = x2[2].rot(22.5)
elif tdim == 2:
eltype = 'quad9'
x2[5] = x2[2].rot(-22.5)
x2[8] = x2[2].rot(-45.)
x2[7] = x2[2].rot(-67.5)
x2[4] = x2[8] * 0.6
else:
eltype = 'hex27'
tol = 0.01
d = x2.distanceFromPoint(x2[0])
w = where((d > 0.5+tol) * (d < 1.0 - tol))[0]
# avoid error messages during projection
errh = seterr(all='ignore')
x2[w] = x2.projectOnSphere(0.5)[w]
w = where(d > 1.+tol)[0]
x2[w] = x2.projectOnSphere(1.)[w]
seterr(**errh)
clear()
message('This is the set of nodes in natural coordinates')
draw(x1,color=blue)
message('This is the set of nodes in cartesian coordinates')
draw(x2,color=red)
drawNumbers(x2,color=red)
drawNumbers(x1)
n = 8
stype = ask("Select type of structure",['Cancel','1D','2D','3D'])
if not stype or stype == 'Cancel':
exit()
sdim = int(stype[0])
if sdim == 1:
F = simple.line([0.,0.,0.],[1.,1.,0.],10)
elif sdim == 2:
F = simple.rectangle(1,1,1.,1.)
else:
## v = array(elements.Hex8.vertices)
## f = array(elements.Hex8.faces[1])
## F = Formex(v[f])
F = elements.Hex8.toFormex()
if sdim > 1:
for i in range(sdim):
F = F.replic(n,1.,dir=i)
if sdim < tdim:
F = F.trl(2,0.5)
clear()
message('This is the initial Formex')
FA=draw(F)
sz = F.sizes()
if sdim < tdim:
sz[sdim:tdim] = 2.
x1 = x1.scale(sz)
x2 = x2.scale(sz)
G=F.isopar(eltype,x2.points(),x1.points())
G.setProp(1)
message('This is the transformed Formex')
draw(G)
pause()
undraw(FA)
# End
| [
"bverheg@8d6f1305-3bde-0310-9e88-884b4813ce35"
]
| bverheg@8d6f1305-3bde-0310-9e88-884b4813ce35 |
c67201d0dc8eebe1dd5677a52570999536586ff1 | 1a483740de2cb5a158d48eef4fa6be8e2e1cfb39 | /apps/projects/migrations/0001_initial.py | 2a417d2a9ba1638fe7bc788ef2a0512126033851 | []
| no_license | Navajyoth/proman | e8c5ab9a621af98f56ea3771695a23673cf67de6 | 23619138735c9a8317b50df97cdfe27a8180328f | refs/heads/master | 2021-01-10T15:52:01.763875 | 2016-01-19T11:01:39 | 2016-01-19T11:01:39 | 49,870,124 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 513 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='Project',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('project_name', models.CharField(max_length=64, null=True, blank=True)),
],
),
]
| [
"[email protected]"
]
| |
5d7c6cb5d86f6dfeaaa32c199c33c6b3c2bb6f23 | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_7/102.py | 998a2e0917fabf1d52113661b10a59105356d584 | []
| no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 862 | py | import sys
def calc_trees():
"""Read data from stdin and calculate all trees.
Returns a list of coordinates (tuples of ints).
"""
n,A,B,C,D,x,y,M = (int(e) for e in raw_input().split())
trees = [(x, y)]
for i in xrange(n - 1):
x = (A * x + B) % M
y = (C * y + D) % M
trees.append((x, y))
return trees
N = input()
for i in xrange(N):
result = 0
trees = calc_trees()
i1 = 0
for t1 in trees:
i2 = i1 + 1
for t2 in trees[i1 + 1:]:
i3 = i2 + 1
for t3 in trees[i2 + 1:]:
x = (t1[0] + t2[0] + t3[0]) / 3.0
y = (t1[1] + t2[1] + t3[1]) / 3.0
if (x == int(x) and y == int(y)):
result += 1
i3 += 1
i2 += 1
i1 += 1
print "Case #%d: %d" % (i + 1, result)
| [
"[email protected]"
]
| |
099e1e04e14bdbed2f9c2d4cd79c06a2e5cd9ca8 | 33e2c0e11a6fbcc687750dbdcd2e063acf5b931b | /setup.py | 17a2541858522e1e1f5d88fa0f7dd8566e9ffe9f | [
"MIT"
]
| permissive | uk-gov-mirror/nhsengland.ckanext-introjs | f1cb640819f09cdc6d4ecd82818a1e8b4b2653be | 052d20a4e93cf824a1b28e7ea2e04c385615b40d | refs/heads/master | 2021-05-28T17:21:31.739884 | 2015-02-24T14:44:56 | 2015-02-24T14:44:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 888 | py | from setuptools import setup, find_packages
import sys, os
version = '0.1'
setup(
name='ckanext-introjs',
version=version,
description="Adds intro.js to CKAN so users can follow a guided tour of the UI",
long_description='''
''',
classifiers=[], # Get strings from http://pypi.python.org/pypi?%3Aaction=list_classifiers
keywords='',
author='Ano Nymous',
author_email='[email protected]',
url='https://usablica.github.io/intro.js/',
license='MIT',
packages=find_packages(exclude=['ez_setup', 'examples', 'tests']),
namespace_packages=['ckanext', 'ckanext.introjs'],
include_package_data=True,
zip_safe=False,
install_requires=[
# -*- Extra requirements: -*-
],
entry_points='''
[ckan.plugins]
# Add plugins here, e.g.
# myplugin=ckanext.introjs.plugin:PluginClass
''',
)
| [
"[email protected]"
]
| |
a0dfc36982f65573fcad2fea205e8bdce1e056c4 | 3ad8705bf9f1b5563b2a57f4ed2a1ea0f7cc2c2a | /langusta_client/exceptions.py | 4e41f2bb0164828cbe30c6364cd2a2f8dbd20820 | [
"ISC"
]
| permissive | moneypark/langusta-client | 4f9f27d4db28149173d8c96841fc132f50a0a66b | af3ce6504c9f6cc8a0d28832a3218ed99ea2e1cc | refs/heads/master | 2023-03-17T06:15:14.448572 | 2023-03-09T14:10:59 | 2023-03-09T14:10:59 | 47,131,223 | 1 | 5 | ISC | 2023-03-09T14:11:01 | 2015-11-30T16:25:45 | Python | UTF-8 | Python | false | false | 97 | py | class LangustaException(Exception):
pass
class NoPoFilesFound(LangustaException):
pass
| [
"[email protected]"
]
| |
5a0ebc6d298b7b1320a813de51ed196528c6243f | 377420d718094a37da2e170718cecd80435d425a | /google/ads/googleads/v4/services/services/media_file_service/client.py | 0a1348ff6250fc9e53ec0771aa0691cae94c0f4f | [
"Apache-2.0"
]
| permissive | sammillendo/google-ads-python | ed34e737748e91a0fc5716d21f8dec0a4ae088c1 | a39748521847e85138fca593f3be2681352ad024 | refs/heads/master | 2023-04-13T18:44:09.839378 | 2021-04-22T14:33:09 | 2021-04-22T14:33:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 21,377 | py | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
from distutils import util
import os
import re
from typing import Dict, Optional, Sequence, Tuple, Type, Union
from google.api_core import client_options as client_options_lib # type: ignore
from google.api_core import exceptions # type: ignore
from google.api_core import gapic_v1 # type: ignore
from google.api_core import retry as retries # type: ignore
from google.auth import credentials # type: ignore
from google.auth.transport import mtls # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
from google.auth.exceptions import MutualTLSChannelError # type: ignore
from google.oauth2 import service_account # type: ignore
from google.ads.googleads.v4.resources.types import media_file
from google.ads.googleads.v4.services.types import media_file_service
from google.protobuf import wrappers_pb2 as wrappers # type: ignore
from google.rpc import status_pb2 as status # type: ignore
from .transports.base import MediaFileServiceTransport, DEFAULT_CLIENT_INFO
from .transports.grpc import MediaFileServiceGrpcTransport
class MediaFileServiceClientMeta(type):
"""Metaclass for the MediaFileService client.
This provides class-level methods for building and retrieving
support objects (e.g. transport) without polluting the client instance
objects.
"""
_transport_registry = (
OrderedDict()
) # type: Dict[str, Type[MediaFileServiceTransport]]
_transport_registry["grpc"] = MediaFileServiceGrpcTransport
def get_transport_class(
cls, label: str = None,
) -> Type[MediaFileServiceTransport]:
"""Return an appropriate transport class.
Args:
label: The name of the desired transport. If none is
provided, then the first transport in the registry is used.
Returns:
The transport class to use.
"""
# If a specific transport is requested, return that one.
if label:
return cls._transport_registry[label]
# No transport is requested; return the default (that is, the first one
# in the dictionary).
return next(iter(cls._transport_registry.values()))
class MediaFileServiceClient(metaclass=MediaFileServiceClientMeta):
"""Service to manage media files."""
@staticmethod
def _get_default_mtls_endpoint(api_endpoint):
"""Convert api endpoint to mTLS endpoint.
Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
"*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
Args:
api_endpoint (Optional[str]): the api endpoint to convert.
Returns:
str: converted mTLS api endpoint.
"""
if not api_endpoint:
return api_endpoint
mtls_endpoint_re = re.compile(
r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
)
m = mtls_endpoint_re.match(api_endpoint)
name, mtls, sandbox, googledomain = m.groups()
if mtls or not googledomain:
return api_endpoint
if sandbox:
return api_endpoint.replace(
"sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
)
return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
DEFAULT_ENDPOINT = "googleads.googleapis.com"
DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore
DEFAULT_ENDPOINT
)
@classmethod
def from_service_account_info(cls, info: dict, *args, **kwargs):
"""Creates an instance of this client using the provided credentials info.
Args:
info (dict): The service account private key info.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
MediaFileServiceClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_info(
info
)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
@classmethod
def from_service_account_file(cls, filename: str, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
file.
Args:
filename (str): The path to the service account private key json
file.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
MediaFileServiceClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_file(
filename
)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
from_service_account_json = from_service_account_file
@property
def transport(self) -> MediaFileServiceTransport:
"""Return the transport used by the client instance.
Returns:
MediaFileServiceTransport: The transport used by the client instance.
"""
return self._transport
@staticmethod
def media_file_path(customer: str, media_file: str,) -> str:
"""Return a fully-qualified media_file string."""
return "customers/{customer}/mediaFiles/{media_file}".format(
customer=customer, media_file=media_file,
)
@staticmethod
def parse_media_file_path(path: str) -> Dict[str, str]:
"""Parse a media_file path into its component segments."""
m = re.match(
r"^customers/(?P<customer>.+?)/mediaFiles/(?P<media_file>.+?)$",
path,
)
return m.groupdict() if m else {}
@staticmethod
def common_billing_account_path(billing_account: str,) -> str:
"""Return a fully-qualified billing_account string."""
return "billingAccounts/{billing_account}".format(
billing_account=billing_account,
)
@staticmethod
def parse_common_billing_account_path(path: str) -> Dict[str, str]:
"""Parse a billing_account path into its component segments."""
m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_folder_path(folder: str,) -> str:
"""Return a fully-qualified folder string."""
return "folders/{folder}".format(folder=folder,)
@staticmethod
def parse_common_folder_path(path: str) -> Dict[str, str]:
"""Parse a folder path into its component segments."""
m = re.match(r"^folders/(?P<folder>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_organization_path(organization: str,) -> str:
"""Return a fully-qualified organization string."""
return "organizations/{organization}".format(organization=organization,)
@staticmethod
def parse_common_organization_path(path: str) -> Dict[str, str]:
"""Parse a organization path into its component segments."""
m = re.match(r"^organizations/(?P<organization>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_project_path(project: str,) -> str:
"""Return a fully-qualified project string."""
return "projects/{project}".format(project=project,)
@staticmethod
def parse_common_project_path(path: str) -> Dict[str, str]:
"""Parse a project path into its component segments."""
m = re.match(r"^projects/(?P<project>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_location_path(project: str, location: str,) -> str:
"""Return a fully-qualified location string."""
return "projects/{project}/locations/{location}".format(
project=project, location=location,
)
@staticmethod
def parse_common_location_path(path: str) -> Dict[str, str]:
"""Parse a location path into its component segments."""
m = re.match(
r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path
)
return m.groupdict() if m else {}
def __init__(
self,
*,
credentials: Optional[credentials.Credentials] = None,
transport: Union[str, MediaFileServiceTransport, None] = None,
client_options: Optional[client_options_lib.ClientOptions] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiate the media file service client.
Args:
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
transport (Union[str, ~.MediaFileServiceTransport]): The
transport to use. If set to None, a transport is chosen
automatically.
client_options (google.api_core.client_options.ClientOptions): Custom options for the
client. It won't take effect if a ``transport`` instance is provided.
(1) The ``api_endpoint`` property can be used to override the
default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
environment variable can also be used to override the endpoint:
"always" (always use the default mTLS endpoint), "never" (always
use the default regular endpoint) and "auto" (auto switch to the
default mTLS endpoint if client certificate is present, this is
the default value). However, the ``api_endpoint`` property takes
precedence if provided.
(2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
is "true", then the ``client_cert_source`` property can be used
to provide client certificate for mutual TLS transport. If
not provided, the default SSL client certificate will be used if
present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
set, no client certificate will be used.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
Raises:
google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
creation failed for any reason.
"""
if isinstance(client_options, dict):
client_options = client_options_lib.from_dict(client_options)
if client_options is None:
client_options = client_options_lib.ClientOptions()
# Create SSL credentials for mutual TLS if needed.
use_client_cert = bool(
util.strtobool(
os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false")
)
)
ssl_credentials = None
is_mtls = False
if use_client_cert:
if client_options.client_cert_source:
import grpc # type: ignore
cert, key = client_options.client_cert_source()
ssl_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
is_mtls = True
else:
creds = SslCredentials()
is_mtls = creds.is_mtls
ssl_credentials = creds.ssl_credentials if is_mtls else None
# Figure out which api endpoint to use.
if client_options.api_endpoint is not None:
api_endpoint = client_options.api_endpoint
else:
use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto")
if use_mtls_env == "never":
api_endpoint = self.DEFAULT_ENDPOINT
elif use_mtls_env == "always":
api_endpoint = self.DEFAULT_MTLS_ENDPOINT
elif use_mtls_env == "auto":
api_endpoint = (
self.DEFAULT_MTLS_ENDPOINT
if is_mtls
else self.DEFAULT_ENDPOINT
)
else:
raise MutualTLSChannelError(
"Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted values: never, auto, always"
)
# Save or instantiate the transport.
# Ordinarily, we provide the transport, but allowing a custom transport
# instance provides an extensibility point for unusual situations.
if isinstance(transport, MediaFileServiceTransport):
# transport is a MediaFileServiceTransport instance.
if credentials:
raise ValueError(
"When providing a transport instance, "
"provide its credentials directly."
)
self._transport = transport
elif isinstance(transport, str):
Transport = type(self).get_transport_class(transport)
self._transport = Transport(
credentials=credentials, host=self.DEFAULT_ENDPOINT
)
else:
self._transport = MediaFileServiceGrpcTransport(
credentials=credentials,
host=api_endpoint,
ssl_channel_credentials=ssl_credentials,
client_info=client_info,
)
def get_media_file(
self,
request: media_file_service.GetMediaFileRequest = None,
*,
resource_name: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> media_file.MediaFile:
r"""Returns the requested media file in full detail.
Args:
request (:class:`google.ads.googleads.v4.services.types.GetMediaFileRequest`):
The request object. Request message for
[MediaFileService.GetMediaFile][google.ads.googleads.v4.services.MediaFileService.GetMediaFile]
resource_name (:class:`str`):
Required. The resource name of the
media file to fetch.
This corresponds to the ``resource_name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.ads.googleads.v4.resources.types.MediaFile:
A media file.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
if request is not None and any([resource_name]):
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a media_file_service.GetMediaFileRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, media_file_service.GetMediaFileRequest):
request = media_file_service.GetMediaFileRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if resource_name is not None:
request.resource_name = resource_name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.get_media_file]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata(
(("resource_name", request.resource_name),)
),
)
# Send the request.
response = rpc(
request, retry=retry, timeout=timeout, metadata=metadata,
)
# Done; return the response.
return response
def mutate_media_files(
self,
request: media_file_service.MutateMediaFilesRequest = None,
*,
customer_id: str = None,
operations: Sequence[media_file_service.MediaFileOperation] = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> media_file_service.MutateMediaFilesResponse:
r"""Creates media files. Operation statuses are returned.
Args:
request (:class:`google.ads.googleads.v4.services.types.MutateMediaFilesRequest`):
The request object. Request message for
[MediaFileService.MutateMediaFiles][google.ads.googleads.v4.services.MediaFileService.MutateMediaFiles]
customer_id (:class:`str`):
Required. The ID of the customer
whose media files are being modified.
This corresponds to the ``customer_id`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
operations (:class:`Sequence[google.ads.googleads.v4.services.types.MediaFileOperation]`):
Required. The list of operations to
perform on individual media file.
This corresponds to the ``operations`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.ads.googleads.v4.services.types.MutateMediaFilesResponse:
Response message for a media file
mutate.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
if request is not None and any([customer_id, operations]):
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a media_file_service.MutateMediaFilesRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, media_file_service.MutateMediaFilesRequest):
request = media_file_service.MutateMediaFilesRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if customer_id is not None:
request.customer_id = customer_id
if operations is not None:
request.operations = operations
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[
self._transport.mutate_media_files
]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata(
(("customer_id", request.customer_id),)
),
)
# Send the request.
response = rpc(
request, retry=retry, timeout=timeout, metadata=metadata,
)
# Done; return the response.
return response
__all__ = ("MediaFileServiceClient",)
| [
"[email protected]"
]
| |
84f14ff4534964c26f65d38a7312090e521edd05 | 818fbf7e5ad465d81b5841c3b1f3222dec3877e3 | /spotify/urls.py | 73fff59963e36a7d29247497d51fb93a62580135 | []
| no_license | anonshubh/music-controller | 29d2c0be0265588e583ec7cde9578bc59e214d4a | c71329c691f43dff994ef3f69aa78dc438db047b | refs/heads/main | 2023-02-13T22:30:24.102833 | 2021-01-08T13:11:45 | 2021-01-08T13:11:45 | 320,020,376 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 381 | py | from django.urls import path
from . import views
urlpatterns=[
path('get-auth-url/',views.AuthURL.as_view()),
path('redirect/',views.spotify_callback),
path('is-authenticated/',views.IsAuthenticated.as_view()),
path('current-song/',views.CurrentSong.as_view()),
path('play-song/',views.PlaySong.as_view()),
path('pause-song/',views.PauseSong.as_view()),
] | [
"[email protected]"
]
| |
ec6f71dea44bb33840b8a9d2801b571e4dff1aa1 | d2332604fc80b6d622a263b2af644425a7e703de | /fast-track/strings/4_valid_anagram.py | 4b6cd138e04b2cd36803e6c0e4c3755c99e2ed8a | []
| no_license | abhijitdey/coding-practice | b3b83a237c1930266768ce38500d6812fc31c529 | 6ae2a565042bf1d6633cd98ed774e4a77f492cc8 | refs/heads/main | 2023-08-14T23:31:06.090613 | 2021-10-18T21:35:56 | 2021-10-18T21:35:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 598 | py | """
Given two strings s and t, return true if t is an anagram of s, and false otherwise.
"""
def get_char_counts(string):
char_counts = dict()
for char in string:
if char in char_counts:
char_counts[char] += 1
else:
char_counts[char] = 1
return char_counts
def check_anagram(s, t):
if len(s) != len(t) or not s or not t:
return False
s_counts = get_char_counts(s)
t_counts = get_char_counts(t)
for char, count in s_counts.items():
if count != t_counts.get(char, 0):
return False
return True
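if __name__ == "__main__":
    # Minimal self-check (editor addition); expected output: True False
    print(check_anagram("listen", "silent"), check_anagram("rat", "car"))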
| [
"[email protected]"
]
| |
892237619b675d7d223b9efee8985aa62256e138 | ad13583673551857615498b9605d9dcab63bb2c3 | /output/models/ms_data/identity_constraint/id_l086_xsd/__init__.py | 1aefc80d5eb9fa2d66622427e4aa78c72449dfad | [
"MIT"
]
| permissive | tefra/xsdata-w3c-tests | 397180205a735b06170aa188f1f39451d2089815 | 081d0908382a0e0b29c8ee9caca6f1c0e36dd6db | refs/heads/main | 2023-08-03T04:25:37.841917 | 2023-07-29T17:10:13 | 2023-07-30T12:11:13 | 239,622,251 | 2 | 0 | MIT | 2023-07-25T14:19:04 | 2020-02-10T21:59:47 | Python | UTF-8 | Python | false | false | 155 | py | from output.models.ms_data.identity_constraint.id_l086_xsd.id_l086 import (
Root,
T,
Ttype,
)
__all__ = [
"Root",
"T",
"Ttype",
]
| [
"[email protected]"
]
| |
0b70531014e35ca99bb424d18b4ea7de3b40d224 | 164ffe077dde59373ad9fadcfd727f279a1cfe93 | /jni_build/jni/include/tensorflow/examples/how_tos/reading_data/convert_to_records.py | 7794c3f6ea3625880921d442f02c5e4c2c00e81e | []
| no_license | Basofe/Community_Based_Repository_Traffic_Signs | 524a4cfc77dc6ed3b279556e4201ba63ee8cf6bd | a20da440a21ed5160baae4d283c5880b8ba8e83c | refs/heads/master | 2021-01-22T21:17:37.392145 | 2017-09-28T21:35:58 | 2017-09-28T21:35:58 | 85,407,197 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 3,263 | py | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Converts MNIST data to TFRecords file format with Example protos."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tensorflow as tf
from tensorflow.contrib.learn.python.learn.datasets import mnist
SOURCE_URL = 'http://yann.lecun.com/exdb/mnist/'
TRAIN_IMAGES = 'train-images-idx3-ubyte.gz' # MNIST filenames
TRAIN_LABELS = 'train-labels-idx1-ubyte.gz'
TEST_IMAGES = 't10k-images-idx3-ubyte.gz'
TEST_LABELS = 't10k-labels-idx1-ubyte.gz'
tf.app.flags.DEFINE_string('directory', '/tmp/data',
'Directory to download data files and write the '
'converted result')
tf.app.flags.DEFINE_integer('validation_size', 5000,
'Number of examples to separate from the training '
'data for the validation set.')
FLAGS = tf.app.flags.FLAGS
def _int64_feature(value):
return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))
def _bytes_feature(value):
return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
def convert_to(data_set, name):
images = data_set.images
labels = data_set.labels
num_examples = data_set.num_examples
if images.shape[0] != num_examples:
raise ValueError('Images size %d does not match label size %d.' %
(images.shape[0], num_examples))
rows = images.shape[1]
cols = images.shape[2]
depth = images.shape[3]
filename = os.path.join(FLAGS.directory, name + '.tfrecords')
print('Writing', filename)
writer = tf.python_io.TFRecordWriter(filename)
for index in range(num_examples):
image_raw = images[index].tostring()
example = tf.train.Example(features=tf.train.Features(feature={
'height': _int64_feature(rows),
'width': _int64_feature(cols),
'depth': _int64_feature(depth),
'label': _int64_feature(int(labels[index])),
'image_raw': _bytes_feature(image_raw)}))
writer.write(example.SerializeToString())
writer.close()
def main(argv):
# Get the data.
data_sets = mnist.read_data_sets(FLAGS.directory,
dtype=tf.uint8,
reshape=False)
# Convert to Examples and write the result to TFRecords.
convert_to(data_sets.train, 'train')
convert_to(data_sets.validation, 'validation')
convert_to(data_sets.test, 'test')
if __name__ == '__main__':
tf.app.run()
| [
"[email protected]"
]
| |
867d461fb328950aa3f4006290a8ecddd6bf3993 | 0a43afbcba776ed8ada0fef5425b1507aa4d51c1 | /smartbook/smartbook/sales/views.py | e49dfbf1221388510021574678f4a94867d10fe4 | []
| no_license | geethusuresh/inventory-systems | c76d6d10429f483499594df8c8f34d780531f18c | fd4211d29042776fa47da92162cbbbe8220090cd | refs/heads/master | 2021-01-02T08:51:31.278578 | 2014-09-28T07:35:54 | 2014-09-28T07:35:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 38,507 | py | # Create your views here.
import sys
import ast
import simplejson
import datetime as dt
from datetime import datetime
from decimal import *
from num2words import num2words
from django.db import IntegrityError
from django.db.models import Max
from django.contrib.auth.views import password_reset
from django.shortcuts import get_object_or_404, render
from django.views.generic.base import View
from django.http import Http404, HttpResponse, HttpResponseRedirect
from django.contrib.auth import authenticate, login, logout
from django.core.urlresolvers import reverse
from django.contrib.auth.models import User
from django.conf import settings
from sales.models import Sales, SalesItem, SalesReturn, SalesReturnItem, Quotation, QuotationItem, DeliveryNote, SalesInvoice
from inventory.models import Item, Inventory
from web.models import Customer, Staff, OwnerCompany
from reportlab.lib.units import cm, inch
from reportlab.pdfgen import canvas
from reportlab.pdfgen.canvas import Canvas
from reportlab.platypus import Frame, Image, Table, TableStyle
from reportlab.lib import colors
from reportlab.lib.pagesizes import letter, A4
try:
    from cStringIO import StringIO
except ImportError:
    from StringIO import StringIO
class SalesEntry(View):
def get(self, request, *args, **kwargs):
current_date = dt.datetime.now().date()
sales_invoice_number = Sales.objects.aggregate(Max('id'))['id__max']
if not sales_invoice_number:
sales_invoice_number = 1
else:
sales_invoice_number = sales_invoice_number + 1
return render(request, 'sales/sales_entry.html',{
'sales_invoice_number': sales_invoice_number,
'current_date': current_date.strftime('%d/%m/%Y'),
})
def post(self, request, *args, **kwargs):
sales_dict = ast.literal_eval(request.POST['sales'])
sales, sales_created = Sales.objects.get_or_create(sales_invoice_number=sales_dict['sales_invoice_number'])
sales.sales_invoice_number = sales_dict['sales_invoice_number']
sales.sales_invoice_date = datetime.strptime(sales_dict['sales_invoice_date'], '%d/%m/%Y')
customer = Customer.objects.get(customer_name=sales_dict['customer'])
salesman = Staff.objects.get(user__first_name=sales_dict['staff'])
sales.discount = sales_dict['net_discount']
sales.round_off = sales_dict['roundoff']
sales.net_amount = sales_dict['net_total']
sales.grant_total = sales_dict['grant_total']
sales.customer = customer
sales.salesman = salesman
sales.save()
sales_items = sales_dict['sales_items']
for sales_item in sales_items:
item = Item.objects.get(code=sales_item['item_code'])
s_item, item_created = SalesItem.objects.get_or_create(item=item, sales=sales)
inventory, created = Inventory.objects.get_or_create(item=item)
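            # on a new sale just deduct the sold quantity; on an edit, first
            # restore the quantity previously sold for this item, then deduct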
if sales_created:
inventory.quantity = inventory.quantity - int(sales_item['qty_sold'])
else:
inventory.quantity = inventory.quantity + s_item.quantity_sold - int(sales_item['qty_sold'])
inventory.save()
s_item.sales = sales
s_item.item = item
s_item.quantity_sold = sales_item['qty_sold']
s_item.discount_given = sales_item['disc_given']
s_item.net_amount = sales_item['net_amount']
s_item.save()
sales_invoice = SalesInvoice.objects.create(sales=sales)
sales.save()
sales_invoice.date = sales.sales_invoice_date
sales_invoice.customer = sales.customer
sales_invoice.invoice_no = sales.sales_invoice_number
sales_invoice.save()
res = {
'result': 'Ok',
'sales_invoice_id': sales_invoice.id,
}
response = simplejson.dumps(res)
status_code = 200
return HttpResponse(response, status = status_code, mimetype="application/json")
class SalesReturnView(View):
def get(self, request, *args, **kwargs):
        if SalesReturn.objects.exists():
            invoice_number = int(SalesReturn.objects.aggregate(Max('return_invoice_number'))['return_invoice_number__max']) + 1
        else:
            invoice_number = 1
return render(request, 'sales/return_entry.html', {
'invoice_number' : invoice_number,
})
def post(self, request, *args, **kwargs):
post_dict = request.POST['sales_return']
post_dict = ast.literal_eval(post_dict)
sales = Sales.objects.get(sales_invoice_number=post_dict['sales_invoice_number'])
sales_return, created = SalesReturn.objects.get_or_create(sales=sales, return_invoice_number = post_dict['invoice_number'])
sales_return.date = datetime.strptime(post_dict['sales_return_date'], '%d/%m/%Y')
sales_return.net_amount = post_dict['net_return_total']
sales_return.save()
return_items = post_dict['sales_items']
for item in return_items:
return_item = Item.objects.get(code=item['item_code'])
s_return_item, created = SalesReturnItem.objects.get_or_create(item=return_item, sales_return=sales_return)
s_return_item.amount = item['returned_amount']
s_return_item.return_quantity = item['returned_quantity']
s_return_item.save()
inventory = Inventory.objects.get(item=return_item)
inventory.quantity = inventory.quantity + int(item['returned_quantity'])
inventory.save()
        res = {
            'result': 'Ok',
        }
        # the original passed the raw dict to HttpResponse; it must be serialised first
        response = simplejson.dumps(res)
        status_code = 200
        return HttpResponse(response, status = status_code, mimetype="application/json")
class ViewSales(View):
def get(self, request, *args, **kwargs):
return render(request, 'sales/view_sales.html',{})
class SalesDetails(View):
def get(self, request, *args, **kwargs):
if request.is_ajax():
invoice_number = request.GET['invoice_no']
            try:
                sales = Sales.objects.get(sales_invoice_number=invoice_number)
            except Sales.DoesNotExist:
                sales = None
if sales:
sales_items = SalesItem.objects.filter(sales=sales)
sl_items = []
for item in sales_items:
sl_items.append({
'item_code': item.item.code,
'item_name': item.item.name,
'barcode': item.item.barcode,
'stock': item.item.inventory_set.all()[0].quantity,
'unit_price': item.item.inventory_set.all()[0].selling_price,
'tax': item.item.tax,
'uom': item.item.uom.uom,
'quantity_sold': item.quantity_sold,
'discount_given': item.discount_given,
})
sales_dict = {
'invoice_number': sales.sales_invoice_number,
'sales_invoice_date': sales.sales_invoice_date.strftime('%d/%m/%Y'),
'customer': sales.customer.customer_name,
'sales_man': sales.salesman.user.first_name,
'net_amount': sales.net_amount,
'round_off': sales.round_off,
'grant_total': sales.grant_total,
'discount': sales.discount,
'sales_items': sl_items
}
res = {
'result': 'Ok',
'sales': sales_dict
}
else:
res = {
'result': 'No Sales entry for this invoice number',
}
response = simplejson.dumps(res)
status_code = 200
return HttpResponse(response, status = status_code, mimetype="application/json")
return render(request, 'sales/view_sales.html',{})
class CreateQuotation(View):
def get(self, request, *args, **kwargs):
current_date = dt.datetime.now().date()
ref_number = Quotation.objects.aggregate(Max('id'))['id__max']
if not ref_number:
ref_number = 1
prefix = 'QO'
else:
ref_number = ref_number + 1
prefix = Quotation.objects.latest('id').prefix
reference_number = prefix + str(ref_number)
context = {
'current_date': current_date.strftime('%d-%m-%Y'),
'reference_number': reference_number,
}
return render(request, 'sales/create_quotation.html', context)
def post(self, request, *args, **kwargs):
if request.is_ajax():
quotation_data = ast.literal_eval(request.POST['quotation'])
quotation, quotation_created = Quotation.objects.get_or_create(reference_id=quotation_data['reference_no'])
quotation.date = datetime.strptime(quotation_data['date'], '%d-%m-%Y')
quotation.attention = quotation_data['attention']
quotation.subject = quotation_data['subject']
quotation.net_total = quotation_data['total_amount']
quotation.save()
customer = Customer.objects.get(customer_name=quotation_data['customer'])
quotation.to = customer
quotation.save()
quotation_data_items = quotation_data['sales_items']
for quotation_item in quotation_data_items:
item = Item.objects.get(code=quotation_item['item_code'])
quotation_item_obj, item_created = QuotationItem.objects.get_or_create(item=item, quotation=quotation)
inventory, created = Inventory.objects.get_or_create(item=item)
inventory.quantity = inventory.quantity - int(quotation_item['qty_sold'])
inventory.save()
quotation_item_obj.net_amount = float(quotation_item['net_amount'])
quotation_item_obj.quantity_sold = int(quotation_item['qty_sold'])
quotation_item_obj.save()
res = {
'result': 'OK',
'quotation_id': quotation.id,
}
response = simplejson.dumps(res)
return HttpResponse(response, status=200, mimetype='application/json')
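    # The quotation payload mirrors the sales payload in SalesEntry.post:
    # reference_no, date ('%d-%m-%Y' here), attention, subject, total_amount,
    # customer, and a 'sales_items' list of item_code / qty_sold / net_amount.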
class DeliveryNotePDF(View):
def get(self, request, *args, **kwargs):
delivery_note_id = kwargs['delivery_note_id']
delivery_note = DeliveryNote.objects.get(id=delivery_note_id)
response = HttpResponse(content_type='application/pdf')
p = canvas.Canvas(response, pagesize=(1000, 1000))
status_code = 200
# p.drawString(100, 950, "SUNLIGHT STATIONARY")
# p.drawString(100, 930, "Colour Printing, Photo Copy, Spiral Binding")
# p.drawString(100, 910, "Tender Document and Printing Service")
# p.drawString(100, 890, "Tel. : +971-2-6763571")
# p.drawString(100, 870, "Fax : +971-2-6763581")
# p.drawString(100, 850, "E-mail : [email protected]")
# p.drawString(100, 830, "P.O.Box : 48296")
# p.drawString(100, 810, "Behind Russian Embassy")
# p.drawString(100, 790, "Ziyani, Abu Dhabi, U.A.E.")
# p.drawString(100, 700, "No. ")
# p.drawString(700, 700, "Date : ....................................")
# p.drawString(700, 680, "L.P.O. No : ............................")
# p.drawString(100, 650, "Mr.M/s.......................................................................................................................................................................................................................")
# data=[['Sl.No:', 'Description', 'Qty', 'Remarks']]
# table = Table(data, colWidths=[100, 400, 100, 150], rowHeights=40)
# table.setStyle(TableStyle([
# # ('INNERGRID', (0,0), (0,0), 0.25, colors.black),
# # ('INNERGRID', (0,1), (-1,-1), 0.25, colors.black),
# ('BOX', (0,0), (-1,-1), 0.25, colors.black),
# # ('LINEBEFORE',(1,1), (-1,-1),1,colors.black),
# # ('BACKGROUND',(0,0),(1,0),colors.lightgrey),
# # ('ALIGN', (1,1), (-1,-1),'CENTRE'),
# ]))
# table.wrapOn(p, 200, 400)
# table.drawOn(p,105,500)
        y = 700
        i = 1
for q_item in delivery_note.quotation.quotationitem_set.all():
y=y-40
data1=[[i, q_item.item.name, q_item.quantity_sold, '']]
table = Table(data1, colWidths=[100, 400, 100, 150], rowHeights=40)
# table.setStyle(TableStyle([
# # ('INNERGRID', (0,0), (0,0), 0.25, colors.black),
# # ('INNERGRID', (0,1), (-1,-1), 0.25, colors.black),
# ('BOX', (0,0), (-1,-1), 0.25, colors.black),
# # ('BACKGROUND',(0,0),(1,0),colors.lightgrey)
# ]))
table.wrapOn(p, 200, 600)
table.drawOn(p, 105, y)
i = i + 1
p.showPage()
p.save()
return response
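# ReportLab pattern shared by the PDF views in this module: build a Table from a
# list of rows, size it with wrapOn(canvas, availWidth, availHeight), then place
# it at absolute page coordinates with drawOn(canvas, x, y). Minimal sketch:
#   table = Table([['a', 'b']], colWidths=[100, 100], rowHeights=40)
#   table.wrapOn(p, 200, 400)
#   table.drawOn(p, 105, 500)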
class CreateQuotationPdf(View):
def get(self, request, *args, **kwargs):
quotation_id = kwargs['quotation_id']
quotation = Quotation.objects.get(id=quotation_id)
response = HttpResponse(content_type='application/pdf')
p = canvas.Canvas(response, pagesize=(1000, 1000))
status_code = 200
y = 850
# p.drawInlineImage(self, 1.jpg, 80,y, width=None,height=None)
try:
owner_company = OwnerCompany.objects.latest('id')
if owner_company.logo:
path = settings.PROJECT_ROOT.replace("\\", "/")+"/media/"+owner_company.logo.name
p.drawImage(path, 7*cm, 30*cm, width=20*cm, preserveAspectRatio=True)
        except Exception:
pass
p.roundRect(80, y-130, 840, 0.5*inch, 10, stroke=1, fill=0)
p.drawString(400, 735, "QUOTATION")
p.roundRect(80, y-250, 840, 120, 20, stroke=1, fill=0)
data=[['To :', quotation.to.customer_name]]
table = Table(data, colWidths=[100, 400], rowHeights=40)
table.wrapOn(p, 200, 400)
table.drawOn(p,160, 680)
data=[['Attention :', quotation.attention]]
table = Table(data, colWidths=[100, 400], rowHeights=40)
table.wrapOn(p, 200, 400)
table.drawOn(p,160, 650)
data=[['Subject :', quotation.subject]]
table = Table(data, colWidths=[100, 400], rowHeights=40)
table.wrapOn(p, 200, 400)
table.drawOn(p,160, 620)
data=[['Date :', quotation.date.strftime('%d-%m-%Y')]]
table = Table(data, colWidths=[100, 400], rowHeights=40)
table.wrapOn(p, 200, 400)
table.drawOn(p,700, 680)
data=[['Ref.id :', quotation.reference_id]]
table = Table(data, colWidths=[100, 400], rowHeights=40)
table.wrapOn(p, 200, 400)
table.drawOn(p,700, 650)
data=[['Sl.No:', 'Description', 'Qty', 'Unit Price', 'Amount(AED)']]
table = Table(data, colWidths=[100, 400, 100, 100, 100], rowHeights=40)
table.setStyle(TableStyle([
('BOX', (0,0), (-1,-1), 0.25, colors.black),
('LINEBEFORE',(1,0), (0,-1),1,colors.black),
]))
table.wrapOn(p, 200, 400)
table.drawOn(p,105,500)
        x = 500
        i = 1
for q_item in quotation.quotationitem_set.all():
x=x-40
data1=[[i, q_item.item.name, q_item.quantity_sold, q_item.item.inventory_set.all()[0].unit_price, q_item.net_amount]]
table = Table(data1, colWidths=[100, 400, 100, 100, 100], rowHeights=40)
table.setStyle(TableStyle([
# ('INNERGRID', (0,0), (0,0), 0.25, colors.black),
# ('INNERGRID', (0,1), (-1,-1), 0.25, colors.black),
('BOX', (0,0), (-1,-1), 0.25, colors.black),
# ('BACKGROUND',(0,0),(1,0),colors.lightgrey)
]))
# table.wrapOn(p, 300, 200)
table.wrapOn(p, 200, 400)
# table.drawOn(p,105,460)
table.drawOn(p,105, x)
i = i + 1
data=[['Total', quotation.net_total]]
table = Table(data, colWidths=[700, 100], rowHeights=40)
table.setStyle(TableStyle([
# ('INNERGRID', (0,0), (0,0), 0.25, colors.black),
# ('INNERGRID', (0,1), (-1,-1), 0.25, colors.black),
('BOX', (0,0), (-1,-1), 0.25, colors.black),
# ('BACKGROUND',(0,0),(1,0),colors.lightgrey),
('ALIGN', (0,0), (-1,-1),'RIGHT'),
]))
table.wrapOn(p, 200, 400)
table.drawOn(p,105,x-40)
p.drawString(160, x-80, "Hope the above quoted prices will meet your satisfaction and for further information please do not hesitate to contact us.")
p.drawString(160, 220, "For")
p.drawString(160, 200, "Sunlight Stationary")
p.drawString(160, 120, "Authorized Signatory")
p.drawString(700, 120, "Prepared By")
data=[['Tel: +971-2-6763571, Fax : +971-2-6763581,P.O.Box : 48296, Abu Dhabi, United Arab Emirates']]
table = Table(data, colWidths=[700], rowHeights=30)
table.setStyle(TableStyle([
# ('BOX', (0,0), (-1,-1), 0.25, colors.black),
('ALIGN',(0,0), (-1,-1),'CENTRE'),
]))
table.wrapOn(p, 200, 400)
table.drawOn(p,160, 50)
p.showPage()
p.save()
return response
class CreateDeliveryNote(View):
def get(self, request, *args, **kwargs):
current_date = dt.datetime.now().date()
ref_number = DeliveryNote.objects.aggregate(Max('id'))['id__max']
if not ref_number:
ref_number = 1
prefix = 'DN'
else:
ref_number = ref_number + 1
prefix = DeliveryNote.objects.latest('id').prefix
delivery_no = prefix + str(ref_number)
context = {
'current_date': current_date.strftime('%d-%m-%Y'),
'delivery_no': delivery_no,
}
return render(request, 'sales/create_delivery_note.html', context)
def post(self, request, *args, **kwargs):
if request.is_ajax():
quotation_details = ast.literal_eval(request.POST['quotation'])
delivery_note_details = ast.literal_eval(request.POST['delivery_note'])
quotation = Quotation.objects.get(reference_id=delivery_note_details['quotation_no'])
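            # Reconcile stock with any client-side edits: give back the quantity
            # previously reserved for each quotation item, then deduct the newly
            # submitted quantity.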
for q_item in quotation.quotationitem_set.all():
for item_data in quotation_details['sales_items']:
if q_item.item.code == item_data['item_code']:
if q_item.quantity_sold != int(item_data['qty_sold']):
item = q_item.item
inventory, created = Inventory.objects.get_or_create(item=item)
inventory.quantity = inventory.quantity + int(q_item.quantity_sold)
inventory.save()
inventory.quantity = inventory.quantity - int(item_data['qty_sold'])
inventory.save()
q_item.quantity_sold = int(item_data['qty_sold'])
q_item.save()
if q_item.discount != float(item_data['disc_given']):
q_item.discount = item_data['disc_given']
q_item.save()
if q_item.net_amount != float(item_data['net_amount']):
q_item.net_amount = item_data['net_amount']
q_item.save()
if quotation.net_total != float(quotation_details['net_total']):
quotation.net_total = quotation_details['net_total']
quotation.save()
delivery_note, created = DeliveryNote.objects.get_or_create(quotation=quotation)
quotation.processed = True
quotation.save()
delivery_note.quotation = quotation
delivery_note.customer = quotation.to
delivery_note.date = datetime.strptime(delivery_note_details['date'], '%d-%m-%Y')
delivery_note.lpo_number = delivery_note_details['lpo_no']
delivery_note.delivery_note_number = delivery_note_details['delivery_note_no']
delivery_note.save()
res = {
'result': 'ok',
'delivery_note_id': delivery_note.id
}
response = simplejson.dumps(res)
return HttpResponse(response, status=200, mimetype='application/json')
class QuotationDetails(View):
    def get(self, request, *args, **kwargs):
        # 'ref_number' was undefined in the original handler; a 'reference_no'
        # query parameter is assumed here to restore the intended prefix filter
        # (the unused in_sales_invoice_creation variable was dropped).
        ref_number = request.GET.get('reference_no', '')
        sales_invoice_creation = request.GET.get('sales_invoice', '')
        if sales_invoice_creation == 'true':
            quotations = Quotation.objects.filter(reference_id__istartswith=ref_number, is_sales_invoice_created=False)
        else:
            quotations = Quotation.objects.filter(reference_id__istartswith=ref_number, processed=False, is_sales_invoice_created=False)
quotation_list = []
for quotation in quotations:
item_list = []
            i = 1
for q_item in quotation.quotationitem_set.all():
item_list.append({
'sl_no': i,
'item_name': q_item.item.name,
'item_code': q_item.item.code,
'barcode': q_item.item.barcode,
'item_description': q_item.item.description,
'qty_sold': q_item.quantity_sold,
'tax': q_item.item.tax,
'uom': q_item.item.uom.uom,
'current_stock': q_item.item.inventory_set.all()[0].quantity if q_item.item.inventory_set.count() > 0 else 0 ,
'selling_price': q_item.item.inventory_set.all()[0].selling_price if q_item.item.inventory_set.count() > 0 else 0 ,
'discount_permit': q_item.item.inventory_set.all()[0].discount_permit_percentage if q_item.item.inventory_set.count() > 0 else 0,
'net_amount': q_item.net_amount,
'discount_given': q_item.discount,
})
i = i + 1
quotation_list.append({
'ref_no': quotation.reference_id,
'customer': quotation.to.customer_name if quotation.to else '' ,
'items': item_list,
'net_total': quotation.net_total,
'delivery_no': quotation.deliverynote_set.all()[0].delivery_note_number if quotation.deliverynote_set.all().count() > 0 else 0,
})
res = {
'quotations': quotation_list,
'result': 'ok',
}
response = simplejson.dumps(res)
return HttpResponse(response, status=200, mimetype='application/json')
class DeliveryNoteDetails(View):
def get(self, request, *args, **kwargs):
delivery_no = request.GET.get('delivery_no', '')
delivery_note_details = DeliveryNote.objects.filter(delivery_note_number__istartswith=delivery_no, processed=False)
delivery_note_list = []
for delivery_note in delivery_note_details:
            i = 1
item_list = []
for q_item in delivery_note.quotation.quotationitem_set.all():
item_list.append({
'sl_no': i,
'item_name': q_item.item.name,
'item_code': q_item.item.code,
'barcode': q_item.item.barcode,
'item_description': q_item.item.description,
'qty_sold': q_item.quantity_sold,
'tax': q_item.item.tax,
'uom': q_item.item.uom.uom,
'current_stock': q_item.item.inventory_set.all()[0].quantity if q_item.item.inventory_set.count() > 0 else 0 ,
'selling_price': q_item.item.inventory_set.all()[0].selling_price if q_item.item.inventory_set.count() > 0 else 0 ,
'discount_permit': q_item.item.inventory_set.all()[0].discount_permit_percentage if q_item.item.inventory_set.count() > 0 else 0,
'net_amount': q_item.net_amount,
'discount_given': q_item.discount,
})
i = i + 1
delivery_note_list.append({
'ref_no': delivery_note.quotation.reference_id,
'customer': delivery_note.quotation.to.customer_name if delivery_note.quotation.to else '' ,
'items': item_list,
'net_total': delivery_note.quotation.net_total,
'delivery_no': delivery_note.delivery_note_number,
})
res = {
'delivery_notes': delivery_note_list,
'result': 'ok',
}
response = simplejson.dumps(res)
return HttpResponse(response, status=200, mimetype='application/json')
class QuotationDeliverynoteSales(View):
def get(self, request, *args, **kwargs):
current_date = dt.datetime.now().date()
inv_number = SalesInvoice.objects.aggregate(Max('id'))['id__max']
if not inv_number:
inv_number = 1
prefix = 'SI'
else:
inv_number = inv_number + 1
prefix = SalesInvoice.objects.latest('id').prefix
invoice_number = prefix + str(inv_number)
return render(request, 'sales/create_sales_entry.html',{
'sales_invoice_number': invoice_number,
'current_date': current_date.strftime('%d/%m/%Y'),
})
def post(self, request, *args, **kwargs):
sales_dict = ast.literal_eval(request.POST['sales'])
sales, sales_created = Sales.objects.get_or_create(sales_invoice_number=sales_dict['sales_invoice_number'])
sales.sales_invoice_number = sales_dict['sales_invoice_number']
sales.sales_invoice_date = datetime.strptime(sales_dict['sales_invoice_date'], '%d/%m/%Y')
quotation = Quotation.objects.get(reference_id=sales_dict['quotation_ref_no'])
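        # Same stock-reconciliation pattern as CreateDeliveryNote.post: restore
        # the quantity previously taken for the quotation item, then deduct the
        # quantity submitted with this invoice.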
for q_item in quotation.quotationitem_set.all():
for item_data in sales_dict['sales_items']:
if q_item.item.code == item_data['item_code']:
if q_item.quantity_sold != int(item_data['qty_sold']):
item = q_item.item
inventory, created = Inventory.objects.get_or_create(item=item)
inventory.quantity = inventory.quantity + int(q_item.quantity_sold)
inventory.save()
inventory.quantity = inventory.quantity - int(item_data['qty_sold'])
inventory.save()
q_item.quantity_sold = int(item_data['qty_sold'])
q_item.save()
if q_item.discount != float(item_data['disc_given']):
q_item.discount = item_data['disc_given']
q_item.save()
if q_item.net_amount != float(item_data['net_amount']):
q_item.net_amount = item_data['net_amount']
q_item.save()
if quotation.net_total != float(sales_dict['net_total']):
quotation.net_total = sales_dict['net_total']
quotation.save()
sales.quotation = quotation
        if sales_dict['delivery_no'] != 0:
delivery_note, delivery_note_created = DeliveryNote.objects.get_or_create(delivery_note_number=sales_dict['delivery_no'], quotation=quotation)
# if delivery_note_created:
# delivery_note.customer = quotation.to
# delivery_note.date = datetime.strptime(sales_dict['sales_invoice_date'], '%d/%m/%Y')
# ref_number = DeliveryNote.objects.aggregate(Max('id'))['id__max']
# if not ref_number:
# ref_number = 1
# prefix = 'DN'
# else:
# ref_number = ref_number + 1
# prefix = DeliveryNote.objects.latest('id').prefix
# delivery_no = prefix + str(ref_number)
# delivery_note.delivery_note_number = delivery_no
# delivery_note.save()
sales.delivery_note = delivery_note
sales.customer = quotation.to
sales.save()
salesman = Staff.objects.get(user__first_name=sales_dict['staff'])
sales.discount = sales_dict['net_discount']
sales.round_off = sales_dict['roundoff']
sales.net_amount = sales_dict['net_total']
sales.grant_total = sales_dict['grant_total']
sales.salesman = salesman
sales.payment_mode = sales_dict['payment_mode']
if sales_dict['payment_mode'] == 'card':
sales.card_number = sales_dict['card_number']
sales.bank_name = sales_dict['bank_name']
sales.save()
sales_items = sales_dict['sales_items']
        for sales_item in sales_items:
            item = Item.objects.get(code=sales_item['item_code'])
            s_item, item_created = SalesItem.objects.get_or_create(item=item, sales=sales)
            # stock was already reconciled against the quotation items above, so the
            # per-item inventory adjustment used in SalesEntry.post is skipped here
            s_item.quantity_sold = sales_item['qty_sold']
            s_item.discount_given = sales_item['disc_given']
            s_item.net_amount = sales_item['net_amount']
            s_item.save()
# Creating sales invoice
sales_invoice = SalesInvoice.objects.create(quotation=quotation, sales=sales)
        if sales_dict['delivery_no'] != 0:
delivery_note.processed = True
delivery_note.save()
quotation.is_sales_invoice_created = True
quotation.save()
        if sales_dict['delivery_no'] != 0:
sales.delivery_note = delivery_note
sales.quotation = quotation
sales.save()
sales_invoice.sales = sales
        if sales_dict['delivery_no'] != 0:
sales_invoice.delivery_note = delivery_note
sales_invoice.quotation = quotation
sales_invoice.date = datetime.strptime(sales_dict['sales_invoice_date'], '%d/%m/%Y')
sales_invoice.customer = quotation.to
sales_invoice.invoice_no = sales_dict['sales_invoice_number']
sales_invoice.save()
res = {
'result': 'Ok',
'sales_invoice_id': sales_invoice.id,
}
response = simplejson.dumps(res)
status_code = 200
return HttpResponse(response, status = status_code, mimetype="application/json")
class CreateSalesInvoicePDF(View):
def get(self, request, *args, **kwargs):
sales_invoice_id = kwargs['sales_invoice_id']
sales_invoice = SalesInvoice.objects.get(id=sales_invoice_id)
sales = sales_invoice.sales
response = HttpResponse(content_type='application/pdf')
p = canvas.Canvas(response, pagesize=(1000, 1100))
status_code = 200
y = 1100
style = [
('FONTSIZE', (0,0), (-1, -1), 20),
('FONTNAME',(0,0),(-1,-1),'Helvetica')
]
new_style = [
('FONTSIZE', (0,0), (-1, -1), 30),
('FONTNAME',(0,0),(-1,-1),'Helvetica')
]
data=[['', sales_invoice.date.strftime('%d-%m-%Y'), 'INVOICE', sales_invoice.invoice_no]]
table = Table(data, colWidths=[30, 310, 500, 100], rowHeights=50, style = style)
table.wrapOn(p, 200, 400)
table.drawOn(p,50, 930)
# data=[['', '', ]]
# table = Table(data, colWidths=[30, 60, 710, 100], rowHeights=50, style = style)
# table.wrapOn(p, 200, 400)
# table.drawOn(p,50, 830)
quotation = sales_invoice.quotation
customer_name = ''
if sales_invoice.customer:
customer_name = sales_invoice.customer.customer_name
data=[['', customer_name, sales_invoice.delivery_note.lpo_number if sales_invoice.delivery_note else '' ]]
# data=[['', customer_name, 'Lpo']]
table = Table(data, colWidths=[30, 510, 100], rowHeights=40, style = style)
table.wrapOn(p, 200, 400)
table.drawOn(p,50, 890)
data=[['', '', sales_invoice.date.strftime('%d-%m-%Y')]]
# table = Table(data, colWidths=[450, 60, 100], rowHeights=40, style = style)
table = Table(data, colWidths=[450, 90, 100], rowHeights=50, style = style)
table.wrapOn(p, 200, 400)
table.drawOn(p,50, 860)
if sales_invoice.quotation or sales_invoice.delivery_note:
data=[['', '', sales_invoice.delivery_note.delivery_note_number if sales_invoice.delivery_note else sales_invoice.quotation.reference_id]]
# table = Table(data, colWidths=[450, 60, 100], rowHeights=40, style = style)
# table.wrapOn(p, 200, 400)
# table.drawOn(p,100, 620)
table = Table(data, colWidths=[450, 90, 100], rowHeights=40, style = style)
table.wrapOn(p, 200, 400)
table.drawOn(p,50, 830)
        x = 760
        i = 1
TWOPLACES = Decimal(10) ** -2
total_amount = 0
for s_item in sales.salesitem_set.all():
x=x-30
item_price = s_item.item.inventory_set.all()[0].selling_price
total_amount = total_amount + (item_price*s_item.quantity_sold)
# final_price = item_price+(item_price*(s_item.item.tax/100))
# data1=[[i, s_item.item.code, s_item.item.name, s_item.quantity_sold, s_item.item.uom.uom, s_item.item.inventory_set.all()[0].unit_price, Decimal((final_price*s_item.quantity_sold)).quantize(TWOPLACES)]]
data1=[[i, s_item.item.code, s_item.item.name, s_item.quantity_sold, s_item.item.uom.uom, s_item.item.inventory_set.all()[0].unit_price, (item_price*s_item.quantity_sold)]]
table = Table(data1, colWidths=[50, 100, 440, 80, 90, 100, 50], rowHeights=40, style=style)
table.wrapOn(p, 200, 400)
table.drawOn(p,10,x)
i = i + 1
x=600
total_amount_in_words = num2words(total_amount).title() + ' Only'
data=[[total_amount_in_words, total_amount]]
# table = Table(data, colWidths=[450, 60, 100], rowHeights=40, style = style)
table = Table(data, colWidths=[500, 50], rowHeights=40, style = style)
table.wrapOn(p, 200, 100)
table.drawOn(p, 400, 10)
p.showPage()
p.save()
return response
class ReceiptVoucher(View):
def get(self, request, *args, **kwargs):
current_date = dt.datetime.now().date()
# inv_number = SalesInvoice.objects.aggregate(Max('id'))['id__max']
# if not inv_number:
# inv_number = 1
# prefix = 'SI'
# else:
# inv_number = inv_number + 1
# prefix = SalesInvoice.objects.latest('id').prefix
# invoice_number = prefix + str(inv_number)
return render(request, 'sales/create_receipt_voucher.html',{
# 'sales_invoice_number': invoice_number,
'current_date': current_date.strftime('%d/%m/%Y'),
})
class InvoiceDetails(View):
def get(self, request, *args, **kwargs):
invoice_no = request.GET.get('invoice_no', '')
sales_invoice_details = SalesInvoice.objects.filter(invoice_no__istartswith=invoice_no)
ctx_invoice_details = []
if sales_invoice_details.count() > 0:
for sales_invoice in sales_invoice_details:
ctx_invoice_details.append({
'invoice_no': sales_invoice.invoice_no,
'dated': sales_invoice.date.strftime('%d-%m-%Y'),
'customer': sales_invoice.customer.customer_name,
                    # the original guarded on sales_invoice.sales but still read
                    # sales.net_amount in the fallback; guarding on the quotation
                    # matches the intended "quotation total, else sales total"
                    'amount': sales_invoice.sales.quotation.net_total if sales_invoice.sales.quotation else sales_invoice.sales.net_amount
})
res = {
'result': 'ok',
'invoice_details': ctx_invoice_details,
}
response = simplejson.dumps(res)
return HttpResponse(response, status=200, mimetype='application/json')
# ==============================================================================
# File: /python/finite_element_model/add_node_set.py
# Repo: erolsson/railway_ballast
# ==============================================================================
import os
import odbAccess
def add_node_set_to_odb(odb_file_name, node_set_name, x_min=-1e99, x_max=1e99, y_min=-1e99, y_max=1e99,
z_min=-1e99, z_max=1e99, instance_name=None):
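    """Create a node set from every node whose coordinates fall inside the given
    axis-aligned bounding box. Runs under Abaqus Python (odbAccess): the odb is
    opened read-write, the set is registered on the instance, and the odb is
    saved. If instance_name is None, the first instance in the assembly is used.
    """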
odb = odbAccess.openOdb(odb_file_name, readOnly=False)
if instance_name is None:
instance_name = odb.rootAssembly.instances.keys()[0]
nodes = odb.rootAssembly.instances[instance_name].nodes
set_node_labels = []
for node in nodes:
x, y, z = node.coordinates
if x_min < x < x_max and y_min < y < y_max and z_min < z < z_max:
set_node_labels.append(node.label)
odb.rootAssembly.instances[instance_name].NodeSetFromNodeLabels(name=node_set_name, nodeLabels=set_node_labels)
odb.save()
odb.close()
if __name__ == '__main__':
odb_directory = os.path.expanduser('~/railway_ballast/odbs/')
add_node_set_to_odb(odb_directory + 'embankment_sleepers_low_17_5t.odb', 'ballast_bottom_nodes',
y_min=7-1e-3, y_max=7+1e-3)
add_node_set_to_odb(odb_directory + 'embankment_sleepers_high_17_5t.odb', 'ballast_bottom_nodes',
y_min=7-1e-3, y_max=7+1e-3)
add_node_set_to_odb(odb_directory + 'embankment_slab_low_17_5t.odb', 'ballast_bottom_nodes',
y_min=7 - 1e-3, y_max=7 + 1e-3)
add_node_set_to_odb(odb_directory + 'embankment_slab_high_17_5t.odb', 'ballast_bottom_nodes',
y_min=7 - 1e-3, y_max=7 + 1e-3)
# ==============================================================================
# File: /Trial_Aligned_Analysis/Trial_Aligned_Utils.py
# Repo: matt-j-harvey/Widefield_Analysis
# ==============================================================================
import os
import h5py
from tqdm import tqdm
import numpy as np
import statsmodels.api as sm
import matplotlib.pyplot as plt
import pandas as pd
import tables
from datetime import datetime
from Widefield_Utils import widefield_utils
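# Helpers for averaging trial-aligned activity. Judging from the indexing below,
# each metadata row is assumed to hold the mouse id in column 1, the session id
# in column 2 and the condition id in column 3; each activity row is one trial.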
def get_session_averages(activity_dataset, metadata_dataset):
# Load Session List
session_list = metadata_dataset[:, 2]
unique_sessions = np.unique(session_list)
condition_1_session_average_list = []
condition_2_session_average_list = []
for session in unique_sessions:
session_indicies = np.where(session_list == session)[0]
session_trials = activity_dataset[session_indicies]
session_metadata = metadata_dataset[session_indicies]
[condition_1_trials, condition_2_trials] = split_trials_by_condition(session_trials, session_metadata)
condition_1_mean = np.mean(condition_1_trials, axis=0)
condition_2_mean = np.mean(condition_2_trials, axis=0)
condition_1_session_average_list.append(condition_1_mean)
condition_2_session_average_list.append(condition_2_mean)
return condition_1_session_average_list, condition_2_session_average_list
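# Illustrative call (hypothetical arrays):
#   c1_means, c2_means = get_session_averages(activity, metadata)
# gives one per-session mean per condition, in the order of np.unique(sessions).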
def get_mouse_averages(activity_dataset, metadata_dataset):
# Load Session List
mouse_list = metadata_dataset[:, 1]
unique_mice = np.unique(mouse_list)
condition_1_mouse_average_list = []
condition_2_mouse_average_list = []
for mouse in unique_mice:
mouse_indicies = np.where(mouse_list == mouse)[0]
mouse_activity_data = activity_dataset[mouse_indicies]
mouse_metadata = metadata_dataset[mouse_indicies]
# Get Session Averages
condition_1_session_averages, condition_2_session_averages = get_session_averages(mouse_activity_data, mouse_metadata)
# Get Mouse Averages
condition_1_mouse_average = np.mean(condition_1_session_averages, axis=0)
condition_2_mouse_average = np.mean(condition_2_session_averages, axis=0)
# Add To List
condition_1_mouse_average_list.append(condition_1_mouse_average)
condition_2_mouse_average_list.append(condition_2_mouse_average)
return condition_1_mouse_average_list, condition_2_mouse_average_list
def split_trials_by_condition(activity_dataset, metadata_dataset):
    condition_list = metadata_dataset[:, 3]
unique_conditions = np.unique(condition_list)
combined_activity_list = []
for condition in unique_conditions:
condition_indicies = np.where(condition_list == condition)[0]
combined_activity_list.append(activity_dataset[condition_indicies])
return combined_activity_list
def get_mouse_session_averages(activity_dataset, metadata_dataset):
# Load Session List
mouse_list = metadata_dataset[:, 1]
unique_mice = np.unique(mouse_list)
condition_1_mouse_average_list = []
condition_2_mouse_average_list = []
for mouse in unique_mice:
mouse_indicies = np.where(mouse_list == mouse)[0]
mouse_activity_data = activity_dataset[mouse_indicies]
mouse_metadata = metadata_dataset[mouse_indicies]
# Get Session Averages
condition_1_session_averages, condition_2_session_averages = get_session_averages(mouse_activity_data, mouse_metadata)
# Add To List
condition_1_mouse_average_list.append(condition_1_session_averages)
condition_2_mouse_average_list.append(condition_2_session_averages)
    return condition_1_mouse_average_list, condition_2_mouse_average_list
# ==============================================================================
# File: /app/supervisor/venvs/supervisor/bin/echo_supervisord_conf
# Repo: bopopescu/uceo-2015
# ==============================================================================
#!/edx/app/supervisor/venvs/supervisor/bin/python
# EASY-INSTALL-ENTRY-SCRIPT: 'supervisor==3.1.3','console_scripts','echo_supervisord_conf'
__requires__ = 'supervisor==3.1.3'
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.exit(
load_entry_point('supervisor==3.1.3', 'console_scripts', 'echo_supervisord_conf')()
)
# ==============================================================================
# File: /Python_do_zero_Guanabara/06_Metodos/desafio/100_desafio.py
# Repo: HenriqueSOliver/Projetos_Python (MIT)
# ==============================================================================
from random import randint
from time import sleep
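# "Python do zero" (Guanabara) challenge 100: one function draws five random
# values into a list, a second sums the even values found in that list.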
def sortLista(lista):
    print('Drawing 5 values for the list: ', end='')
for c in range (0, 5):
n = randint(1,100)
lista.append(n)
print(f' {n}', end=' - ', flush=True)
sleep(0.5)
    print('DONE')
def somaP(lista):
    soma = 0
    for valor in lista:
        if valor % 2 == 0:
            soma += valor
    print(f'Adding up the even values of {lista}, we get {soma}')
# main program
números = []
sortLista(números)
somaP(números)
| [
"[email protected]"
]
| |
7427a528878b92af0bd4d6d3b7833e536c9f3af1 | 8b0609185265189fbec81023975221bb26cc6592 | /lib/core/function.py | 2d13c102f4d81ef56e705126541d2ecc27a6f776 | []
| no_license | witzou/HRNet-Facial-Landmark-Detection | 24afb5064444869c142cb4f7fcfd0bd0af880bb7 | 3c9f92a1887e259fede461ce40f303e23f2d0c00 | refs/heads/master | 2020-05-20T21:02:16.212344 | 2019-05-05T01:45:26 | 2019-05-05T01:45:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,444 | py | # ------------------------------------------------------------------------------
# Copyright (c) Microsoft
# Licensed under the MIT License.
# Created by Tianheng Cheng([email protected])
# ------------------------------------------------------------------------------
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
import logging
import torch
import numpy as np
from ..utils.transforms import flip_back
from .evaluation import accuracy, decode_preds, compute_nme
logger = logging.getLogger(__name__)
class AverageMeter(object):
"""Computes and stores the average and current value"""
    def __init__(self):
        self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
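# Typical usage (illustrative):
#   meter = AverageMeter()
#   meter.update(loss.item(), inp.size(0))  # value, batch size
#   print(meter.avg)                        # running weighted average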
def train(config, train_loader, model, critertion, optimizer,
epoch, writer_dict):
batch_time = AverageMeter()
data_time = AverageMeter()
losses = AverageMeter()
acces = AverageMeter()
model.train()
nme_count = 0
nme_batch_sum = 0
end = time.time()
for i, (inp, target, meta) in enumerate(train_loader):
# measure data time
data_time.update(time.time()-end)
# compute the output
output = model(inp)
target = target.cuda(non_blocking=True)
loss = critertion(output, target)
# NME and accuracy
score_map = output.data.cpu()
acc = accuracy(score_map, target.cpu(), [1])
preds = decode_preds(score_map, meta['center'], meta['scale'], [64, 64])
nme_batch = compute_nme(preds, meta)
nme_batch_sum = nme_batch_sum + np.sum(nme_batch)
nme_count = nme_count + preds.size(0)
# optimize
optimizer.zero_grad()
loss.backward()
optimizer.step()
losses.update(loss.item(), inp.size(0))
acces.update(acc[0], inp.size(0))
batch_time.update(time.time()-end)
if i % config.PRINT_FREQ == 0:
msg = 'Epoch: [{0}][{1}/{2}]\t' \
'Time {batch_time.val:.3f}s ({batch_time.avg:.3f}s)\t' \
'Speed {speed:.1f} samples/s\t' \
'Data {data_time.val:.3f}s ({data_time.avg:.3f}s)\t' \
'Loss {loss.val:.5f} ({loss.avg:.5f})\t' \
'Accuracy {acc.val:.5f} ({acc.avg:.5f})'.format(
epoch, i, len(train_loader), batch_time=batch_time,
speed=inp.size(0)/batch_time.val,
data_time=data_time, loss=losses, acc=acces)
logger.info(msg)
if writer_dict:
writer = writer_dict['writer']
global_steps = writer_dict['train_global_steps']
writer.add_scalar('train_loss', losses.val, global_steps)
writer.add_scalar('train_acc', acces.val, global_steps)
writer_dict['train_global_steps'] = global_steps + 1
end = time.time()
nme = nme_batch_sum / nme_count
msg = 'Train Epoch {} time:{:.4f} loss:{:.4f} acc:{:.4f} nme:{:.4f}'\
.format(epoch, batch_time.avg, losses.avg, acces.avg, nme)
logger.info(msg)
def validate(config, val_loader, model, criterion, epoch, writer_dict):
batch_time = AverageMeter()
data_time = AverageMeter()
losses = AverageMeter()
acces = AverageMeter()
num_classes = config.MODEL.NUM_JOINTS
predictions = torch.zeros((len(val_loader.dataset), num_classes, 2))
model.eval()
nme_count = 0
nme_batch_sum = 0
count_failure_008 = 0
count_failure_010 = 0
end = time.time()
flip = config.TEST.FLIP_TEST
with torch.no_grad():
for i, (inp, target, meta) in enumerate(val_loader):
data_time.update(time.time() - end)
output = model(inp)
target = target.cuda(non_blocking=True)
score_map = output.data.cpu()
if flip:
# flip W
                flip_input = torch.flip(inp, dims=[3])
flip_output = model(flip_input)
flip_output = flip_back(flip_output[-1].data.cpu())
score_map += flip_output
# loss
loss = criterion(output, target)
# accuracy
acc = accuracy(score_map, target.cpu(), [1])
preds = decode_preds(score_map, meta['center'], meta['scale'], [64, 64])
# NME
nme_temp = compute_nme(preds, meta)
# Failure Rate under different threshold
failure_008 = (nme_temp > 0.08).sum()
failure_010 = (nme_temp > 0.10).sum()
count_failure_008 += failure_008
count_failure_010 += failure_010
nme_batch_sum += np.sum(nme_temp)
nme_count = nme_count + preds.size(0)
for n in range(score_map.size(0)):
predictions[meta['index'][n], :, :] = preds[n, :, :]
# measure accuracy and record loss
losses.update(loss.item(), inp.size(0))
acces.update(acc[0], inp.size(0))
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
nme = nme_batch_sum / nme_count
failure_008_rate = count_failure_008 / nme_count
failure_010_rate = count_failure_010 / nme_count
msg = 'Test Epoch {} time:{:.4f} loss:{:.4f} acc:{:.4f} nme:{:.4f} [008]:{:.4f} ' \
'[010]:{:.4f}'.format(epoch, batch_time.avg, losses.avg, acces.avg, nme,
failure_008_rate, failure_010_rate)
logger.info(msg)
if writer_dict:
writer = writer_dict['writer']
global_steps = writer_dict['valid_global_steps']
writer.add_scalar('valid_loss', losses.avg, global_steps)
writer.add_scalar('valid_nme', nme, global_steps)
writer.add_scalar('valid_acc', acces.avg, global_steps)
writer_dict['valid_global_steps'] = global_steps + 1
return nme, predictions
def inference(config, data_loader, model):
batch_time = AverageMeter()
data_time = AverageMeter()
losses = AverageMeter()
acces = AverageMeter()
num_classes = config.MODEL.NUM_JOINTS
predictions = torch.zeros((len(data_loader.dataset), num_classes, 2))
model.eval()
nme_count = 0
nme_batch_sum = 0
count_failure_008 = 0
count_failure_010 = 0
end = time.time()
flip = config.TEST.FLIP_TEST
with torch.no_grad():
for i, (inp, target, meta) in enumerate(data_loader):
data_time.update(time.time() - end)
output = model(inp)
target = target.cuda(non_blocking=True)
score_map = output.data.cpu()
if flip:
# flip W
                flip_input = torch.flip(inp, dims=[3])
flip_output = model(flip_input)
flip_output = flip_back(flip_output[-1].data.cpu())
score_map += flip_output
# accuracy
acc = accuracy(score_map, target.cpu(), [1])
preds = decode_preds(score_map, meta['center'], meta['scale'], [64, 64])
# NME
nme_temp = compute_nme(preds, meta)
failure_008 = (nme_temp > 0.08).sum()
failure_010 = (nme_temp > 0.10).sum()
count_failure_008 += failure_008
count_failure_010 += failure_010
nme_batch_sum += np.sum(nme_temp)
nme_count = nme_count + preds.size(0)
for n in range(score_map.size(0)):
predictions[meta['index'][n], :, :] = preds[n, :, :]
acces.update(acc[0], inp.size(0))
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
nme = nme_batch_sum / nme_count
failure_008_rate = count_failure_008 / nme_count
failure_010_rate = count_failure_010 / nme_count
msg = 'Test Results time:{:.4f} loss:{:.4f} acc:{:.4f} nme:{:.4f} [008]:{:.4f} ' \
'[010]:{:.4f}'.format(batch_time.avg, losses.avg, acces.avg, nme,
failure_008_rate, failure_010_rate)
logger.info(msg)
return nme, predictions
# ==============================================================================
# File: /edifact/D00A/JAPRESD00AUN.py
# Repo: dougvanhorn/bots-grammars
# ==============================================================================
# Generated by bots open source edi translator from UN-docs.
from bots.botsconfig import *
from edifact import syntax
from recordsD00AUN import recorddefs
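# Bots grammar for the EDIFACT JAPRES D.00A message: each dict names a segment
# tag (ID), its minimum/maximum repeat counts and, under LEVEL, the segment
# group nested beneath it. The bots engine walks this tree when translating.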
structure = [
{ID: 'UNH', MIN: 1, MAX: 1, LEVEL: [
{ID: 'BGM', MIN: 1, MAX: 1},
{ID: 'DTM', MIN: 1, MAX: 2},
{ID: 'PNA', MIN: 1, MAX: 99, LEVEL: [
{ID: 'ADR', MIN: 0, MAX: 1},
{ID: 'LOC', MIN: 0, MAX: 5},
{ID: 'DTM', MIN: 0, MAX: 5},
{ID: 'CTA', MIN: 0, MAX: 5, LEVEL: [
{ID: 'COM', MIN: 0, MAX: 5},
]},
]},
{ID: 'RFF', MIN: 0, MAX: 5, LEVEL: [
{ID: 'DTM', MIN: 0, MAX: 5},
]},
{ID: 'GIS', MIN: 0, MAX: 5, LEVEL: [
{ID: 'FTX', MIN: 0, MAX: 1},
]},
{ID: 'UNS', MIN: 1, MAX: 1},
{ID: 'RFF', MIN: 1, MAX: 999, LEVEL: [
{ID: 'DTM', MIN: 0, MAX: 5},
{ID: 'FTX', MIN: 0, MAX: 5},
{ID: 'PNA', MIN: 1, MAX: 1, LEVEL: [
{ID: 'ADR', MIN: 0, MAX: 5},
{ID: 'DTM', MIN: 0, MAX: 1},
{ID: 'LOC', MIN: 0, MAX: 5},
{ID: 'NAT', MIN: 0, MAX: 9},
{ID: 'PDI', MIN: 0, MAX: 1},
{ID: 'DOC', MIN: 0, MAX: 9},
]},
{ID: 'RFF', MIN: 1, MAX: 99, LEVEL: [
{ID: 'DTM', MIN: 0, MAX: 5},
]},
{ID: 'GIS', MIN: 1, MAX: 5, LEVEL: [
{ID: 'FTX', MIN: 0, MAX: 1},
]},
{ID: 'EMP', MIN: 0, MAX: 5, LEVEL: [
{ID: 'LOC', MIN: 0, MAX: 1},
{ID: 'GIS', MIN: 0, MAX: 5, LEVEL: [
{ID: 'FTX', MIN: 0, MAX: 1},
]},
{ID: 'ATT', MIN: 0, MAX: 20, LEVEL: [
{ID: 'FTX', MIN: 0, MAX: 1},
{ID: 'PTY', MIN: 0, MAX: 1},
]},
{ID: 'LAN', MIN: 0, MAX: 10, LEVEL: [
{ID: 'GIS', MIN: 0, MAX: 1},
]},
]},
{ID: 'SAL', MIN: 0, MAX: 1, LEVEL: [
{ID: 'FTX', MIN: 0, MAX: 1},
{ID: 'DTM', MIN: 0, MAX: 2},
{ID: 'ATT', MIN: 0, MAX: 10, LEVEL: [
{ID: 'FTX', MIN: 0, MAX: 1},
]},
{ID: 'GIS', MIN: 0, MAX: 2, LEVEL: [
{ID: 'FTX', MIN: 0, MAX: 1},
]},
{ID: 'MOA', MIN: 0, MAX: 5, LEVEL: [
{ID: 'RNG', MIN: 0, MAX: 1},
{ID: 'FTX', MIN: 0, MAX: 1},
]},
]},
]},
{ID: 'UNT', MIN: 1, MAX: 1},
]},
]
| [
"[email protected]"
]
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.