Dataset schema (one row per source file; ⌀ marks a nullable column):

| column | type | range / values |
|---|---|---|
| blob_id | string | length 40 |
| directory_id | string | length 40 |
| path | string | length 3–616 |
| content_id | string | length 40 |
| detected_licenses | sequence | length 0–112 |
| license_type | string | 2 classes |
| repo_name | string | length 5–115 |
| snapshot_id | string | length 40 |
| revision_id | string | length 40 |
| branch_name | string | 777 classes |
| visit_date | timestamp[us] | 2015-08-06 10:31:46 – 2023-09-06 10:44:38 |
| revision_date | timestamp[us] | 1970-01-01 02:38:32 – 2037-05-03 13:00:00 |
| committer_date | timestamp[us] | 1970-01-01 02:38:32 – 2023-09-06 01:08:06 |
| github_id | int64 | 4.92k – 681M ⌀ |
| star_events_count | int64 | 0 – 209k |
| fork_events_count | int64 | 0 – 110k |
| gha_license_id | string | 22 classes |
| gha_event_created_at | timestamp[us] | 2012-06-04 01:52:49 – 2023-09-14 21:59:50 ⌀ |
| gha_created_at | timestamp[us] | 2008-05-22 07:58:19 – 2023-08-21 12:35:19 ⌀ |
| gha_language | string | 149 classes |
| src_encoding | string | 26 classes |
| language | string | 1 value |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 3 – 10.2M |
| extension | string | 188 classes |
| content | string | length 3 – 10.2M |
| authors | sequence | length 1 |
| author_id | string | length 1–132 |
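A minimal sketch of iterating rows with the Hugging Face `datasets` library; the
dataset path below is a placeholder, since this dump does not name the dataset:

    from datasets import load_dataset  # pip install datasets

    # streaming=True avoids downloading the whole dataset up front
    ds = load_dataset("org/dataset-name", split="train", streaming=True)
    for row in ds:
        print(row["repo_name"], row["path"], row["length_bytes"])
        break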
6a34c022805208c44b234f2c301f4a94bd5a5713 | 3e3a835ee885eb9a71fd35ea58acd04361f72f47 | /python基础/面向对象.py/老王开枪.py | 7fb0d19a6621dfb5e9adef0e085cdce4217afadd | [] | no_license | hanfang302/py- | dbb259f24e06fbe1a900df53ae6867acb8cb54ea | dd3be494ccef5100c0f06ed936f9a540d8ca0995 | refs/heads/master | 2020-03-16T01:59:57.002135 | 2018-05-07T12:02:21 | 2018-05-07T12:02:21 | 132,454,341 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,478 | py | # Person class
class Ren:
def __init__(self,name):
self.name = name
self.xue = 100
self.qiang = None
def __str__(self):
        return self.name + ' remaining HP: ' + str(self.xue)
def anzidan(self,danjia,zidan):
danjia.baocunzidan(zidan)
def andanjia(self,qiang,danjia):
qiang.lianjiedanjia(danjia)
def naqiang(self,qiang):
self.qiang = qiang
def kaiqiang(self,diren):
self.qiang.she(diren)
def diaoxue(self,shashangli):
self.xue -= shashangli
# Magazine class
class Danjia:
def __init__(self,rongliang):
self.rongliang = rongliang
self.rongnaList = []
def __str__(self):
        return 'Bullets currently in the magazine: ' + str(len(self.rongnaList)) + '/' + str(self.rongliang)
def baocunzidan(self,zidan):
if len(self.rongnaList) < self.rongliang:
self.rongnaList.append(zidan)
def chuzidan(self):
        # Check whether the magazine still holds bullets
        if len(self.rongnaList) > 0:
            # Take the bullet most recently pushed into the magazine
zidan = self.rongnaList[-1]
self.rongnaList.pop()
return zidan
else:
return None
# Bullet class
class Zidan:
def __init__(self,shashangli):
self.shashangli = shashangli
def shanghai(self,diren):
diren.diaoxue(self.shashangli)
# Gun class
class Qiang:
def __init__(self):
self.danjia = None
def __str__(self):
if self.danjia:
            return 'The gun currently has a magazine'
else:
            return 'The gun has no magazine'
def lianjiedanjia(self,danjia):
if not self.danjia:
self.danjia = danjia
def she(self,diren):
zidan = self.danjia.chuzidan()
if zidan:
zidan.shanghai(diren)
else:
            print('No bullets, fired an empty shot---')
# Create a person object
laowang = Ren('Lao Wang')
# Create a magazine
danjia = Danjia(20)
print(danjia)
# In a loop, create bullets and have Lao Wang load each one into the magazine
i = 0
while i<5:
zidan = Zidan(5)
laowang.anzidan(danjia,zidan)
i += 1
# Check the magazine info after loading the bullets
print(danjia)
# Create a gun object
qiang = Qiang()
print(qiang)
# Create an enemy
diren = Ren('Enemy')
print(diren)
# Have Lao Wang take the gun
laowang.naqiang(qiang)
# Lao Wang fires at the enemy
laowang.kaiqiang(diren)
print(diren)
print(danjia)
| [
"[email protected]"
] | |
ccf114278c3df46b18fe342b6d5710fdad5037a8 | dac12c9178b13d60f401c4febff5569af8aa2719 | /cvat-sdk/cvat_sdk/core/utils.py | 1708dfd5779affd0d04466cf7bdec0ce4858f5c8 | [
"LGPL-2.0-or-later",
"MIT",
"GPL-1.0-or-later"
] | permissive | opencv/cvat | 39dc66ca20f972ba40b79c44d7ce43590dc0b0b5 | 899c9fd75146744def061efd7ab1b1c6c9f6942f | refs/heads/develop | 2023-08-19T04:27:56.974498 | 2023-08-18T09:58:25 | 2023-08-18T09:58:25 | 139,156,354 | 6,558 | 1,887 | MIT | 2023-09-14T12:44:39 | 2018-06-29T14:02:45 | TypeScript | UTF-8 | Python | false | false | 2,077 | py | # Copyright (C) 2022 CVAT.ai Corporation
#
# SPDX-License-Identifier: MIT
from __future__ import annotations
import contextlib
import itertools
import os
from typing import (
IO,
Any,
BinaryIO,
ContextManager,
Dict,
Iterator,
Literal,
Sequence,
TextIO,
Union,
overload,
)
def filter_dict(
d: Dict[str, Any], *, keep: Sequence[str] = None, drop: Sequence[str] = None
) -> Dict[str, Any]:
return {k: v for k, v in d.items() if (not keep or k in keep) and (not drop or k not in drop)}
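# Illustrative example of the keep/drop semantics (not part of the original
# module): filter_dict({"a": 1, "b": 2, "c": 3}, keep=("a", "b"), drop=("b",))
# returns {"a": 1} - "b" survives keep but is then dropped, "c" is never kept.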
@overload
def atomic_writer(path: Union[os.PathLike, str], mode: Literal["wb"]) -> ContextManager[BinaryIO]:
...
@overload
def atomic_writer(
path: Union[os.PathLike, str], mode: Literal["w"], encoding: str = "UTF-8"
) -> ContextManager[TextIO]:
...
@contextlib.contextmanager
def atomic_writer(
path: Union[os.PathLike, str], mode: Literal["w", "wb"], encoding: str = "UTF-8"
) -> Iterator[IO]:
"""
Returns a context manager that, when entered, returns a handle to a temporary
file opened with the specified `mode` and `encoding`. If the context manager
is exited via an exception, the temporary file is deleted. If the context manager
is exited normally, the file is renamed to `path`.
In other words, this function works like `open()`, but the file does not appear
at the specified path until and unless the context manager is exited
normally.
"""
path_str = os.fspath(path)
for counter in itertools.count():
tmp_path = f"{path_str}.tmp{counter}"
try:
if mode == "w":
tmp_file = open(tmp_path, "xt", encoding=encoding)
elif mode == "wb":
tmp_file = open(tmp_path, "xb")
else:
raise ValueError(f"Unsupported mode: {mode!r}")
break
except FileExistsError:
pass # try next counter value
try:
with tmp_file:
yield tmp_file
os.rename(tmp_path, path)
except:
os.unlink(tmp_path)
raise
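# Illustrative usage (not part of the original module): the file only appears
# at ``path`` if the ``with`` block exits without raising.
#
#   with atomic_writer("config.json", "w") as f:
#       f.write("{}")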
| [
"[email protected]"
] | |
561a6ea60d55df6eae3444d16b3ed4c71ae2bb13 | f9544830150779e903c3ba42ca31898d69e4b722 | /cnld/compressed_formats.py | 6544254aa44aeef3bb6b61c5dc5c7fdfdbe39321 | [
"MIT"
] | permissive | bdshieh/cnl-dyna | 44968c7eb22b3aa259dbfa9891db3d0613872c8a | 9013fa11cabb6ad51aaa385b44ef99cc43bf6a2b | refs/heads/master | 2021-08-15T02:12:45.381857 | 2019-10-04T13:58:42 | 2019-10-04T13:58:42 | 151,421,103 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 19,288 | py | '''
Data-sparse (compressed) formats for matrices using H2Lib data structures.
'''
from timeit import default_timer as timer
import numpy as np
from matplotlib import patches
from matplotlib import pyplot as plt
from scipy.sparse import csr_matrix, issparse
from .h2lib import *
class BaseFormat:
'''
Base class defining abstract interface for formats.
'''
def __init__(self, mat):
self._mat = mat
def __del__(self):
if self._mat is not None:
del self._mat
''' PROPERTIES '''
@property
def rows(self):
return
@property
def cols(self):
return
@property
def shape(self):
return self.rows, self.cols
@property
def ndim(self):
return len(self.shape)
@property
def format(self):
return self.__class__.__name__
''' MAGIC OPERATIONS '''
def _add(self, x):
return NotImplemented
def __add__(self, x):
if not isinstance(x, BaseFormat):
raise ValueError('operation not supported with this type')
if self.shape != x.shape:
raise ValueError('dimension mismatch')
return self._add(x)
def __radd__(self, x):
return self.__add__(x)
def __rmul__(self, x):
if not np.isscalar(x):
return NotImplemented
return self.__mul__(x)
def __mul__(self, x):
return self.dot(x)
def __call__(self, x):
return self * x
def __neg__(self):
return self * -1
def __sub__(self, x):
return self.__add__(-x)
def __rsub__(self, x):
return self.__sub__(x) * -1
''' LINALG OPERATIONS '''
def _smul(self, x):
raise NotImplementedError
def _matmat(self, x):
return NotImplemented
def _matvec(self, x):
return NotImplemented
def matmat(self, X):
# X = np.asanyarray(X)
if X.ndim != 2:
raise ValueError
M, N = self.shape
if X.shape[0] != N:
raise ValueError
Y = self._matmat(X)
return Y
def matvec(self, x):
# x = np.asanyarray(x)
M, N = self.shape
if x.shape != (N, ) and x.shape != (N, 1):
raise ValueError('dimension mismatch')
y = self._matvec(x)
if x.ndim == 1:
y = y.reshape(M)
elif x.ndim == 2:
y = y.reshape(M, 1)
return y
def dot(self, x):
if np.isscalar(x):
return self._smul(x)
# # convert all numpy arrays to h2lib arrays
# elif isinstance(x, np.ndarray):
# if x.ndim == 1 or x.ndim == 2 and x.shape[1] == 1:
# xv = AVector.from_array(x)
# else:
# xv = AMatrix.from_array(x)
if x.ndim == 1 or x.ndim == 2 and x.shape[1] == 1:
return self.matvec(x)
elif x.ndim == 2:
return self.matmat(x)
else:
raise ValueError
def _adjoint(self):
return NotImplemented
def _transpose(self):
return NotImplemented
def adjoint(self):
return self._adjoint()
def transpose(self):
return self._transpose()
''' LINALG SOLVING '''
def _lu(self):
raise NotImplementedError
def _chol(self):
raise NotImplementedError
def _lusolve(self, b):
raise NotImplementedError
def _cholsolve(self, b):
raise NotImplementedError
def lu(self):
return self._lu()
def lusolve(self, b):
return self._lusolve(b)
def chol(self):
return self._chol()
def cholsolve(self, b):
return self._cholsolve(b)
class FullFormat(BaseFormat):
'''
Full (dense) matrix format, i.e. no compression.
'''
''' PROPERTIES '''
@property
def rows(self):
return self._mat.rows
@property
def cols(self):
return self._mat.cols
@property
def size(self):
return getsize_amatrix(self._mat)
@property
def data(self):
return np.array(self._mat.a)
''' INDEXING '''
def __getitem__(self, key):
return self._mat.a[key]
def __setitem__(self, key, val):
self._mat.a[key] = val
''' OPERATIONS '''
def _add(self, x):
if isinstance(x, FullFormat):
B = clone_amatrix(self._mat)
add_amatrix(1.0, False, x._mat, B)
return FullFormat(B)
elif isinstance(x, SparseFormat):
B = clone_amatrix(self._mat)
add_sparsematrix_amatrix(1.0, False, x._mat, B)
return FullFormat(B)
elif isinstance(x, HFormat):
B = clone_amatrix(self._mat)
add_hmatrix_amatrix(1.0, False, x._mat, B)
return FullFormat(B)
else:
return NotImplemented
def _smul(self, x):
B = clone_amatrix(self._mat)
scale_amatrix(x, B)
return FullFormat(B)
# def _matmat(self, x):
# if isinstance(x, FullFormat):
# # B = clone_amatrix(self._mat)
# C = new_zero_amatrix(*self.shape)
# addmul_amatrix(1.0, False, self._mat, False, x._mat, C)
# return FullFormat(C)
# elif isinstance(x, SparseFormat):
# raise NotImplementedError('operation not supported with this type')
# elif isinstance(x, HFormat):
# raise NotImplementedError('operation not supported with this type')
# else:
# raise ValueError('operation with unrecognized type')
def _matvec(self, x):
xv = AVector.from_array(x)
y = AVector(x.size)
clear_avector(y)
addeval_amatrix_avector(1.0, self._mat, xv, y)
# addevalsymm_hmatrix_avector(1.0, self._mat, x, y)
out = np.array(y.v)
return out
def _lu(self):
LU = clone_amatrix(self._mat)
succ = lrdecomp_amatrix(LU)
if succ != 0:
raise RuntimeError('failed to calculate LU decomposition')
return FullFormat(LU)
def _chol(self):
CH = clone_amatrix(self._mat)
choldecomp_amatrix(CH)
return FullFormat(CH)
def _lusolve(self, b):
x = AVector.from_array(b)
lrsolve_amatrix_avector(False, self._mat, x)
return np.array(x.v)
def _cholsolve(self, b):
x = AVector.from_array(b)
cholsolve_amatrix_avector(self._mat, x)
return np.array(x.v)
def _triangularsolve(self, b):
x = AVector.from_array(b)
lrsolve_amatrix_avector(False, self._mat, x)
# triangularsolve_amatrix_avector(True, False, True, self._mat, x)
# triangularsolve_amatrix_avector(False, False, False, self._mat, x)
return np.array(x.v)
class SparseFormat(BaseFormat):
'''
Sparse matrix format.
'''
''' PROPERTIES '''
@property
def rows(self):
return self._mat.rows
@property
def cols(self):
return self._mat.cols
@property
def size(self):
return getsize_sparsematrix(self._mat)
@property
def nnz(self):
return self._mat.nz
@property
def row(self):
return self._mat.row
@property
def col(self):
return self._mat.col
@property
def coeff(self):
return self._mat.coeff
''' OPERATIONS '''
def _add(self, x):
return NotImplemented
def _smul(self, x):
raise NotImplementedError('operation not supported with this type')
def _matmat(self, x):
if isinstance(x, FullFormat):
raise NotImplementedError('operation not supported with this type')
elif isinstance(x, SparseFormat):
raise NotImplementedError('operation not supported with this type')
elif isinstance(x, HFormat):
raise NotImplementedError('operation not supported with this type')
else:
raise ValueError('operation with unrecognized type')
def _matvec(self, x):
xv = AVector.from_array(x)
y = AVector(x.size)
clear_avector(y)
addeval_sparsematrix_avector(1.0, self._mat, xv, y)
return np.array(y.v)
def _lu(self):
raise NotImplementedError('operation not supported with this type')
def _chol(self):
raise NotImplementedError('operation not supported with this type')
def _lusolve(self, b):
raise NotImplementedError('operation not supported with this type')
def _cholsolve(self, b):
raise NotImplementedError('operation not supported with this type')
''' OTHER '''
def _as_hformat(self, href):
'''
Convert sparse format to hierarchical format using
the h-structure in href
'''
hm = clonestructure_hmatrix(href)
clear_hmatrix(
hm
) # very important to clear hmatrix otherwise addition doesn't work properly
copy_sparsematrix_hmatrix(self._mat, hm)
return HFormat(hm)
class HFormat(BaseFormat):
'''
Hierarchical matrix format.
'''
''' DATA ATTRIBUTES '''
eps_add = 1e-12
eps_lu = 1e-12
eps_chol = 1e-12
''' PROPERTIES '''
@property
def rows(self):
return getrows_hmatrix(self._mat)
@property
def cols(self):
return getcols_hmatrix(self._mat)
@property
def size(self):
return getsize_hmatrix(self._mat)
''' OPERATIONS '''
def _add(self, x):
if isinstance(x, FullFormat):
B = clone_hmatrix(self._mat)
tm = new_releucl_truncmode()
add_amatrix_hmatrix(1.0, False, x._mat, tm, self.eps_add, B)
return HFormat(B)
elif isinstance(x, SparseFormat):
B = clone_hmatrix(self._mat)
tm = new_releucl_truncmode()
# sparse format is converted to hformat prior to addition
add_hmatrix(1, (x._as_hformat(self._mat))._mat, tm, self.eps_add, B)
return HFormat(B)
elif isinstance(x, HFormat):
B = clone_hmatrix(self._mat)
tm = new_releucl_truncmode()
add_hmatrix(1, x._mat, tm, self.eps_add, B)
return HFormat(B)
else:
return NotImplemented
def _smul(self, x):
id = clonestructure_hmatrix(self._mat)
identity_hmatrix(id)
z = clonestructure_hmatrix(self._mat)
clear_hmatrix(z)
tm = new_releucl_truncmode()
addmul_hmatrix(x, False, id, False, self._mat, tm, self.eps_add, z)
return HFormat(z)
def _matmat(self, x):
if isinstance(x, FullFormat):
raise NotImplementedError('operation not supported with this type')
elif isinstance(x, SparseFormat):
raise NotImplementedError('operation not supported with this type')
elif isinstance(x, HFormat):
C = clonestructure_hmatrix(self._mat)
clear_hmatrix(C)
tm = new_releucl_truncmode()
addmul_hmatrix(1.0, False, x._mat, False, self._mat, tm, self.eps_add, C)
return HFormat(C)
else:
raise ValueError('operation with unrecognized type')
def _matvec(self, x):
xv = AVector.from_array(x)
y = AVector(x.size)
clear_avector(y)
addeval_hmatrix_avector(1.0, self._mat, xv, y)
# addevalsymm_hmatrix_avector(1.0, self._mat, x, y)
return np.array(y.v)
def _lu(self):
LU = clone_hmatrix(self._mat)
tm = new_releucl_truncmode()
lrdecomp_hmatrix(LU, tm, self.eps_lu)
return HFormat(LU)
def _chol(self):
CHOL = clone_hmatrix(self._mat)
tm = new_releucl_truncmode()
choldecomp_hmatrix(CHOL, tm, self.eps_chol)
return HFormat(CHOL)
def _lusolve(self, b):
x = AVector.from_array(b)
lrsolve_hmatrix_avector(False, self._mat, x)
return np.array(x.v)
def _cholsolve(self, b):
x = AVector.from_array(b)
cholsolve_hmatrix_avector(self._mat, x)
return np.array(x.v)
def _triangularsolve(self, b):
x = AVector.from_array(b)
lrsolve_hmatrix_avector(False, self._mat, x)
# triangularsolve_hmatrix_avector(True, False, False, self._mat, x)
# triangularsolve_hmatrix_avector(False, False, False, self._mat, x)
return np.array(x.v)
''' OTHER '''
def _draw_hmatrix(self, hm, bbox, maxidx, ax):
if len(hm.son) == 0:
if hm.r:
rk = str(hm.r.k)
fill = False
elif hm.f:
rk = None
fill = True
else:
raise Exception
x0, y0, x1, y1 = bbox
width, height = x1 - x0, y1 - y0
sq = patches.Rectangle((x0, y0),
width,
height,
edgecolor='black',
fill=fill,
facecolor='black')
ax.add_patch(sq)
if rk:
fontsize = int(round((112 - 6) * width + 6))
if width > 0.03:
ax.text(x0 + 0.05 * width,
y0 + 0.95 * height,
rk,
fontsize=fontsize)
else:
rmax, cmax = maxidx
x0, y0, x1, y1 = bbox
rsidx = (0, 1, 0, 1)
csidx = (0, 0, 1, 1)
width0 = len(hm.son[0].cc.idx) / cmax
height0 = len(hm.son[0].rc.idx) / rmax
for i, s in enumerate(hm.son):
width = len(s.cc.idx) / cmax
height = len(s.rc.idx) / rmax
xnew = x0 if csidx[i] == 0 else x0 + width0
ynew = y0 if rsidx[i] == 0 else y0 + height0
bbox = xnew, ynew, xnew + width, ynew + height
self._draw_hmatrix(s, bbox, maxidx, ax)
def draw(self):
hm = self._mat
maxidx = len(hm.rc.idx), len(hm.cc.idx)
fig, ax = plt.subplots(figsize=(9, 9))
ax.set_xlim(0, 1)
ax.set_ylim(0, 1)
ax.invert_yaxis()
ax.set_aspect('equal')
self._draw_hmatrix(hm, (0, 0, 1, 1), maxidx, ax)
fig.show()
def lu(A, eps=1e-12):
A.eps_lu = eps
return A.lu()
def chol(A, eps=1e-12):
A.eps_chol = eps
return A.chol()
def lusolve(A, b):
return A.lusolve(b)
def cholsolve(A, b):
return A.cholsolve(b)
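# Illustrative sketch (not in the original module): A is any *Format matrix
# wrapping an H2Lib object, b a 1-D numpy array of matching length.
#
#   LU = lu(A, eps=1e-12)   # factorize once
#   x = lusolve(LU, b)      # then solve A x = b against the stored factors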
def _mbk_repr(self):
repr = []
repr.append('MBKMatrix (Mass, Damping, Stiffness Matrix)\n')
repr.append(f' BaseFormat: {self.format}\n')
repr.append(f' Shape: {self.shape}\n')
repr.append(f' Size: {self.size / 1024 / 1024:.2f} MB\n')
return ''.join(repr)
def _z_repr(self):
repr = []
repr.append('ZMatrix (Acoustic Impedance Matrix)\n')
repr.append(f' BaseFormat: {self.format}\n')
repr.append(f' Shape: {self.shape}\n')
repr.append(f' Size: {self.size / 1024 / 1024:.2f} MB\n')
return ''.join(repr)
class MbkFullMatrix(FullFormat):
'''
Mass, Stiffness, and Damping matrix in full format.
'''
def __init__(self, array):
if issparse(array):
array = array.toarray()
start = timer()
MBK = AMatrix.from_array(array)
time_assemble = timer() - start
self._mat = MBK
self._time_assemble = time_assemble
@property
def time_assemble(self):
return self._time_assemble
__repr__ = _mbk_repr
class MbkSparseMatrix(SparseFormat):
'''
Mass, Stiffness, and Damping matrix in sparse format.
'''
def __init__(self, array):
array = csr_matrix(array)
start = timer()
MBK = SparseMatrix.from_array(array)
time_assemble = timer() - start
self._mat = MBK
self._time_assemble = time_assemble
@property
def time_assemble(self):
return self._time_assemble
__repr__ = _mbk_repr
class ZFullMatrix(FullFormat):
'''
Impedance matrix in full format.
'''
def __init__(self, mesh, k, basis='linear', q_reg=2, q_sing=4, **kwargs):
if basis.lower() in ['constant']:
_basis = basisfunctionbem3d.CONSTANT
elif basis.lower() in ['linear']:
_basis = basisfunctionbem3d.LINEAR
else:
raise TypeError
bem = new_slp_helmholtz_bem3d(k, mesh.surface3d, q_reg, q_sing, _basis, _basis)
Z = AMatrix(len(mesh.vertices), len(mesh.vertices))
start = timer()
assemble_bem3d_amatrix(bem, Z)
time_assemble = timer() - start
self._mat = Z
self._time_assemble = time_assemble
# self._bem = bem
@property
def time_assemble(self):
return self._time_assemble
__repr__ = _z_repr
class ZHMatrix(HFormat):
'''
Impedance matrix in hierarchical format.
'''
def __init__(self,
mesh,
k,
basis='linear',
m=4,
q_reg=2,
q_sing=4,
aprx='paca',
admis='2',
eta=1.0,
eps_aca=1e-2,
strict=False,
clf=16,
rk=0,
**kwargs):
if basis.lower() in ['constant']:
_basis = basisfunctionbem3d.CONSTANT
elif basis.lower() in ['linear']:
_basis = basisfunctionbem3d.LINEAR
else:
raise TypeError
bem = new_slp_helmholtz_bem3d(k, mesh.surface3d, q_reg, q_sing, _basis, _basis)
root = build_bem3d_cluster(bem, clf, _basis)
if strict:
broot = build_strict_block(root, root, eta, admis)
else:
broot = build_nonstrict_block(root, root, eta, admis)
if aprx.lower() in ['aca']:
setup_hmatrix_aprx_inter_row_bem3d(bem, root, root, broot, m)
elif aprx.lower() in ['paca']:
setup_hmatrix_aprx_paca_bem3d(bem, root, root, broot, eps_aca)
elif aprx.lower() in ['hca']:
setup_hmatrix_aprx_hca_bem3d(bem, root, root, broot, m, eps_aca)
elif aprx.lower() in ['inter_row']:
setup_hmatrix_aprx_inter_row_bem3d(bem, root, root, broot, m)
Z = build_from_block_hmatrix(broot, rk)
start = timer()
assemble_bem3d_hmatrix(bem, broot, Z)
time_assemble = timer() - start
self._mat = Z
self._time_assemble = time_assemble
# keep references to h2lib objects so they don't get garbage collected
self._root = root
# important! don't ref bem and broot otherwise processes fail to terminate (not sure why)
# self._bem = bem
self._broot = broot
def __del__(self):
del self._mat
del self._root
# del self._bem
del self._broot
@property
def time_assemble(self):
return self._time_assemble
__repr__ = _z_repr
| [
"[email protected]"
] | |
1152d6bd4d2f89c9d67e5d435c268860a0a3237e | b7e6f93f913a5a234a61df8e9c937c5038e7559b | /publication/admin.py | c9955c445b23eb762d151a8659a0b04543877c3a | [] | no_license | Ansagan-Kabdolla/dikush | 9d6cbb1acb699af110f0208a3c7e95d5445e6fa1 | 297a5f2a0ea426e8c41d33abd02661811e536d18 | refs/heads/master | 2022-04-15T06:32:52.382739 | 2020-04-10T09:22:04 | 2020-04-10T09:22:04 | 254,590,602 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 115 | py | from django.contrib import admin
from .models import *
admin.site.register(Filepdf)
admin.site.register(Predmeti) | [
"[email protected]"
] | |
e53556e3a604085aa6f7add4581285a4e52224d2 | 96dcea595e7c16cec07b3f649afd65f3660a0bad | /homeassistant/components/sensorpro/device.py | 326eb8b8bbd743ce3d1776ded36c6a74498130aa | [
"Apache-2.0"
] | permissive | home-assistant/core | 3455eac2e9d925c92d30178643b1aaccf3a6484f | 80caeafcb5b6e2f9da192d0ea6dd1a5b8244b743 | refs/heads/dev | 2023-08-31T15:41:06.299469 | 2023-08-31T14:50:53 | 2023-08-31T14:50:53 | 12,888,993 | 35,501 | 20,617 | Apache-2.0 | 2023-09-14T21:50:15 | 2013-09-17T07:29:48 | Python | UTF-8 | Python | false | false | 442 | py | """Support for SensorPro devices."""
from __future__ import annotations
from sensorpro_ble import DeviceKey
from homeassistant.components.bluetooth.passive_update_processor import (
PassiveBluetoothEntityKey,
)
def device_key_to_bluetooth_entity_key(
device_key: DeviceKey,
) -> PassiveBluetoothEntityKey:
"""Convert a device key to an entity key."""
return PassiveBluetoothEntityKey(device_key.key, device_key.device_id)
| [
"[email protected]"
] | |
a53c1b3c8f40c312958b8b0d54c48ea0ac2ffa34 | 824f831ce0921b3e364060710c9e531f53e52227 | /Leetcode/Arrays/LC-287. Find the Duplicate Number.py | 5901d94bc86c781e7f2bae0c1ee771b979e4f97d | [] | no_license | adityakverma/Interview_Prepration | e854ff92c10d05bc2c82566ea797d2ce088de00a | d08a7f728c53943e9a27c33f8e4249633a69d1a6 | refs/heads/master | 2020-04-19T19:36:06.527353 | 2019-06-15T23:02:30 | 2019-06-15T23:02:30 | 168,392,921 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 3,076 | py |
# Given an array nums containing n + 1 integers where each integer is between 1 and n (inclusive),
# prove that at least one duplicate number must exist. Assume that there is only one duplicate
# number, find the duplicate one.
# Example 1:
#
# Input: [1,3,4,2,2]
# Output: 2
#
# Example 2:
#
# Input: [3,1,3,4,2]
# Output: 3
# =================================================================================================
# Excellent Binary Search Solution: O(nlogn)
# https://leetcode.com/problems/find-the-duplicate-number/discuss/72844/Two-Solutions-(with-explanation):-O(nlog(n))-and-O(n)-time-O(1)-space-without-changing-the-input-array
def findDuplicate(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
low = 1
high = len(nums) - 1
while low <= high:
        mid = low + (high - low) // 2
count = 0
for i in nums:
if i <= mid:
count += 1
if count > mid:
high = mid - 1
# print "lower half. low & high are", low, high
else:
low = mid + 1
# print "upper half. low & high are", low, high
return low
'''
This solution is based on binary search.
At first the search space is numbers between 1 to n. Each time I select a number mid (which is the one in the middle) and count all the numbers equal to or less than mid. Then if the count is more than mid, the search space will be [1 mid] otherwise [mid+1 n]. I do this until search space is only one number.
Let's say n=10 and I select mid=5. Then I count all the numbers in the array which are less than equal mid. If the there are more than 5 numbers that are less than 5, then by Pigeonhole Principle (https://en.wikipedia.org/wiki/Pigeonhole_principle) one of them has occurred more than once. So I shrink the search space from [1 10] to [1 5]. Otherwise the duplicate number is in the second half so for the next step the search space would be [6 10].
'''
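# Worked example (illustrative) for nums = [1,3,4,2,2], n = 4:
#   low=1, high=4 -> mid=2, count(i <= 2) = 3 > 2, so high = 1
#   low=1, high=1 -> mid=1, count(i <= 1) = 1 <= 1, so low = 2
#   low > high, and low == 2 is returned: the duplicate.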
#------------------------
# Regular logic
'''
def findDuplicate(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
nums.sort()
for i in range(1, len(nums)):
if nums[i] == nums[i-1]:
return nums[i]
'''
#---------------------------
# Cycle Detection Solution - O(n)
# https://leetcode.com/problems/find-the-duplicate-number/discuss/72846/My-easy-understood-solution-with-O(n)-time-and-O(1)-space-without-modifying-the-array.-With-clear-explanation.
# https://leetcode.com/problems/find-the-duplicate-number/solution/#
'''
def findDuplicate(self, nums):
# Find the intersection point of the two runners.
tortoise = nums[0]
hare = nums[0]
while True:
tortoise = nums[tortoise]
hare = nums[nums[hare]]
if tortoise == hare:
break
# Find the "entrance" to the cycle.
ptr1 = nums[0]
ptr2 = tortoise
while ptr1 != ptr2:
ptr1 = nums[ptr1]
ptr2 = nums[ptr2]
return ptr1
'''
| [
"[email protected]"
] | |
0cef47601e24ce2571e4fae9c030c83522f29d60 | 4505ae4b6fee0e32d799f22c32b18f79884daef4 | /src/keras/tests/test_loss_masking.py | 36ad471de77211bb1b4ed6288f94f34c0bdec80b | [
"MIT",
"Apache-2.0"
] | permissive | lu791019/iii_HA_Image_Recognition_DL | 5cde9c2d0c06f8fe3fb69991b27fda87d42450e1 | d5f56d62af6d3aac1c216ca4ff309db08a8c9072 | refs/heads/master | 2020-08-03T06:56:05.345175 | 2019-09-29T13:20:24 | 2019-09-29T13:20:24 | 211,660,905 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,531 | py | import numpy as np
import pytest
from keras.models import Sequential
from keras.engine.training_utils import weighted_masked_objective
from keras.layers import TimeDistributed, Masking, Dense
from keras import losses
from keras import backend as K
def create_masking_model():
model = Sequential()
model.add(Masking(mask_value=0, input_shape=(None, 1)))
model.add(TimeDistributed(Dense(1, kernel_initializer='one')))
model.compile(loss='mse', optimizer='sgd')
return model
def test_masking():
np.random.seed(1337)
x = np.array([[[1], [1]],
[[0], [0]]])
model = create_masking_model()
y = np.array([[[1], [1]],
[[1], [1]]])
loss = model.train_on_batch(x, y)
assert loss == 0
def test_masking_is_all_zeros():
x = y = np.array([[[0], [0]]])
model = create_masking_model()
loss = model.train_on_batch(x, y)
assert loss == 0
def test_loss_masking():
weighted_loss = weighted_masked_objective(losses.get('mae'))
shape = (3, 4, 2)
x = np.arange(24).reshape(shape)
y = 2 * x
# Normally the trailing 1 is added by standardize_weights
weights = np.ones((3,))
mask = np.ones((3, 4))
mask[1, 0] = 0
out = K.eval(weighted_loss(K.variable(x),
K.variable(y),
K.variable(weights),
K.variable(mask)))
if __name__ == '__main__':
pytest.main([__file__])
| [
"[email protected]"
] | |
c67d6fe7ce1bebab2ccccf1fba0bb20116d81484 | 98a5677396a4fdaad36ff8bb67ca08a8a79f2c13 | /example/toolbox/management/commands/createcalaccessrawmodeldocs.py | 556aeea30f7002572d4f9c2ebe907c70da08d12d | [
"MIT"
] | permissive | livlab/django-calaccess-raw-data | 542255e5ad9ca50996163591cb7b0f24f57724ff | 3fd8b7505e158cb3159603ce4f42e3508af9e0bf | refs/heads/master | 2020-04-05T23:07:01.637833 | 2015-08-29T18:19:08 | 2015-08-29T18:19:08 | 41,603,060 | 1 | 0 | null | 2015-08-29T18:23:17 | 2015-08-29T18:23:17 | null | UTF-8 | Python | false | false | 1,089 | py | import os
from django.conf import settings
from calaccess_raw import get_model_list
from django.template.loader import render_to_string
from calaccess_raw.management.commands import CalAccessCommand
class Command(CalAccessCommand):
help = 'Generate documentation for raw CAL-ACCESS database models'
def handle(self, *args, **kwargs):
self.docs_dir = os.path.join(
settings.REPO_DIR,
'docs'
)
self.target_path = os.path.join(self.docs_dir, 'models.rst')
model_list = sorted(get_model_list(), key=lambda x:x().klass_name)
group_list = {}
for m in model_list:
try:
group_list[m().klass_group].append(m)
except KeyError:
group_list[m().klass_group] = [m]
group_list = sorted(group_list.items(), key=lambda x:x[0])
context = {
'group_list': group_list,
}
rendered = render_to_string('toolbox/models.rst', context)
with open(self.target_path, 'w') as target_file:
target_file.write(rendered)
| [
"[email protected]"
] | |
0b45f2cad03d55adf2caed48ec14aabf6dd1204f | 4ee2ebef215cf879aafdfa44221f52d82775176a | /Inheritance/Exercise/03-Players_And_Monsters/project/wizard.py | 6c54fe969f329c58c92fa42e89364eb28a5deac0 | [] | no_license | Avstrian/SoftUni-Python-OOP | d2a9653863cba7bc095e647cd3f0561377f10f6d | 6789f005b311039fd46ef1f55f3eb6fa9313e5a6 | refs/heads/main | 2023-08-01T09:31:38.099842 | 2021-08-24T04:21:38 | 2021-08-24T04:21:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 238 | py | from project.hero import Hero
class Wizard(Hero):
def __init__(self, username, level):
super().__init__(username, level)
def __str__(self):
return f"{self.username} of type Wizard has level {self.level}" | [
"[email protected]"
] | |
14b95097ebef6310a10450b50fa85478aad59ebf | ef08d1e969a53c279e75b0120683eb3ec6914adf | /App/models.py | e0b07d140b75aa8ec9c5b43d15a64c2d31bf5e35 | [] | no_license | yuansuixin/learn-flask-city | 7278fa567b5d6825fd2c121114495092c4612a09 | f8bfd1d8daff9c18c1ee71f1770fdb83ce6dfa2f | refs/heads/master | 2021-04-09T15:31:26.120510 | 2018-03-19T08:54:59 | 2018-03-19T08:54:59 | 125,822,109 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 759 | py | from App.ext import model
class Provice(model.Model):
pid = model.Column(model.Integer,primary_key=True,autoincrement=True)
name = model.Column(model.String(32))
cities = model.relationship('City',backref='Provice',lazy='dynamic')
class City(model.Model):
cid = model.Column(model.Integer,primary_key=True,autoincrement=True)
name = model.Column(model.String(32))
provice = model.Column(model.Integer,model.ForeignKey(Provice.pid))
villages = model.relationship('Village', backref='City', lazy='dynamic')
class Village(model.Model):
vid = model.Column(model.Integer, primary_key=True, autoincrement=True)
name = model.Column(model.String(32))
city = model.Column(model.Integer, model.ForeignKey(City.cid))
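# Illustrative queries (assuming ``model`` is a Flask-SQLAlchemy instance,
# so each Model gets a ``query`` attribute; not part of the original file):
#   p = Provice.query.first()
#   p.cities.all()    # City rows whose ``provice`` FK points at p
#   c = City.query.first()
#   c.villages.all()  # lazy='dynamic' makes ``villages`` a query object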
| [
"[email protected]"
] | |
dbf46d8ca805cf2f79e58f27d726c87ea0b78fa6 | 264787b5f42d482db2ef0838b45ec79db71e6e2f | /home/views.py | 1edd90b3506e5941e7f0205e6a6a010d7ff9f5b2 | [] | no_license | felipefoc/PrecoCertoChallenge2 | f0700788f363ce9b72234a3d7df35ef1ea78d6b2 | 446854bc1823a06b207b30a10e14e71f7c982bee | refs/heads/main | 2023-04-21T23:04:34.942298 | 2021-05-10T22:10:10 | 2021-05-10T22:10:10 | 366,187,651 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,431 | py | from django.shortcuts import render
from django.views.generic import ListView
from home.models import Product, Order, ProductSold
from django.db.models import Q
import json
import datetime
def homeview(request):
with open('data.json', 'r+') as f:
data = json.load(f)
# y = ProductSold.objects.get(id=1)
# aa =ProductSold.objects.get(id=2)
# x = Order.objects.create(
# status='Finalizado',
# date=datetime.datetime.now(),
# total=10.00,
# )
# x.save()
# x.product_sold.add(y)
# x.product_sold.add(aa)
eita = Order.objects.all()
ob = Product.objects.all()
ab = ProductSold.objects.all()
    for i in eita:
        # product_sold is populated via .add() above, i.e. a many-to-many;
        # iterate the related manager instead of reading .quantity on it
        for sold in i.product_sold.all():
            print(sold.quantity)
# for i in data['Orders']:
# print(i['Order']['ProductsSold'])
# sku = i['Order']['ProductsSold'][0]['ProductsSold']['sku']
# name = i['Order']['ProductsSold'][0]['ProductsSold']['name']
# quantity = i['Order']['ProductsSold'][0]['ProductsSold']['quantity']
# price = float(i['Order']['ProductsSold'][0]['ProductsSold']['price']) / int(quantity)
# cost_price = i['Order']['ProductsSold'][0]['ProductsSold']['cost_price']
# print(sku, name, quantity, price, cost_price)
return render(request, template_name='index.html', context={'data': ob, 'ab':ab, 'eita': eita})
# Create your views here.
# class HomeAPIView(ListView):
| [
"[email protected]"
] | |
e02f281a024f31c0d46eb8c5482cbf9893fe7f56 | 4c601eaa346e660c296e270cc2d79aea9a3721fe | /tests/components/homekit_controller/specific_devices/test_homeassistant_bridge.py | e9fc9b522ea9223ad12b87d8feb8101ae4925a4a | [
"Apache-2.0"
] | permissive | basnijholt/home-assistant | f55110af9ff602274c0a929c7298ef97a0ef282f | ba55b4b8338a2dc0ba3f1d750efea49d86571291 | refs/heads/dev | 2023-01-21T11:53:52.621353 | 2020-08-08T15:03:06 | 2020-08-08T15:03:06 | 220,313,680 | 5 | 1 | Apache-2.0 | 2023-01-13T06:04:49 | 2019-11-07T19:29:54 | Python | UTF-8 | Python | false | false | 1,900 | py | """Test against characteristics captured from the Home Assistant HomeKit bridge running demo platforms."""
from homeassistant.components.fan import (
SUPPORT_DIRECTION,
SUPPORT_OSCILLATE,
SUPPORT_SET_SPEED,
)
from tests.components.homekit_controller.common import (
Helper,
setup_accessories_from_file,
setup_test_accessories,
)
async def test_homeassistant_bridge_fan_setup(hass):
"""Test that a SIMPLEconnect fan can be correctly setup in HA."""
accessories = await setup_accessories_from_file(
hass, "home_assistant_bridge_fan.json"
)
config_entry, pairing = await setup_test_accessories(hass, accessories)
entity_registry = await hass.helpers.entity_registry.async_get_registry()
# Check that the fan is correctly found and set up
fan_id = "fan.living_room_fan"
fan = entity_registry.async_get(fan_id)
assert fan.unique_id == "homekit-fan.living_room_fan-8"
fan_helper = Helper(
hass, "fan.living_room_fan", pairing, accessories[0], config_entry,
)
fan_state = await fan_helper.poll_and_get_state()
assert fan_state.attributes["friendly_name"] == "Living Room Fan"
assert fan_state.state == "off"
assert fan_state.attributes["supported_features"] == (
SUPPORT_DIRECTION | SUPPORT_SET_SPEED | SUPPORT_OSCILLATE
)
device_registry = await hass.helpers.device_registry.async_get_registry()
device = device_registry.async_get(fan.device_id)
assert device.manufacturer == "Home Assistant"
assert device.name == "Living Room Fan"
assert device.model == "Fan"
assert device.sw_version == "0.104.0.dev0"
bridge = device = device_registry.async_get(device.via_device_id)
assert bridge.manufacturer == "Home Assistant"
assert bridge.name == "Home Assistant Bridge"
assert bridge.model == "Bridge"
assert bridge.sw_version == "0.104.0.dev0"
| [
"[email protected]"
] | |
178e0b903a4b668e29a729fd8922c375f9bafabd | e560841b98656129df8ea169d8826eadd63d32db | /siteroot/bookz/apps.py | 817b612740abd87a1eb1910fea5f17930486ccc8 | [] | no_license | DorogAD/ibook | 313e222836e5c9cb824937b7932ccf2680886098 | 07df6a7543fb80e088fdeb7b502602c81a00ed6f | refs/heads/master | 2023-02-15T03:37:29.996409 | 2021-01-14T06:36:38 | 2021-01-14T06:36:38 | 305,949,798 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 85 | py | from django.apps import AppConfig
class BookzConfig(AppConfig):
name = 'bookz'
| [
"[email protected]"
] | |
9b9c66415e4cc7864cca1de30a392308b4d18434 | 3deef77b752c9940ac1cbe35dbcfb6a9ede59c67 | /12day/03.WSGIServer.py | d2a68325e7e005d5449172bdc125a07b9a6af31b | [] | no_license | vstarman/python_codes | c682a4aa96e90172da6292f4e245da4a41c97531 | 64ddd38af6cf65861602620a0196bc460bc359d4 | refs/heads/master | 2021-09-05T01:31:55.882001 | 2018-01-23T12:43:45 | 2018-01-23T12:43:45 | 114,988,789 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,421 | py | import socket, time, re, sys
class WSGIServer():
"""定义一个wsgi服务器的类"""
def __init__(self, documents_root, port=8080):
        # create; a TCP (stream) socket is needed for listen()/accept()
        self.server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# reuse
self.server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
# BIND
self.server_socket.bind(("", port))
# listen
self.server_socket.listen(128)
        # non-blocking
self.server_socket.setblocking(False)
        # store client sockets
self.client_socket_list = []
self.documents_root = documents_root
def run_forever(self):
"""运行服务器"""
while True:
try:
#time.sleep(0.5)
new_socket, new_addr = self.server_socket.accept()
except Exception as e:
print("---------1---------",e)
else:
new_socket.setblocking(False)
self.client_socket_list.append(new_socket)
for client_socket in self.client_socket_list:
try:
request = client_socket.recv(1024).decode()
except Exception as e:
print("---------2---------", e)
else:
if request:
self.deal_with_request(request, client_socket)
else:
client_socket.close()
self.client_socket_list.remove(client_socket)
print(self.client_socket_list)
def deal_with_request(self, request, client_socket):
"""为当前浏览器服务"""
if not request:
return
request_lines = request.splitlines()
for i, line in enumerate(request_lines):
print(i, "\t", line)
        # Extract the requested file
ret = re.match(r"[^/]*([^ ]+)", request_lines[0])
if ret:
print("提取数据>>>>[%s]" % ret.group(1))
file_name = ret.group(1)
if file_name == "/":
file_name = "index.html"
else:
return
        # Read the requested file
try:
f = open(self.documents_root + file_name, "rb")
except:
response_body = "file not found, 请输入正确的url"
response_header = "HTTP/1.1 404 not found\r\n"
response_header += "Content-Type: text/html; charset=utf-8\r\n"
response_header += "Content-Length: %d\r\n" % (len(response_body))
response_header += "\r\n"
            # Send the response back to the browser
client_socket.send((response_header + response_body).encode())
else:
content = f.read()
f.close()
response_body = content
response_header = "HTTP/1.1 200 0K\r\n"
response_header += "Content-Length: %d\r\n" % (len(response_body))
response_header += "\r\n"
client_socket.send((response_header + response_body).encode())
def main():
"""控制web服务器整体"""
if len(sys.argv) == 2:
port = sys.argv[1]
if port.isdigit():
port = int(port)
elif len(sys.argv) == 1:
port = None
else:
print("运行方式如: python3 xxx.py 7890")
http_server = WSGIServer("./html", port)
http_server.run_forever()
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
79d6585724e04b28bac073cb392c9c5ef0bd59b8 | d4a569dcf616b7f05e53a44803e38196b436b8b9 | /[email protected]/Lib/site-packages/mypy/typeshed/stdlib/3/sys.pyi | d4f755c1ad33599685ded3ba3658e1b9ccca7d02 | [
"MIT"
] | permissive | nverbois/TFE21-232 | ac3178d24939c872c02a671c0f1d8cc471af516b | 7113837b5263b5c508bfc6903cb6982b48aa7ee4 | refs/heads/main | 2023-06-05T18:50:59.207392 | 2021-06-25T19:54:40 | 2021-06-25T19:54:40 | 337,691,391 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,925 | pyi | # Stubs for sys
# Ron Murawski <[email protected]>
# based on http://docs.python.org/3.2/library/sys.html
from typing import (
List,
NoReturn,
Sequence,
Any,
Dict,
Tuple,
TextIO,
overload,
Optional,
Union,
TypeVar,
Callable,
Type,
)
import sys
from types import FrameType, ModuleType, TracebackType
from importlib.abc import MetaPathFinder
_T = TypeVar("_T")
# The following type alias are stub-only and do not exist during runtime
_ExcInfo = Tuple[Type[BaseException], BaseException, TracebackType]
_OptExcInfo = Union[_ExcInfo, Tuple[None, None, None]]
# ----- sys variables -----
abiflags: str
argv: List[str]
base_exec_prefix: str
base_prefix: str
byteorder: str
builtin_module_names: Sequence[str] # actually a tuple of strings
copyright: str
# dllhandle = 0 # Windows only
dont_write_bytecode: bool
displayhook: Callable[[object], Any]
excepthook: Callable[[Type[BaseException], BaseException, TracebackType], Any]
exec_prefix: str
executable: str
float_repr_style: str
hexversion: int
last_type: Optional[Type[BaseException]]
last_value: Optional[BaseException]
last_traceback: Optional[TracebackType]
maxsize: int
maxunicode: int
meta_path: List[MetaPathFinder]
modules: Dict[str, ModuleType]
path: List[str]
path_hooks: List[Any] # TODO precise type; function, path to finder
path_importer_cache: Dict[str, Any] # TODO precise type
platform: str
if sys.version_info >= (3, 9):
platlibdir: str
prefix: str
if sys.version_info >= (3, 8):
pycache_prefix: Optional[str]
ps1: str
ps2: str
stdin: TextIO
stdout: TextIO
stderr: TextIO
__stdin__: TextIO
__stdout__: TextIO
__stderr__: TextIO
tracebacklimit: int
version: str
api_version: int
warnoptions: Any
# Each entry is a tuple of the form (action, message, category, module,
# lineno)
# winver = '' # Windows only
_xoptions: Dict[Any, Any]
flags: _flags
class _flags:
debug: int
division_warning: int
inspect: int
interactive: int
optimize: int
dont_write_bytecode: int
no_user_site: int
no_site: int
ignore_environment: int
verbose: int
bytes_warning: int
quiet: int
hash_randomization: int
if sys.version_info >= (3, 7):
dev_mode: int
utf8_mode: int
float_info: _float_info
class _float_info:
epsilon: float # DBL_EPSILON
dig: int # DBL_DIG
mant_dig: int # DBL_MANT_DIG
max: float # DBL_MAX
max_exp: int # DBL_MAX_EXP
max_10_exp: int # DBL_MAX_10_EXP
min: float # DBL_MIN
min_exp: int # DBL_MIN_EXP
min_10_exp: int # DBL_MIN_10_EXP
radix: int # FLT_RADIX
rounds: int # FLT_ROUNDS
hash_info: _hash_info
class _hash_info:
width: int
modulus: int
inf: int
nan: int
imag: int
implementation: _implementation
class _implementation:
name: str
version: _version_info
hexversion: int
cache_tag: str
int_info: _int_info
class _int_info:
bits_per_digit: int
sizeof_digit: int
class _version_info(Tuple[int, int, int, str, int]):
major: int
minor: int
micro: int
releaselevel: str
serial: int
version_info: _version_info
def call_tracing(__func: Callable[..., _T], __args: Any) -> _T: ...
def _clear_type_cache() -> None: ...
def _current_frames() -> Dict[int, Any]: ...
def _debugmallocstats() -> None: ...
def __displayhook__(value: object) -> None: ...
def __excepthook__(
type_: Type[BaseException], value: BaseException, traceback: TracebackType
) -> None: ...
def exc_info() -> _OptExcInfo: ...
# sys.exit() accepts an optional argument of anything printable
def exit(__status: object = ...) -> NoReturn: ...
def getdefaultencoding() -> str: ...
if sys.platform != "win32":
# Unix only
def getdlopenflags() -> int: ...
def getfilesystemencoding() -> str: ...
def getrefcount(__object: Any) -> int: ...
def getrecursionlimit() -> int: ...
@overload
def getsizeof(obj: object) -> int: ...
@overload
def getsizeof(obj: object, default: int) -> int: ...
def getswitchinterval() -> float: ...
def _getframe(__depth: int = ...) -> FrameType: ...
_ProfileFunc = Callable[[FrameType, str, Any], Any]
def getprofile() -> Optional[_ProfileFunc]: ...
def setprofile(profilefunc: Optional[_ProfileFunc]) -> None: ...
_TraceFunc = Callable[
[FrameType, str, Any], Optional[Callable[[FrameType, str, Any], Any]]
]
def gettrace() -> Optional[_TraceFunc]: ...
def settrace(tracefunc: Optional[_TraceFunc]) -> None: ...
class _WinVersion(
Tuple[int, int, int, int, str, int, int, int, int, Tuple[int, int, int]]
):
major: int
minor: int
build: int
platform: int
service_pack: str
service_pack_minor: int
service_pack_major: int
suite_mast: int
product_type: int
platform_version: Tuple[int, int, int]
def getwindowsversion() -> _WinVersion: ... # Windows only
def intern(__string: str) -> str: ...
def is_finalizing() -> bool: ...
if sys.version_info >= (3, 7):
__breakpointhook__: Any # contains the original value of breakpointhook
def breakpointhook(*args: Any, **kwargs: Any) -> Any: ...
def setdlopenflags(__flags: int) -> None: ... # Linux only
def setrecursionlimit(__limit: int) -> None: ...
def setswitchinterval(__interval: float) -> None: ...
def gettotalrefcount() -> int: ... # Debug builds only
if sys.version_info < (3, 9):
def getcheckinterval() -> int: ... # deprecated
def setcheckinterval(__n: int) -> None: ... # deprecated
if sys.version_info >= (3, 8):
# not exported by sys
class UnraisableHookArgs:
exc_type: Type[BaseException]
exc_value: Optional[BaseException]
exc_traceback: Optional[TracebackType]
err_msg: Optional[str]
object: Optional[object]
unraisablehook: Callable[[UnraisableHookArgs], Any]
def addaudithook(hook: Callable[[str, Tuple[Any, ...]], Any]) -> None: ...
def audit(__event: str, *args: Any) -> None: ...
| [
"[email protected]"
] | |
3f9b710162d8d237511ac4d4e3ac58cf8db9305b | 5308d3624036fb27ca158b520d2c59b643f8bf32 | /galsim/moffat.py | f27dba6f4049084ac7c3edcb410abadf5e9eaf0c | [
"BSD-3-Clause",
"BSD-2-Clause"
] | permissive | kernsuite-debian/galsim | bfacc7f665e35595189c03b164e61809c2943cc5 | 1515537b429fb3337d5c1090f9161f0fb223f2a0 | refs/heads/master | 2022-11-06T21:22:51.794046 | 2018-08-29T18:51:56 | 2018-08-29T18:51:56 | 82,295,722 | 0 | 1 | NOASSERTION | 2022-10-19T06:05:05 | 2017-02-17T12:33:53 | Python | UTF-8 | Python | false | false | 8,691 | py | # Copyright (c) 2012-2018 by the GalSim developers team on GitHub
# https://github.com/GalSim-developers
#
# This file is part of GalSim: The modular galaxy image simulation toolkit.
# https://github.com/GalSim-developers/GalSim
#
# GalSim is free software: redistribution and use in source and binary forms,
# with or without modification, are permitted provided that the following
# conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions, and the disclaimer given in the accompanying LICENSE
# file.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions, and the disclaimer given in the documentation
# and/or other materials provided with the distribution.
#
import numpy as np
import math
from . import _galsim
from .gsobject import GSObject
from .gsparams import GSParams
from .utilities import lazy_property, doc_inherit
from .position import PositionD
from .errors import GalSimRangeError, GalSimIncompatibleValuesError, convert_cpp_errors
class Moffat(GSObject):
"""A class describing a Moffat surface brightness profile.
The Moffat surface brightness profile is I(R) ~ [1 + (r/scale_radius)^2]^(-beta). The
GalSim representation of a Moffat profile also includes an optional truncation beyond a given
radius.
For more information, refer to
http://home.fnal.gov/~neilsen/notebook/astroPSF/astroPSF.html
Initialization
--------------
A Moffat can be initialized using one (and only one) of three possible size parameters:
`scale_radius`, `fwhm`, or `half_light_radius`. Exactly one of these three is required.
@param beta The `beta` parameter of the profile.
@param scale_radius The scale radius of the profile. Typically given in arcsec.
[One of `scale_radius`, `fwhm`, or `half_light_radius` is required.]
@param half_light_radius The half-light radius of the profile. Typically given in arcsec.
[One of `scale_radius`, `fwhm`, or `half_light_radius` is required.]
@param fwhm The full-width-half-max of the profile. Typically given in arcsec.
[One of `scale_radius`, `fwhm`, or `half_light_radius` is required.]
@param trunc An optional truncation radius at which the profile is made to drop to
zero, in the same units as the size parameter.
[default: 0, indicating no truncation]
@param flux The flux (in photons/cm^2/s) of the profile. [default: 1]
@param gsparams An optional GSParams argument. See the docstring for GSParams for
details. [default: None]
Methods and Properties
----------------------
In addition to the usual GSObject methods, Moffat has the following access properties:
>>> beta = moffat_obj.beta
>>> rD = moffat_obj.scale_radius
>>> fwhm = moffat_obj.fwhm
>>> hlr = moffat_obj.half_light_radius
"""
_req_params = { "beta" : float }
_opt_params = { "trunc" : float , "flux" : float }
_single_params = [ { "scale_radius" : float, "half_light_radius" : float, "fwhm" : float } ]
_takes_rng = False
_is_axisymmetric = True
_is_analytic_x = True
_is_analytic_k = True
# The conversion from hlr or fwhm to scale radius is complicated for Moffat, especially
# since we allow it to be truncated, which matters for hlr. So we do these calculations
# in the C++-layer constructor.
def __init__(self, beta, scale_radius=None, half_light_radius=None, fwhm=None, trunc=0.,
flux=1., gsparams=None):
self._beta = float(beta)
self._trunc = float(trunc)
self._flux = float(flux)
self._gsparams = GSParams.check(gsparams)
if self._trunc == 0. and self._beta <= 1.1:
raise GalSimRangeError("Moffat profiles with beta <= 1.1 must be truncated",
beta, 1.1)
if self._trunc < 0.:
raise GalSimRangeError("Moffat trunc must be >= 0", self._trunc, 0.)
# Parse the radius options
if half_light_radius is not None:
if scale_radius is not None or fwhm is not None:
raise GalSimIncompatibleValuesError(
"Only one of scale_radius, half_light_radius, or fwhm may be specified",
half_light_radius=half_light_radius, scale_radius=scale_radius, fwhm=fwhm)
self._hlr = float(half_light_radius)
if self._trunc > 0. and self._trunc <= math.sqrt(2.) * self._hlr:
raise GalSimRangeError("Moffat trunc must be > sqrt(2) * half_light_radius.",
self._trunc, math.sqrt(2.) * self._hlr)
with convert_cpp_errors():
self._r0 = _galsim.MoffatCalculateSRFromHLR(self._hlr, self._trunc, self._beta)
self._fwhm = 0.
elif fwhm is not None:
if scale_radius is not None:
raise GalSimIncompatibleValuesError(
"Only one of scale_radius, half_light_radius, or fwhm may be specified",
half_light_radius=half_light_radius, scale_radius=scale_radius, fwhm=fwhm)
self._fwhm = float(fwhm)
self._r0 = self._fwhm / (2. * math.sqrt(2.**(1./self._beta) - 1.))
self._hlr = 0.
elif scale_radius is not None:
self._r0 = float(scale_radius)
self._hlr = 0.
self._fwhm = 0.
else:
raise GalSimIncompatibleValuesError(
"One of scale_radius, half_light_radius, or fwhm must be specified",
half_light_radius=half_light_radius, scale_radius=scale_radius, fwhm=fwhm)
@lazy_property
def _sbp(self):
with convert_cpp_errors():
return _galsim.SBMoffat(self._beta, self._r0, self._trunc, self._flux,
self.gsparams._gsp)
    def getFWHM(self):
        """Return the FWHM for this Moffat profile.
        """
        return self.fwhm
    def getHalfLightRadius(self):
        """Return the half light radius for this Moffat profile.
        """
        return self.half_light_radius
@property
def beta(self): return self._beta
@property
def scale_radius(self): return self._r0
@property
def trunc(self): return self._trunc
@property
def half_light_radius(self):
if self._hlr == 0.:
self._hlr = self._sbp.getHalfLightRadius()
return self._hlr
@lazy_property
def fwhm(self):
if self._fwhm == 0.:
self._fwhm = self._r0 * (2. * math.sqrt(2.**(1./self._beta) - 1.))
return self._fwhm
def __eq__(self, other):
return (isinstance(other, Moffat) and
self.beta == other.beta and
self.scale_radius == other.scale_radius and
self.trunc == other.trunc and
self.flux == other.flux and
self.gsparams == other.gsparams)
def __hash__(self):
return hash(("galsim.Moffat", self.beta, self.scale_radius, self.trunc, self.flux,
self.gsparams))
def __repr__(self):
return 'galsim.Moffat(beta=%r, scale_radius=%r, trunc=%r, flux=%r, gsparams=%r)'%(
self.beta, self.scale_radius, self.trunc, self.flux, self.gsparams)
def __str__(self):
s = 'galsim.Moffat(beta=%s, scale_radius=%s'%(self.beta, self.scale_radius)
if self.trunc != 0.:
s += ', trunc=%s'%self.trunc
if self.flux != 1.0:
s += ', flux=%s'%self.flux
s += ')'
return s
def __getstate__(self):
d = self.__dict__.copy()
d.pop('_sbp',None)
return d
def __setstate__(self, d):
self.__dict__ = d
@property
def _maxk(self):
return self._sbp.maxK()
@property
def _stepk(self):
return self._sbp.stepK()
@property
def _has_hard_edges(self):
return self._trunc != 0.
@property
def _max_sb(self):
return self._sbp.maxSB()
@doc_inherit
def _xValue(self, pos):
return self._sbp.xValue(pos._p)
@doc_inherit
def _kValue(self, kpos):
return self._sbp.kValue(kpos._p)
@doc_inherit
def _drawReal(self, image):
self._sbp.draw(image._image, image.scale)
@doc_inherit
def _shoot(self, photons, rng):
self._sbp.shoot(photons._pa, rng._rng)
@doc_inherit
def _drawKImage(self, image):
self._sbp.drawK(image._image, image.scale)
| [
"[email protected]"
] | |
a743c452d4c63fee68c318c62fa4043b50388f12 | 7137161629a1003583744cc3bd0e5d3498e0a924 | /airflow/migrations/versions/142555e44c17_add_data_interval_start_end_to_dagmodel_and_dagrun.py | 2eedcb81c6444f27cf2d8e79fb281f9a9134fc2e | [
"Apache-2.0",
"BSD-3-Clause",
"MIT"
] | permissive | jbampton/airflow | 3fca85975854eb916f16143b659a9119af143963 | dcfa14d60dade3fdefa001d10013466fe4d77f0d | refs/heads/master | 2023-05-25T22:31:49.104069 | 2021-09-18T19:18:32 | 2021-09-18T19:18:32 | 247,645,744 | 3 | 0 | Apache-2.0 | 2020-03-16T08:12:58 | 2020-03-16T08:12:57 | null | UTF-8 | Python | false | false | 2,859 | py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Add data_interval_[start|end] to DagModel and DagRun.
Revision ID: 142555e44c17
Revises: 54bebd308c5f
Create Date: 2021-06-09 08:28:02.089817
"""
from alembic import op
from sqlalchemy import TIMESTAMP, Column
from sqlalchemy.dialects import mssql, mysql
# Revision identifiers, used by Alembic.
revision = "142555e44c17"
down_revision = "54bebd308c5f"
branch_labels = None
depends_on = None
def _use_date_time2(conn):
result = conn.execute(
"""SELECT CASE WHEN CONVERT(VARCHAR(128), SERVERPROPERTY ('productversion'))
like '8%' THEN '2000' WHEN CONVERT(VARCHAR(128), SERVERPROPERTY ('productversion'))
like '9%' THEN '2005' ELSE '2005Plus' END AS MajorVersion"""
).fetchone()
mssql_version = result[0]
return mssql_version not in ("2000", "2005")
def _get_timestamp(conn):
dialect_name = conn.dialect.name
if dialect_name == "mysql":
return mysql.TIMESTAMP(fsp=6, timezone=True)
if dialect_name != "mssql":
return TIMESTAMP(timezone=True)
if _use_date_time2(conn):
return mssql.DATETIME2(precision=6)
return mssql.DATETIME
def upgrade():
"""Apply data_interval fields to DagModel and DagRun."""
column_type = _get_timestamp(op.get_bind())
with op.batch_alter_table("dag_run") as batch_op:
batch_op.add_column(Column("data_interval_start", column_type))
batch_op.add_column(Column("data_interval_end", column_type))
with op.batch_alter_table("dag") as batch_op:
batch_op.add_column(Column("next_dagrun_data_interval_start", column_type))
batch_op.add_column(Column("next_dagrun_data_interval_end", column_type))
def downgrade():
"""Unapply data_interval fields to DagModel and DagRun."""
with op.batch_alter_table("dag_run") as batch_op:
batch_op.drop_column("data_interval_start")
batch_op.drop_column("data_interval_end")
with op.batch_alter_table("dag") as batch_op:
batch_op.drop_column("next_dagrun_data_interval_start")
batch_op.drop_column("next_dagrun_data_interval_end")
| [
"[email protected]"
] | |
5d1e6afc58cdf15e0476c647d701b742ce6780f6 | 60cf82eeddce21893f06a4b76e5b0515430b3ef2 | /src/sintax/formats/criterion.py | a1e085b7288cde54d37257460d492ba2b9018613 | [
"ISC"
] | permissive | theasylum/sintax | 963e89c0984ee22b4f873d62cdac063efc6b748f | a81fdf1a891595168df53ac1d177d99eac16fb76 | refs/heads/master | 2020-06-22T15:26:33.171259 | 2019-07-19T08:46:29 | 2019-07-19T08:46:29 | 197,737,261 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,797 | py | """
Reads Criterion "raw.csv" files, and returns the resulting input into a
dictionary that can be further manipulated.
Criterion stores results as follows:
$PROJECT/target/criterion/{benchmark group}/{function name}/
{value passed to function (parameters)}/
Inside that folder there are two other folders, named:
- base: This is the results from the previous or first run
- new: This is the most current results from the current run
We basically have to walk the directory tree, find each of the raw.csv
files inside the new folder, and then parse that.
Criterion runs the same benchmark function multiple times, with multiple
iteration counts, mainly to get a good statistical sampling. This means the
.csv files contain 1 or more entries.
"""
import csv
import os
from pathlib import Path
def _name(group, function, value):
if value:
return f"{group}/{function}/{value}"
return f"{group}/{function}"
def _build_results(path, *, aggregate=False):
"""
    Reads a CSV file and yields the results as dictionaries one by one; if the
    file is not a valid Criterion .csv it is ignored and nothing is yielded.
"""
with open(path, newline="") as raw_csv:
header = raw_csv.readline()
if header.strip() != "group,function,value,sample_time_nanos,iteration_count":
return
criterion_csv = csv.DictReader(
raw_csv,
fieldnames=(
"group",
"function",
"value",
"sample_time_nanos",
"iteration_count",
),
)
for row in criterion_csv:
yield {
"name": _name(row["group"], row["function"], row["value"]),
"iterations": row["iteration_count"],
# Criterion doesn't differentiate between real time that has
# expired vs the amount of CPU time used, so we just set them
# to be the same
"real_time": row["sample_time_nanos"],
"cpu_time": row["sample_time_nanos"],
"time_unit": "ns",
"run_type": "iteration",
}
def reader(path, *, aggregate=False):
"""
    Yields results for each of the raw.csv files found in the path provided.
Unfortunately the files are not collected together into a single location
by default.
"""
for root, dirs, files in os.walk(path):
if {"new", "base"} <= set(dirs):
# We are now inside a test directory where we want to read new/raw.csv
yield from _build_results(
Path(root) / "new" / "raw.csv", aggregate=aggregate
)
else: # pragma: nocover (bug in coverage)
continue
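if __name__ == "__main__":  # pragma: nocover
    # Minimal usage sketch (illustrative only, not part of the module API):
    # print every sample parsed from a Criterion output tree passed on the
    # command line, e.g. `python criterion.py target/criterion`.
    import sys
    for sample in reader(sys.argv[1]):
        print(sample["name"], sample["real_time"])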
| [
"[email protected]"
] | |
753ead96f492949a999e23032b1dce59150254e9 | ba99df13d39dc8aa696c38a6c49d5193ce5c4c80 | /scripts/box3d_trpo/run_box3d_pixel_v15_tf_fred_arch.py | bf6b510b2555cccd040e0c4dd8d3f1264665f7e8 | [
"MIT"
] | permissive | fredshentu/public_model_based_controller | e5434ec075420ec5dd6d7355ba4751744a9b6248 | 9301699bc56aa49ba5c699f7d5be299046a8aa0c | refs/heads/master | 2021-08-28T01:08:12.166349 | 2017-12-11T01:11:36 | 2017-12-11T01:11:36 | 113,795,659 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,903 | py | import os
from sandbox.rocky.tf.baselines.nn_baseline import NNBaseline
from sandbox.rocky.tf.core.network import ConvNetwork
from sandbox.rocky.tf.policies.gaussian_conv_feature_policy import GaussianConvFeaturePolicy
from sandbox.rocky.tf.policies.gaussian_conv_policy import GaussianConvPolicy
from rllab.baselines.zero_baseline import ZeroBaseline
from rllab.envs.normalized_env import normalize
from sandbox.rocky.tf.samplers.batch_sampler import BatchSampler
from sandbox.rocky.tf.algos.trpo import TRPO
from rllab.misc.instrument import stub, run_experiment_lite
from sandbox.rocky.tf.envs.base import TfEnv
from rllab.envs.gym_env import GymEnv
import itertools
import tensorflow as tf
stub(globals())
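# rllab's stub() replaces the imported classes with lazy stubs, so the
# algorithm construction below is recorded and serialized by
# run_experiment_lite rather than executed immediately.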
# Params range
seeds = range(0, 2)
for seed in seeds:
env = TfEnv(normalize(env=GymEnv('Box3dReachPixel-v15',record_video=False, \
log_dir='/tmp/gym_test',record_log=False)))
env_spec = env.spec
policy_cnn = ConvNetwork(
name="policy_conv_network",
input_shape=env_spec.observation_space.shape,
output_dim=env_spec.action_space.flat_dim,
conv_filters=(64, 64, 64, 32),
conv_filter_sizes=((5,5),(5,5),(5,5),(3,3)),
conv_strides=(3, 3, 3, 2),
conv_pads=('SAME', 'SAME', 'SAME', 'SAME'),
hidden_sizes=(256,),
hidden_nonlinearity=tf.nn.relu,
output_nonlinearity=None,
)
baseline_cnn = ConvNetwork(
name="baseline_conv_network",
input_shape=env_spec.observation_space.shape,
output_dim=env_spec.action_space.flat_dim,
conv_filters=(64, 64, 64, 32),
conv_filter_sizes=((5,5),(5,5),(5,5),(3,3)),
conv_strides=(3, 3, 3, 2),
conv_pads=('SAME', 'SAME', 'SAME', 'SAME'),
hidden_sizes=(256,),
hidden_nonlinearity=tf.nn.relu,
output_nonlinearity=None,
)
policy = GaussianConvFeaturePolicy(
"conv_feature_policy",
env_spec=env_spec,
feature_network=policy_cnn,
hidden_sizes=(128,64),
clip_action=False,
)
baseline = NNBaseline(
env_spec=env_spec,
feature_network=baseline_cnn,
hidden_sizes=(128,64),
hidden_nonlinearity=tf.nn.relu,
init_lr=5e-4,
n_itr=10,
train_feature_network=True,
)
batch_size = 9600
algo = TRPO(
env=env,
policy=policy,
baseline=baseline,
batch_size=batch_size,
whole_paths=True,
max_path_length=1000,
n_itr=1000,
step_size=0.01,
subsample_factor=1.0,
sampler_cls=BatchSampler,
optimizer_args={
'num_slices' : 8,
}
)
run_experiment_lite(
algo.train(),
exp_prefix='trpo_box3d_pixel_v15_tf_fred_arch',
n_parallel=12,
snapshot_mode="gap",
snapshot_gap=200,
seed=seed,
mode="local"
)
| [
"[email protected]"
] | |
a7792d90a0b66b5d879402881bb716e6b36e682e | 18a7c2173eb4fbb66bcc8b2ef117aad863846b83 | /etc/krri.py | 2d80dc5b94937d43053fc554347ee0e474096650 | [] | no_license | mynameiskirang/python | 5968607f6f61406e9f4b69b1a9edff31e84df153 | 4a48ea50378f9e079d0ece9110fc33afadf77434 | refs/heads/master | 2020-05-19T17:49:50.720578 | 2019-04-21T16:13:28 | 2019-04-21T16:13:28 | null | 0 | 0 | null | null | null | null | UHC | Python | false | false | 1,066 | py | import csv
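# Merge the per-channel capture files "Graph{n}_Wave_Tshort_00{i}{j}.csv"
# into a single "Graph{n}_Wave_Tshort.csv" per graph (assumed layout:
# 27 setting rows followed by 60000 samples per file).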
col = 10
blank = 4
arr = [[""]*(col*blank) for i in range(600000)]
setting = [[""]*8 for i in range(27)]
Num = 0
for num in range(1,10):
for i in range(0, col):
for j in range(0, 10):
try:
                # file to read
rname= "Graph"+str(num)+"_Wave_Tshort_00" + str(i) + str(j) + ".csv"
print(rname)
rf = open(rname)
Num = num
except Exception as E:
                # if the file does not exist, print the error and skip it
print(E)
continue
reader = csv.reader(rf, delimiter=',')
reader = list(reader)
if i==0 and j==0:
for k in range(0,27):
setting[k] = reader[k]
            for k in range(27, 60027):
                arr[k-27 + j*60000][i*blank +1] = reader[k][1]
            rf.close()
f = open('Graph'+str(Num)+'_Wave_Tshort.csv', 'w', encoding='utf-8', newline='')
wf = csv.writer(f)
for i in range(0, 27):
wf.writerow(setting[i])
for i in range(0, 600000):
wf.writerow(arr[i])
| [
"[email protected]"
] | |
cfd926890b7a12f971499b3d9a97b2535364d58e | 786027545626c24486753351d6e19093b261cd7d | /ghidra9.2.1_pyi/ghidra/util/graph/Edge.pyi | 2e34bad38d5952aca6e6483e08bacaac28e839b6 | [
"MIT"
] | permissive | kohnakagawa/ghidra_scripts | 51cede1874ef2b1fed901b802316449b4bf25661 | 5afed1234a7266c0624ec445133280993077c376 | refs/heads/main | 2023-03-25T08:25:16.842142 | 2021-03-18T13:31:40 | 2021-03-18T13:31:40 | 338,577,905 | 14 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,520 | pyi | import ghidra.util.graph
import java.lang
class Edge(object, ghidra.util.graph.KeyedObject, java.lang.Comparable):
"""
An Edge joins a pair of vertices.
    The from and to vertex of an edge cannot be changed.
"""
def __init__(self, from_: ghidra.util.graph.Vertex, to: ghidra.util.graph.Vertex):
"""
@param from The from or parent vertex.
@param to The to or child vertex.
"""
...
@overload
def compareTo(self, edge: ghidra.util.graph.Edge) -> int:
"""
Compare one edge to another. Based on time of creation.
"""
...
@overload
def compareTo(self, __a0: object) -> int: ...
def equals(self, obj: object) -> bool:
"""
        Overrides equals method by comparing keys.
"""
...
def from(self) -> ghidra.util.graph.Vertex:
"""
Returns from vertex.
"""
...
def getClass(self) -> java.lang.Class: ...
def hashCode(self) -> int: ...
def key(self) -> long:
"""
Returns the key of this edge.
"""
...
def notify(self) -> None: ...
def notifyAll(self) -> None: ...
def to(self) -> ghidra.util.graph.Vertex:
"""
Returns to vertex.
"""
...
def toString(self) -> unicode: ...
@overload
def wait(self) -> None: ...
@overload
def wait(self, __a0: long) -> None: ...
@overload
def wait(self, __a0: long, __a1: int) -> None: ...
| [
"[email protected]"
] | |
859989ba770691ea96442cfcb7bbca9d2a652d36 | 9cdfe7992090fb91696eec8d0a8ae15ee12efffe | /greedy/maxMeetingsInRoom.py | c72a5d81483d96a85aa1f85718ef0ebd0ad17341 | [] | no_license | binchen15/leet-python | e62aab19f0c48fd2f20858a6a0d0508706ae21cc | e00cf94c5b86c8cca27e3bee69ad21e727b7679b | refs/heads/master | 2022-09-01T06:56:38.471879 | 2022-08-28T05:15:42 | 2022-08-28T05:15:42 | 243,564,799 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 573 | py |
# Activity-selection greedy: sort by finish time (ties broken by the original
# meeting number) and keep every meeting that starts strictly after the
# previously chosen meeting ends.
from typing import List
class Solution:
    def maxMeetings(self, N : int, S : List[int], F : List[int]) -> List[int]:
        meets = sorted(zip(F, S, range(1, N + 1)), key=lambda x: (x[0], x[2]))
        ans = [meets[0]]
        for i in range(1, N):
            if meets[i][1] > ans[-1][0]:  # starts after the last one ends
                ans.append(meets[i])
        return sorted(meet[2] for meet in ans)
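if __name__ == "__main__":
    # Quick sanity check on the textbook instance (expected output, assuming
    # the usual problem statement: [1, 2, 4, 5]).
    print(Solution().maxMeetings(6, [1, 3, 0, 5, 8, 5], [2, 4, 6, 7, 9, 9]))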
| [
"[email protected]"
] | |
7d0de73ae40f72ae5e0dd9c1981cc60ab149e75e | 0b793bce2da8c3d09b7956c0672ddbffd46feaed | /atcoder/arc/arc084_a.py | d067d9c30e55cb49bb016c94a255db4d323cc47e | [
"MIT"
] | permissive | knuu/competitive-programming | c6c4e08fb231937d988bdc5a60a8ad6b31b97616 | 16bc68fdaedd6f96ae24310d697585ca8836ab6e | refs/heads/master | 2021-01-17T09:39:02.647688 | 2020-11-07T03:17:22 | 2020-11-07T03:17:22 | 27,886,732 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 332 | py | from bisect import bisect_left
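# Count triples a < b < c with a from A, b from B, c from C: cnt[i] holds the
# number of (a, b) pairs using the i smallest elements of B; bisect_left(B, c)
# gives how many b lie strictly below each c.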
N = int(input())
A = sorted(int(x) for x in input().split())
B = sorted(int(x) for x in input().split())
C = sorted(int(x) for x in input().split())
cnt = [0] * (N + 1)
for i, b in enumerate(B):
cnt[i + 1] = bisect_left(A, b)
cnt[i + 1] += cnt[i]
print(sum(cnt[bisect_left(B, c)] for c in C))
| [
"[email protected]"
] | |
ac79f8d7257e3bf527da58a1c4b70d913cda6521 | 39e1e256acae3fe9be4434024d42b9bb47bdd02f | /analysis/submissions/e88d1e36c4db7410802b3fda6db81d38_task1-1_1597071296/task1-1/main.py | bf7fdcd651f55cfacb57e5584ca141091694c7ef | [] | no_license | neulab/tranx-study | 9fb67b9a2181f0b362e4f97316c502eee4539b19 | e2a7089689f7f95e773e19c8f19513abe4fb8b9b | refs/heads/master | 2023-06-14T04:46:01.010892 | 2021-07-08T09:29:05 | 2021-07-08T09:29:05 | 250,357,553 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 366 | py | # Example code, write your program here
import random
import string
import pprint
characters_list = []
int_list = []
for i in range(100):
characters_list.append(random.choice(string.ascii_lowercase))
int_list.append(random.randint(1, 21))
#print(characters_list)
#print(int_list)
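# NOTE: dict(zip(...)) keeps only the last value paired with each repeated
# letter, so the dictionary ends up with at most 26 entries, not 100.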
dictionary = dict(zip(characters_list, int_list))
pprint.pprint(dictionary)
| [
"[email protected]"
] | |
4e452ec076db8ba7fd506905ae39e9d8ae5a789b | 0d9fda11b6f6b0cb1cf397aa45dd21cf3a612b15 | /docs/projects/oop-in-python/msdie.py | 927ff5409510a059756145ac111486be6e4d03e7 | [
"MIT"
] | permissive | milpontiangwenya/tech-department | c98eb9375558c81f022a9615b3f9c53fa2cfa4b9 | 7a4e7d7cd2675c19510cf9a80dcac504674111ce | refs/heads/master | 2022-02-07T12:46:27.927665 | 2019-07-23T06:45:05 | 2019-07-23T06:45:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 284 | py | #class definition for an n-sided die
#import packages
import random
class MSdie:
    def __init__(self, sides):  # constructor
        self.sides = sides
        self.value = 1
    def roll(self):  # roll the die to a random face
        self.value = random.randrange(1, self.sides + 1)
    def getValue(self):  # return the current value of the MSdie
        return self.value
    def setValue(self, value):  # set the die to a particular value
        self.value = value
| [
"[email protected]"
] | |
16dc363eea70d018e7688966abab2cf64ad39754 | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_144/ch46_2020_04_06_19_03_41_278160.py | 359be63da56cf45e5606981e79c2a3b001d2471e | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 177 | py | def numero_no_indice(lista):
i = 0
new_list = []
while i < len(lista):
if lista[i] == i:
new_list.append(i)
        i += 1
return new_list | [
"[email protected]"
] | |
e05c00915f992b03e9d9703f593bb0844b22ee57 | ddc15592dede715b4aff97a10009d39bba76e566 | /lsnm_in_python/analysis/avg_FC_diffs_across_subjs.py | d5dbdd3821babd54263ffe896edda9a788746676 | [
"LicenseRef-scancode-public-domain"
] | permissive | xlong0513/Brain_inspired | d8a5c02da17f56d8906d5fa76efd3a757c5b5ae0 | 816c1c4fd5eb19d0463ba19c0a8c0db8465b4912 | refs/heads/master | 2022-06-01T20:44:07.366460 | 2020-05-05T10:05:36 | 2020-05-05T10:05:36 | 256,150,573 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,842 | py | # ============================================================================
#
# PUBLIC DOMAIN NOTICE
#
# National Institute on Deafness and Other Communication Disorders
#
# This software/database is a "United States Government Work" under the
# terms of the United States Copyright Act. It was written as part of
# the author's official duties as a United States Government employee and
# thus cannot be copyrighted. This software/database is freely available
# to the public for use. The NIDCD and the U.S. Government have not placed
# any restriction on its use or reproduction.
#
# Although all reasonable efforts have been taken to ensure the accuracy
# and reliability of the software and data, the NIDCD and the U.S. Government
# do not and cannot warrant the performance or results that may be obtained
# by using this software or data. The NIDCD and the U.S. Government disclaim
# all warranties, express or implied, including warranties of performance,
# merchantability or fitness for any particular purpose.
#
# Please cite the author in any work or product based on this material.
#
# ==========================================================================
# ***************************************************************************
#
# Large-Scale Neural Modeling software (LSNM)
#
# Section on Brain Imaging and Modeling
# Voice, Speech and Language Branch
# National Institute on Deafness and Other Communication Disorders
# National Institutes of Health
#
# This file (avg_FC_diffs_across_subjs.py) was created on December 3, 2016.
#
#
# Author: Antonio Ulloa
#
# Last updated by Antonio Ulloa on December 3 2016
#
# **************************************************************************/
#
# avg_FC_diffs_across_subjs.py
#
# Reads functional connectivity differences from input files corresponding to
# difference subjects and it calculates an average, after which it displays
# a average in matrix form as well as a histogram. We also
# calculate kurtosis and skewness of the histogram.
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import kurtosis
from scipy.stats import skew
from matplotlib import cm as CM
# declare ROI labels
labels = ['rLOF',
'rPORB',
'rFP' ,
'rMOF' ,
'rPTRI',
'rPOPE',
'rRMF' ,
'rSF' ,
'rCMF' ,
'rPREC',
'rPARC',
'rRAC' ,
'rCAC' ,
'rPC' ,
'rISTC',
'rPSTC',
'rSMAR',
'rSP' ,
'rIP' ,
'rPCUN',
'rCUN' ,
'rPCAL',
'rLOCC',
'rLING',
'rFUS' ,
'rPARH',
'rENT' ,
'rTP' ,
'rIT' ,
'rMT' ,
'rBSTS',
'rST' ,
'rTT']
# define the names of the input files where the correlation coefficients were stored
FC_diff_subj1 = 'subject_11/xcorr_diffs_TB_minus_RS.npy'
FC_diff_subj2 = 'subject_12/xcorr_diffs_TB_minus_RS.npy'
FC_diff_subj3 = 'subject_13/xcorr_diffs_TB_minus_RS.npy'
FC_diff_subj4 = 'subject_14/xcorr_diffs_TB_minus_RS.npy'
FC_diff_subj5 = 'subject_15/xcorr_diffs_TB_minus_RS.npy'
FC_diff_subj6 = 'subject_16/xcorr_diffs_TB_minus_RS.npy'
FC_diff_subj7 = 'subject_17/xcorr_diffs_TB_minus_RS.npy'
FC_diff_subj8 = 'subject_18/xcorr_diffs_TB_minus_RS.npy'
FC_diff_subj9 = 'subject_19/xcorr_diffs_TB_minus_RS.npy'
FC_diff_subj10 = 'subject_20/xcorr_diffs_TB_minus_RS.npy'
# define the names of the files where the average functional connectivities
# are stored (for both task-based and resting state)
xcorr_rs_avg_file = 'rs_fc_avg.npy'
xcorr_tb_avg_file = 'tb_fc_avg.npy'
# open files that contain correlation coefficients
fc_diff_subj1 = np.load(FC_diff_subj1)
fc_diff_subj2 = np.load(FC_diff_subj2)
fc_diff_subj3 = np.load(FC_diff_subj3)
fc_diff_subj4 = np.load(FC_diff_subj4)
fc_diff_subj5 = np.load(FC_diff_subj5)
fc_diff_subj6 = np.load(FC_diff_subj6)
fc_diff_subj7 = np.load(FC_diff_subj7)
fc_diff_subj8 = np.load(FC_diff_subj8)
fc_diff_subj9 = np.load(FC_diff_subj9)
fc_diff_subj10 = np.load(FC_diff_subj10)
# open files that contain functional connectivity averages
xcorr_rs_avg = np.load(xcorr_rs_avg_file)
xcorr_tb_avg = np.load(xcorr_tb_avg_file)
# construct numpy array containing functional connectivity arrays
fc_diff = np.array([fc_diff_subj1, fc_diff_subj2, fc_diff_subj3,
fc_diff_subj4, fc_diff_subj5, fc_diff_subj6,
fc_diff_subj7, fc_diff_subj8, fc_diff_subj9,
fc_diff_subj10 ])
# now, we need to apply a Fisher Z transformation to the correlation coefficients,
# prior to averaging.
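# (Fisher z: z = arctanh(r) = 0.5 * ln((1 + r) / (1 - r)); averaging in
# z-space and mapping back with tanh is less biased than averaging raw r.)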
fc_diff_z = np.arctanh(fc_diff)
# calculate the mean of correlation coefficients across all given subjects
fc_diff_z_mean = np.mean(fc_diff_z, axis=0)
# now, convert back from Z to R correlation coefficients, prior to plotting
fc_diff_mean = np.tanh(fc_diff_z_mean)
# initialize new figure for the across-subject mean of FC differences (TB-RS)
fig = plt.figure('Across-subject average of FC differences (TB-RS)')
ax = fig.add_subplot(111)
# apply mask to get rid of upper triangle, including main diagonal
mask = np.tri(fc_diff_mean.shape[0], k=0)
mask = np.transpose(mask)
fc_diff_mean = np.ma.array(fc_diff_mean, mask=mask) # mask out upper triangle
# plot correlation matrix as a heatmap
cmap = CM.get_cmap('jet', 10)
cmap.set_bad('w')
cax = ax.imshow(fc_diff_mean, vmin=-1, vmax=1.0, interpolation='nearest', cmap=cmap)
ax.grid(False)
plt.colorbar(cax)
# change frequency of ticks to match number of ROI labels
plt.xticks(np.arange(0, len(labels)))
plt.yticks(np.arange(0, len(labels)))
# display labels for brain regions
ax.set_xticklabels(labels, rotation=90)
ax.set_yticklabels(labels)
# Turn off all the ticks
ax = plt.gca()
for t in ax.xaxis.get_major_ticks():
t.tick1On = False
t.tick2On = False
for t in ax.yaxis.get_major_ticks():
t.tick1On = False
t.tick2On = False
# initialize new figure for histogram
fig = plt.figure('Average of FC differences (TB-RS)')
ax = fig.add_subplot(111)
# flatten the numpy cross-correlation matrix
corr_mat = np.ma.ravel(fc_diff_mean)
# remove masked elements from cross-correlation matrix
corr_mat = np.ma.compressed(corr_mat)
# plot a histogram to show the frequency of correlations
plt.hist(corr_mat, 25)
plt.xlabel('Correlation Coefficient')
plt.ylabel('Number of occurrences')
plt.axis([-1, 1, 0, 130])
# initialize new figure scatter plot of xcorr_rs average vs xcorr_tb average
fig = plt.figure()
ax = fig.add_subplot(111)
# apply mask to get rid of upper triangle, including main diagonal
mask = np.tri(xcorr_rs_avg.shape[0], k=0)
mask = np.transpose(mask)
xcorr_rs_avg = np.ma.array(xcorr_rs_avg, mask=mask) # mask out upper triangle
xcorr_tb_avg = np.ma.array(xcorr_tb_avg, mask=mask) # mask out upper triangle
# flatten the numpy cross-correlation arrays
corr_mat_RS = np.ma.ravel(xcorr_rs_avg)
corr_mat_TB = np.ma.ravel(xcorr_tb_avg)
# remove masked elements from cross-correlation arrays
corr_mat_RS = np.ma.compressed(corr_mat_RS)
corr_mat_TB = np.ma.compressed(corr_mat_TB)
# plot a scatter plot to show how averages of xcorr1 and xcorr2 correlate
plt.scatter(corr_mat_RS, corr_mat_TB)
plt.xlabel('Resting-State')
plt.ylabel('Task-Based')
plt.axis([-1,1,-1,1])
# calculate and print kurtosis and skewness of the FC-difference distribution
print '\nFC differences (TB-RS) Fishers kurtosis: ', kurtosis(corr_mat, fisher=True)
print 'FC differences (TB-RS) skewness: ', skew(corr_mat)
# Show the plots on the screen
plt.show()
| [
"[email protected]"
] | |
2217ee76daecfe84f81849ca7bcda17568b04510 | 7bc004d6e22ccec582f2fb7f6651a4034feff4f5 | /Interpolation/Other/New Tab with Masters of Selected Glyphs.py | 9cc9c627ea19d4987f713d9fc33c2ca3ba19dc8e | [
"Apache-2.0"
] | permissive | beppeartz/Glyphs-Scripts | d7102291b2bd6bda2680aaeadac4133ddef82327 | e248fb0701949473dfde358ee83acf3a564c9d55 | refs/heads/master | 2022-01-13T22:07:28.532747 | 2021-12-27T22:01:11 | 2021-12-27T22:01:11 | 40,445,584 | 0 | 0 | null | 2015-08-09T18:06:09 | 2015-08-09T18:06:09 | null | UTF-8 | Python | false | false | 467 | py | #MenuTitle: New Tab with Masters of Selected Glyphs
# -*- coding: utf-8 -*-
from __future__ import division, print_function, unicode_literals
__doc__="""
Opens a new Edit tab containing all masters of selected glyphs.
"""
import masterNavigation as nav
thisFont = Glyphs.font # frontmost font
if thisFont and thisFont.selectedLayers:
    glyphNames = [l.parent.name for l in thisFont.selectedLayers if l.parent and l.parent.name]
    nav.showAllMastersOfGlyphs( glyphNames )
| [
"[email protected]"
] | |
337be842715a37c45179d4aa5d8920ef90f7b136 | 82b946da326148a3c1c1f687f96c0da165bb2c15 | /sdk/python/pulumi_azure_native/elastic/v20200701preview/_enums.py | f335bf9639b12e949bd76fca9ff7ea773793f0e6 | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | morrell/pulumi-azure-native | 3916e978382366607f3df0a669f24cb16293ff5e | cd3ba4b9cb08c5e1df7674c1c71695b80e443f08 | refs/heads/master | 2023-06-20T19:37:05.414924 | 2021-07-19T20:57:53 | 2021-07-19T20:57:53 | 387,815,163 | 0 | 0 | Apache-2.0 | 2021-07-20T14:18:29 | 2021-07-20T14:18:28 | null | UTF-8 | Python | false | false | 1,075 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
from enum import Enum
__all__ = [
'ManagedIdentityTypes',
'MonitoringStatus',
'ProvisioningState',
'TagAction',
]
class ManagedIdentityTypes(str, Enum):
"""
Managed identity type.
"""
SYSTEM_ASSIGNED = "SystemAssigned"
class MonitoringStatus(str, Enum):
"""
Flag specifying if the resource monitoring is enabled or disabled.
"""
ENABLED = "Enabled"
DISABLED = "Disabled"
class ProvisioningState(str, Enum):
"""
Provisioning state of the monitoring tag rules.
"""
ACCEPTED = "Accepted"
CREATING = "Creating"
UPDATING = "Updating"
DELETING = "Deleting"
SUCCEEDED = "Succeeded"
FAILED = "Failed"
CANCELED = "Canceled"
DELETED = "Deleted"
NOT_SPECIFIED = "NotSpecified"
class TagAction(str, Enum):
"""
Valid actions for a filtering tag.
"""
INCLUDE = "Include"
EXCLUDE = "Exclude"
| [
"[email protected]"
] | |
32c25525e3d4a909105ca7adf465fa65d2316389 | fb909b0716f62ae118afa7d505cbcbd28f62bc63 | /venv/lib/python3.6/site-packages/tracking/migrations/0040_auto_20201031_0439.py | 3f0a6705ae023c6014d6e71b02ccf0bc7cc31a78 | [] | no_license | dkalola/JustAsk-Final | a5b951462cd3c88eb84320bb8fcf10c32f959090 | c2e7c2ffae4d3c2d870d5ba5348a6bae62db5319 | refs/heads/main | 2023-05-24T16:02:17.425251 | 2021-06-16T19:33:52 | 2021-06-16T19:33:52 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 728 | py | # Generated by Django 3.1.1 on 2020-10-31 04:39
import datetime
from django.db import migrations, models
from django.utils.timezone import utc
class Migration(migrations.Migration):
dependencies = [
('tracking', '0039_auto_20201031_0438'),
]
operations = [
migrations.AlterField(
model_name='visitor',
name='last_update',
field=models.DateTimeField(default=datetime.datetime(2020, 10, 31, 4, 39, 1, 444344, tzinfo=utc)),
),
migrations.AlterField(
model_name='visitor',
name='session_start',
field=models.DateTimeField(default=datetime.datetime(2020, 10, 31, 4, 39, 1, 444306, tzinfo=utc)),
),
]
| [
"[email protected]"
] | |
8029f19217e88483f303beadc6e26e39cdb97dd0 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02400/s282700233.py | 164a7ba186abced630632516a93171a862b5949b | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 84 | py | import math
r = float(input())
S = math.pi * r**2
L = 2 * math.pi * r
print(S, L) | [
"[email protected]"
] | |
d80d5e79d8651a54d21a6514d4614a3945acca00 | 5381c2f94c9c11a0b9678378bbf0ea783f6969f8 | /calc/pycalc/safe.py | dbc124b5af8cac08b789bd2fe4c7cbe9d55af647 | [] | no_license | Bowserinator/AegisCommand | ccdcf2807e63e633bd9bb261699ff18d79bc275f | 2b226751f6302361cffef42378be4174621e372a | refs/heads/master | 2021-01-02T23:45:47.845299 | 2017-08-06T19:05:30 | 2017-08-06T19:05:30 | 99,507,460 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,831 | py | """Safety script to delete unsafe variables in the code
Kinda useless but I guess some people would find it better to have it"""
unsafe = [
"__import__",
"import",
"decode",
"encode",
"eval",
"exec",
"open",
"sys",
"os",
"file",
"imp",
"class",
"assert",
"def",
"del",
"global",
"raise",
"with",
"while",
"return",
"yeild",
"from",
"pass",
"lambda",
"nonlocal"
]
from functions import *
from complexdecimal import ComplexDecimal
from date import Date
"""Define list of allowed functions in calc"""
safe_dict = {}
safe_dict["sin"] = sin
safe_dict["cos"] = cos
safe_dict["tan"] = tan
safe_dict["asin"] = asin
safe_dict["acos"] = acos
safe_dict["atan"] = atan
safe_dict["sinh"] = sinh
safe_dict["cosh"] = cosh
safe_dict["tanh"] = tanh
safe_dict["asinh"] = asinh
safe_dict["acosh"] = acosh
safe_dict["atanh"] = atanh
safe_dict["sqrt"] = sqrt
safe_dict["abs"] = abs
safe_dict["log"] = log
safe_dict["fact"] = factorial
safe_dict["factorial"] = factorial
safe_dict["double_fact"] = double_fact
safe_dict["ceil"] = ceil
safe_dict["floor"] = floor
safe_dict["exp"] = exp
safe_dict["ln"] = ln
safe_dict["deg"] = degree
safe_dict["rad"] = radian
safe_dict["degrees"] = degree
safe_dict["radians"] = radian
safe_dict["grad_to_rad"] = grad_to_rad
safe_dict["rad_to_grad"] = rad_to_grad
safe_dict["isPrime"] = isPrime
safe_dict["nCr"] = nCr
safe_dict["nPr"] = nPr
safe_dict["round"] = round
safe_dict["Re"] = Re
safe_dict["Im"] = Im
safe_dict["conj"] = conj
safe_dict["random"] = rand
safe_dict["uniform"] = uniform
safe_dict["gcf"] = gcf
safe_dict["gcd"] = gcf
safe_dict["hcf"] = gcf
safe_dict["lcm"] = lcm
safe_dict["factor"] = factors
safe_dict["ComplexDecimal"] = ComplexDecimal
safe_dict["Date"] = Date
safe_dict["Time"] = Date | [
"[email protected]"
] | |
6ff64b329113605c8ac4da135caca84159e6afe4 | aa5050aeccdd649730c1163e56e98bcd919e460d | /Tree/树的子结构/HasSubtree.py | dba29b7948a5854b5f3feb5909279e8b7ae2c156 | [] | no_license | darrenzhang1007/Algorithm | 482ad837fc7135ba521d73f21989d2326953d506 | 8e121225b7952ef0aa45d2d4970874be944ae93a | refs/heads/master | 2022-12-02T17:44:41.934246 | 2020-08-16T15:43:42 | 2020-08-16T15:43:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,614 | py | # -*- coding:utf-8 -*-
class TreeNode:
def __init__(self, x):
self.val = x
self.left = None
self.right = None
class Solution:
def HasSubtree(self, pRoot1, pRoot2):
# write code here
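        # Substructure check: when root values match, hasEqual compares
        # pRoot2 against pRoot1 node-by-node (a missing child in pRoot2
        # matches anything); otherwise keep searching pRoot1's subtrees.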
if pRoot2 is None or pRoot1 is None:
return False
def hasEqual(pRoot1, pRoot2):
if pRoot1 is None:
return False
if pRoot1.val == pRoot2.val:
if pRoot2.left is None:
leftEqual = True
else:
leftEqual = hasEqual(pRoot1.left, pRoot2.left)
if pRoot2.right is None:
rightEqual = True
else:
rightEqual = hasEqual(pRoot1.right, pRoot2.right)
return leftEqual and rightEqual
return False
if pRoot2.val == pRoot1.val:
ret = hasEqual(pRoot1, pRoot2)
if ret:
return True
ret = self.HasSubtree(pRoot1.left, pRoot2)
if ret:
return True
ret = self.HasSubtree(pRoot1.right, pRoot2)
return ret
if __name__ == '__main__':
t1 = TreeNode(1)
t2 = TreeNode(2)
t3 = TreeNode(3)
t4 = TreeNode(4)
t5 = TreeNode(5)
t6 = TreeNode(6)
t7 = TreeNode(7)
t8 = TreeNode(8)
t9 = TreeNode(3)
t10 = TreeNode(6)
t11 = TreeNode(7)
t1.left = t2
t1.right = t3
t2.left = t4
t2.right = t5
t3.left = t6
t3.right = t7
t6.right = t8
t9.left = t10
t9.right = t11
s = Solution()
print(s.HasSubtree(t1, t9))
| [
"[email protected]"
] | |
6687abcc4ee980ffd28c09a14f24077d5e749ae6 | 42b9bafc3c757543328d93fb60269ad4255aae17 | /cashier/resources.py | c2beeae2162a11491ed2aae2133b94bc811190ed | [
"MIT"
] | permissive | mejeng/kasir | 4fe66d1828e72b64d770426d71185cdd3c54127e | cc6f9158b61c0cb45078ddf798af9588c8771311 | refs/heads/master | 2020-09-25T03:36:10.144439 | 2019-11-30T07:59:23 | 2019-11-30T07:59:23 | 225,908,795 | 2 | 0 | MIT | 2019-12-04T16:21:15 | 2019-12-04T16:21:15 | null | UTF-8 | Python | false | false | 174 | py | from import_export import resources
from .models import DaftarTransaksi
class TransactionResources(resources.ModelResource):
class Meta:
model = DaftarTransaksi
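# Usage sketch (illustrative; assumes the standard django-import-export API):
#     dataset = TransactionResources().export()
#     with open("transaksi.csv", "w") as f:
#         f.write(dataset.csv)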
| [
"[email protected]"
] | |
98a517e82787535c19015d8f41d39aa87c68a537 | 6e47be4e22ab76a8ddd7e18c89f5dc4f18539744 | /venv/openshift/lib/python3.6/site-packages/kubernetes/client/models/v1_server_address_by_client_cidr.py | 6997dea23e00ca144b27a0e500d5e3e95206dbf1 | [] | no_license | georgi-mobi/redhat_ocp4.5_training | 21236bb19d04a469c95a8f135188d3d1ae473764 | 2ccaa90e40dbbf8a18f668a5a7b0d5bfaa1db225 | refs/heads/main | 2023-03-30T10:47:08.687074 | 2021-04-01T05:25:49 | 2021-04-01T05:25:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,650 | py | # coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.12.11
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class V1ServerAddressByClientCIDR(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'client_cidr': 'str',
'server_address': 'str'
}
attribute_map = {
'client_cidr': 'clientCIDR',
'server_address': 'serverAddress'
}
def __init__(self, client_cidr=None, server_address=None):
"""
V1ServerAddressByClientCIDR - a model defined in Swagger
"""
self._client_cidr = None
self._server_address = None
self.discriminator = None
self.client_cidr = client_cidr
self.server_address = server_address
@property
def client_cidr(self):
"""
Gets the client_cidr of this V1ServerAddressByClientCIDR.
The CIDR with which clients can match their IP to figure out the server address that they should use.
:return: The client_cidr of this V1ServerAddressByClientCIDR.
:rtype: str
"""
return self._client_cidr
@client_cidr.setter
def client_cidr(self, client_cidr):
"""
Sets the client_cidr of this V1ServerAddressByClientCIDR.
The CIDR with which clients can match their IP to figure out the server address that they should use.
:param client_cidr: The client_cidr of this V1ServerAddressByClientCIDR.
:type: str
"""
if client_cidr is None:
raise ValueError("Invalid value for `client_cidr`, must not be `None`")
self._client_cidr = client_cidr
@property
def server_address(self):
"""
Gets the server_address of this V1ServerAddressByClientCIDR.
Address of this server, suitable for a client that matches the above CIDR. This can be a hostname, hostname:port, IP or IP:port.
:return: The server_address of this V1ServerAddressByClientCIDR.
:rtype: str
"""
return self._server_address
@server_address.setter
def server_address(self, server_address):
"""
Sets the server_address of this V1ServerAddressByClientCIDR.
Address of this server, suitable for a client that matches the above CIDR. This can be a hostname, hostname:port, IP or IP:port.
:param server_address: The server_address of this V1ServerAddressByClientCIDR.
:type: str
"""
if server_address is None:
raise ValueError("Invalid value for `server_address`, must not be `None`")
self._server_address = server_address
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, V1ServerAddressByClientCIDR):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
| [
"[email protected]"
] | |
83d741e529035424bfb173e6f1d599799aa25c89 | 7c15f211adc9e9eb9f66ccdd570c9f38dff7ea8d | /packages/autorest.python/test/vanilla/version-tolerant/Expected/AcceptanceTests/RequiredOptionalVersionTolerant/requiredoptionalversiontolerant/_client.py | 76b0fc3cb80ffb00dc5a9f3edc0074f398f2c82d | [
"LicenseRef-scancode-generic-cla",
"MIT"
] | permissive | Azure/autorest.python | cc4bfbf91ae11535731cad37cedd6b733edf1ebd | a00d7aaa3753ef05cb5a0d38c664a90869478d44 | refs/heads/main | 2023-09-03T06:58:44.246200 | 2023-08-31T20:11:51 | 2023-08-31T20:11:51 | 100,315,955 | 47 | 40 | MIT | 2023-09-14T21:00:21 | 2017-08-14T22:58:33 | Python | UTF-8 | Python | false | false | 4,073 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from copy import deepcopy
from typing import Any, Optional
from azure.core import PipelineClient
from azure.core.rest import HttpRequest, HttpResponse
from ._configuration import AutoRestRequiredOptionalTestServiceConfiguration
from ._serialization import Deserializer, Serializer
from .operations import ExplicitOperations, ImplicitOperations
class AutoRestRequiredOptionalTestService: # pylint: disable=client-accepts-api-version-keyword
"""Test Infrastructure for AutoRest.
:ivar implicit: ImplicitOperations operations
:vartype implicit: requiredoptionalversiontolerant.operations.ImplicitOperations
:ivar explicit: ExplicitOperations operations
:vartype explicit: requiredoptionalversiontolerant.operations.ExplicitOperations
:param required_global_path: number of items to skip. Required.
:type required_global_path: str
:param required_global_query: number of items to skip. Required.
:type required_global_query: str
:param optional_global_query: number of items to skip. Default value is None.
:type optional_global_query: int
:keyword endpoint: Service URL. Default value is "http://localhost:3000".
:paramtype endpoint: str
"""
def __init__( # pylint: disable=missing-client-constructor-parameter-credential
self,
required_global_path: str,
required_global_query: str,
optional_global_query: Optional[int] = None,
*,
endpoint: str = "http://localhost:3000",
**kwargs: Any
) -> None:
self._config = AutoRestRequiredOptionalTestServiceConfiguration(
required_global_path=required_global_path,
required_global_query=required_global_query,
optional_global_query=optional_global_query,
**kwargs
)
self._client: PipelineClient = PipelineClient(base_url=endpoint, config=self._config, **kwargs)
self._serialize = Serializer()
self._deserialize = Deserializer()
self._serialize.client_side_validation = False
self.implicit = ImplicitOperations(self._client, self._config, self._serialize, self._deserialize)
self.explicit = ExplicitOperations(self._client, self._config, self._serialize, self._deserialize)
def send_request(self, request: HttpRequest, **kwargs: Any) -> HttpResponse:
"""Runs the network request through the client's chained policies.
>>> from azure.core.rest import HttpRequest
>>> request = HttpRequest("GET", "https://www.example.org/")
<HttpRequest [GET], url: 'https://www.example.org/'>
>>> response = client.send_request(request)
<HttpResponse: 200 OK>
For more information on this code flow, see https://aka.ms/azsdk/dpcodegen/python/send_request
:param request: The network request you want to make. Required.
:type request: ~azure.core.rest.HttpRequest
:keyword bool stream: Whether the response payload will be streamed. Defaults to False.
:return: The response of your network call. Does not do error handling on your response.
:rtype: ~azure.core.rest.HttpResponse
"""
request_copy = deepcopy(request)
request_copy.url = self._client.format_url(request_copy.url)
return self._client.send_request(request_copy, **kwargs)
def close(self) -> None:
self._client.close()
def __enter__(self) -> "AutoRestRequiredOptionalTestService":
self._client.__enter__()
return self
def __exit__(self, *exc_details: Any) -> None:
self._client.__exit__(*exc_details)
| [
"[email protected]"
] | |
d2741b142ac72bf65915d1fca8e80a9bcbbb454d | f3caf3519b410f1ee98d1e55f781bb60132f211a | /sesion_4/robotai.py | 28a2c2718d3e521c57f770af2bd7a7001c1ff91c | [] | no_license | diegotriana11/python-master | 3895f3cc41cf7f0fe474b522162670ec6aaeccba | e7b654870896d2c94e2be24d5683574aaf6cb44b | refs/heads/master | 2020-09-07T16:58:02.727670 | 2016-09-25T19:41:58 | 2016-09-25T19:41:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 826 | py | from robot import Robot
import matplotlib.pyplot as plot
import math
class RobotAI(Robot):
    def seguir(self, xo, yo):
        # u = heading unit vector, v = vector from the robot to the target
        ux = math.cos(self.t)
        uy = math.sin(self.t)
        vx = xo - self.x
        vy = yo - self.y
        # d = u.v and r = |v|, so d / r = cos(angle between heading and target)
        d = ux * vx + uy * vy
        r = (vx**2 + vy**2)**0.5
        if d >= r:
            self.girar(-0.8)
        elif d < r:
            self.girar(0.8)
        # advance quickly only when roughly facing the target
        if abs(math.acos(d / r)) <= 0.8:
            self.mover(0.1)
        else:
            self.mover(0.01)
if __name__ == "__main__":
xo = -5
yo = 4
r = RobotAI(0, 0, 0)
# plot.ion()
while True:
# plot.clf()
plot.axis([-10, 10, -10, 10])
plot.autoscale(False)
r.seguir(xo, yo)
plot.scatter(xo, yo)
r.dibujar()
plot.pause(0.00001)
#r.log() | [
"[email protected]"
] | |
39974f6d1e114391387791611cadc1985d098091 | 49536aafb22a77a6caf249c7fadef46d63d24dfe | /tensorflow/tensorflow/contrib/data/python/kernel_tests/iterator_ops_test.py | 64eedecb5cf920e7c6b89dd951b1cb6b6e11009e | [
"Apache-2.0"
] | permissive | wangzhi01/deeplearning-1 | 4e5ad93f0d9ecd302b74352f80fe1fa6ae70bf0d | 46ab82253d956953b8aa98e97ceb6cd290e82288 | refs/heads/master | 2020-05-28T03:14:55.687567 | 2018-09-12T16:52:09 | 2018-09-12T16:52:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 25,863 | py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the experimental input pipeline ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import numpy as np
from tensorflow.contrib.data.python.ops import dataset_ops
from tensorflow.contrib.data.python.ops import readers
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.client import session
from tensorflow.python.data.ops import iterator_ops
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import function
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import functional_ops
from tensorflow.python.ops import gen_dataset_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import io_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import parsing_ops
from tensorflow.python.ops import script_ops
from tensorflow.python.platform import test
from tensorflow.python.training import server_lib
class IteratorTest(test.TestCase):
def testAttemptingGradientsRaiseExceptions(self):
component = constant_op.constant([1])
side = constant_op.constant(0)
add = lambda x: x + side
dataset = dataset_ops.Dataset.from_tensor_slices(component).map(add)
value = dataset.make_one_shot_iterator().get_next()
with self.assertRaisesRegexp(LookupError, "No gradient defined"):
gradients_impl.gradients(value, component)
with self.assertRaisesRegexp(LookupError, "No gradient defined"):
gradients_impl.gradients(value, side)
with self.assertRaisesRegexp(LookupError, "No gradient defined"):
gradients_impl.gradients(value, [component, side])
def testOneShotIterator(self):
components = (np.arange(7),
np.array([[1, 2, 3]]) * np.arange(7)[:, np.newaxis],
np.array(37.0) * np.arange(7))
def _map_fn(x, y, z):
return math_ops.square(x), math_ops.square(y), math_ops.square(z)
iterator = (dataset_ops.Dataset.from_tensor_slices(components).map(_map_fn)
.repeat(14).make_one_shot_iterator())
get_next = iterator.get_next()
self.assertEqual([c.shape[1:] for c in components],
[t.shape for t in get_next])
with self.test_session() as sess:
for _ in range(14):
for i in range(7):
result = sess.run(get_next)
for component, result_component in zip(components, result):
self.assertAllEqual(component[i]**2, result_component)
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
def testOneShotIteratorCaptureByValue(self):
components = (np.arange(7),
np.array([[1, 2, 3]]) * np.arange(7)[:, np.newaxis],
np.array(37.0) * np.arange(7))
tensor_components = tuple([ops.convert_to_tensor(c) for c in components])
def _map_fn(x, y, z):
return math_ops.square(x), math_ops.square(y), math_ops.square(z)
iterator = (dataset_ops.Dataset.from_tensor_slices(tensor_components)
.map(_map_fn).repeat(14).make_one_shot_iterator())
get_next = iterator.get_next()
self.assertEqual([c.shape[1:] for c in components],
[t.shape for t in get_next])
with self.test_session() as sess:
for _ in range(14):
for i in range(7):
result = sess.run(get_next)
for component, result_component in zip(components, result):
self.assertAllEqual(component[i]**2, result_component)
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
def testOneShotIteratorInsideContainer(self):
components = (np.arange(7),
np.array([[1, 2, 3]]) * np.arange(7)[:, np.newaxis],
np.array(37.0) * np.arange(7))
def within_container():
def _map_fn(x, y, z):
return math_ops.square(x), math_ops.square(y), math_ops.square(z)
iterator = (dataset_ops.Dataset.from_tensor_slices(components)
.map(_map_fn).repeat(14).make_one_shot_iterator())
return iterator.get_next()
server = server_lib.Server.create_local_server()
# Create two iterators within unique containers, and run them to
# make sure that the resources aren't shared.
#
# The test below would fail if cname were the same across both
# sessions.
for i in range(2):
with session.Session(server.target) as sess:
cname = "iteration%d" % i
with ops.container(cname):
get_next = within_container()
for _ in range(14):
for i in range(7):
result = sess.run(get_next)
for component, result_component in zip(components, result):
self.assertAllEqual(component[i]**2, result_component)
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
def testOneShotIteratorNonBlocking(self):
dataset = dataset_ops.Dataset.from_tensors([1, 2, 3]).map(lambda x: x * x)
iterator = dataset.make_one_shot_iterator()
next_element = iterator.get_next()
# Create a session with a single thread to ensure that the
# one-shot iterator initializer does not deadlock.
config = config_pb2.ConfigProto(inter_op_parallelism_threads=1,
use_per_session_threads=True)
with session.Session(config=config) as sess:
self.assertAllEqual([1, 4, 9], sess.run(next_element))
with self.assertRaises(errors.OutOfRangeError):
sess.run(next_element)
# Test with multiple threads invoking the one-shot iterator concurrently.
with session.Session(config=config) as sess:
results = []
def consumer_thread():
try:
results.append(sess.run(next_element))
except errors.OutOfRangeError:
results.append(None)
num_threads = 8
threads = [
self.checkedThread(consumer_thread) for _ in range(num_threads)]
for t in threads:
t.start()
for t in threads:
t.join()
self.assertEqual(num_threads, len(results))
self.assertEqual(num_threads - 1,
len([None for r in results if r is None]))
self.assertAllEqual([[1, 4, 9]], [r for r in results if r is not None])
def testOneShotIteratorInitializerFails(self):
# Define a dataset whose initialization will always fail.
dataset = dataset_ops.Dataset.from_tensors(
array_ops.check_numerics(
constant_op.constant(1.0) / constant_op.constant(0.0), "oops"))
iterator = dataset.make_one_shot_iterator()
next_element = iterator.get_next()
with self.test_session() as sess:
with self.assertRaisesRegexp(errors.InvalidArgumentError, "oops"):
sess.run(next_element)
# Test that subsequent attempts to use the iterator also fail.
with self.assertRaisesRegexp(errors.InvalidArgumentError, "oops"):
sess.run(next_element)
with self.test_session() as sess:
def consumer_thread():
with self.assertRaisesRegexp(errors.InvalidArgumentError, "oops"):
sess.run(next_element)
num_threads = 8
threads = [
self.checkedThread(consumer_thread) for _ in range(num_threads)]
for t in threads:
t.start()
for t in threads:
t.join()
def testSimpleSharedResource(self):
components = (
np.array(1, dtype=np.int64),
np.array([1, 2, 3], dtype=np.int64),
np.array(37.0, dtype=np.float64)
)
server = server_lib.Server.create_local_server()
# Create two non-overlapping sessions that share the same iterator
# resource on the same server, and verify that an action of the
# first session (initializing the iterator) is visible in the
# second session.
with ops.Graph().as_default():
iterator = (dataset_ops.Dataset.from_tensors(components)
.map(lambda x, y, z: (x, y, z)).make_initializable_iterator(
shared_name="shared_iterator"))
init_op = iterator.initializer
get_next = iterator.get_next()
with session.Session(server.target) as sess:
sess.run(init_op)
results = sess.run(get_next)
for component, result_component in zip(components, results):
self.assertAllEqual(component, result_component)
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
# Re-initialize the iterator in the first session.
sess.run(init_op)
with ops.Graph().as_default():
# Re-define the iterator manually, without defining any of the
# functions in this graph, to ensure that we are not
# accidentally redefining functions with the same names in the
# new graph.
iterator = iterator_ops.Iterator.from_structure(
shared_name="shared_iterator",
output_types=(dtypes.int64, dtypes.int64, dtypes.float64),
output_shapes=([], [3], []))
get_next = iterator.get_next()
with session.Session(server.target) as sess:
# Use the iterator without re-initializing in the second session.
results = sess.run(get_next)
for component, result_component in zip(components, results):
self.assertAllEqual(component, result_component)
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
def testNotInitializedError(self):
components = (np.array(1), np.array([1, 2, 3]), np.array(37.0))
iterator = (dataset_ops.Dataset.from_tensors(components)
.make_initializable_iterator())
get_next = iterator.get_next()
with self.test_session() as sess:
with self.assertRaisesRegexp(errors.FailedPreconditionError,
"iterator has not been initialized"):
sess.run(get_next)
def testReinitializableIterator(self):
dataset_3 = dataset_ops.Dataset.from_tensors(
constant_op.constant([1, 2, 3]))
dataset_4 = dataset_ops.Dataset.from_tensors(
constant_op.constant([4, 5, 6, 7]))
iterator = iterator_ops.Iterator.from_structure(dataset_3.output_types,
[None])
dataset_3_init_op = iterator.make_initializer(dataset_3)
dataset_4_init_op = iterator.make_initializer(dataset_4)
get_next = iterator.get_next()
self.assertEqual(dataset_3.output_types, iterator.output_types)
self.assertEqual(dataset_4.output_types, iterator.output_types)
self.assertEqual([None], iterator.output_shapes.as_list())
with self.test_session() as sess:
# The iterator is initially uninitialized.
with self.assertRaises(errors.FailedPreconditionError):
sess.run(get_next)
# Initialize with one dataset.
sess.run(dataset_3_init_op)
self.assertAllEqual([1, 2, 3], sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
# Initialize with a different dataset.
sess.run(dataset_4_init_op)
self.assertAllEqual([4, 5, 6, 7], sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
# Reinitialize with the first dataset.
sess.run(dataset_3_init_op)
self.assertAllEqual([1, 2, 3], sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
def testReinitializableIteratorStaticErrors(self):
# Non-matching structure for types and shapes.
with self.assertRaises(TypeError):
iterator = iterator_ops.Iterator.from_structure((dtypes.int64,
dtypes.float64), [None])
# Test validation of dataset argument.
iterator = iterator_ops.Iterator.from_structure((dtypes.int64,
dtypes.float64))
# Incompatible structure.
with self.assertRaises(ValueError):
iterator.make_initializer(
dataset_ops.Dataset.from_tensors(((constant_op.constant(
[1, 2, 3], dtype=dtypes.int64),), (constant_op.constant(
[4., 5., 6., 7.], dtype=dtypes.float64),))))
# Incompatible types.
with self.assertRaises(TypeError):
iterator.make_initializer(
dataset_ops.Dataset.from_tensors((constant_op.constant(
[1, 2, 3], dtype=dtypes.int32), constant_op.constant(
[4., 5., 6., 7.], dtype=dtypes.float32))))
# Incompatible shapes.
iterator = iterator_ops.Iterator.from_structure(
(dtypes.int64, dtypes.float64), ([None], []))
with self.assertRaises(TypeError):
iterator.make_initializer(
dataset_ops.Dataset.from_tensors((constant_op.constant(
[1, 2, 3], dtype=dtypes.int64), constant_op.constant(
[4., 5., 6., 7.], dtype=dtypes.float64))))
def testIteratorStringHandle(self):
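    # A "feedable" iterator: the string handle fed at sess.run() time selects
    # which concrete iterator the shared get_next() op pulls from.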
dataset_3 = dataset_ops.Dataset.from_tensor_slices([1, 2, 3])
dataset_4 = dataset_ops.Dataset.from_tensor_slices([10, 20, 30, 40])
iterator_3 = dataset_3.make_one_shot_iterator()
iterator_4 = dataset_4.make_one_shot_iterator()
handle_placeholder = array_ops.placeholder(dtypes.string, shape=[])
feedable_iterator = iterator_ops.Iterator.from_string_handle(
handle_placeholder, dataset_3.output_types, dataset_3.output_shapes)
next_element = feedable_iterator.get_next()
self.assertEqual(dataset_3.output_types, feedable_iterator.output_types)
self.assertEqual(dataset_4.output_types, feedable_iterator.output_types)
self.assertEqual([], feedable_iterator.output_shapes)
with self.test_session() as sess:
iterator_3_handle = sess.run(iterator_3.string_handle())
iterator_4_handle = sess.run(iterator_4.string_handle())
self.assertEqual(
10, sess.run(next_element,
feed_dict={handle_placeholder: iterator_4_handle}))
self.assertEqual(
1, sess.run(next_element,
feed_dict={handle_placeholder: iterator_3_handle}))
self.assertEqual(
20, sess.run(next_element,
feed_dict={handle_placeholder: iterator_4_handle}))
self.assertEqual(
2, sess.run(next_element,
feed_dict={handle_placeholder: iterator_3_handle}))
self.assertEqual(
30, sess.run(next_element,
feed_dict={handle_placeholder: iterator_4_handle}))
self.assertEqual(
3, sess.run(next_element,
feed_dict={handle_placeholder: iterator_3_handle}))
self.assertEqual(
40, sess.run(next_element,
feed_dict={handle_placeholder: iterator_4_handle}))
with self.assertRaises(errors.OutOfRangeError):
sess.run(next_element,
feed_dict={handle_placeholder: iterator_3_handle})
with self.assertRaises(errors.OutOfRangeError):
sess.run(next_element,
feed_dict={handle_placeholder: iterator_4_handle})
def testIteratorStringHandleError(self):
dataset_int_scalar = (dataset_ops.Dataset.from_tensor_slices([1, 2,
3]).repeat())
dataset_float_vector = (dataset_ops.Dataset.from_tensors([1.0, 2.0, 3.0]))
handle_placeholder = array_ops.placeholder(dtypes.string, shape=[])
feedable_int_scalar = iterator_ops.Iterator.from_string_handle(
handle_placeholder, dtypes.int32, [])
feedable_int_vector = iterator_ops.Iterator.from_string_handle(
handle_placeholder, dtypes.int32, [None])
feedable_int_any = iterator_ops.Iterator.from_string_handle(
handle_placeholder, dtypes.int32)
with self.test_session() as sess:
handle_int_scalar = sess.run(
dataset_int_scalar.make_one_shot_iterator().string_handle())
handle_float_vector = sess.run(
dataset_float_vector.make_one_shot_iterator().string_handle())
self.assertEqual(1,
sess.run(
feedable_int_scalar.get_next(),
feed_dict={handle_placeholder: handle_int_scalar}))
self.assertEqual(2,
sess.run(
feedable_int_any.get_next(),
feed_dict={handle_placeholder: handle_int_scalar}))
with self.assertRaises(errors.InvalidArgumentError):
print(sess.run(
feedable_int_vector.get_next(),
feed_dict={handle_placeholder: handle_int_scalar}))
with self.assertRaises(errors.InvalidArgumentError):
print(sess.run(
feedable_int_vector.get_next(),
feed_dict={handle_placeholder: handle_float_vector}))
def testRemoteIteratorUsingRemoteCallOpDirectSession(self):
worker_config = config_pb2.ConfigProto()
worker_config.device_count["CPU"] = 3
with ops.device("/job:localhost/replica:0/task:0/cpu:1"):
dataset_3 = dataset_ops.Dataset.from_tensor_slices([1, 2, 3])
iterator_3 = dataset_3.make_one_shot_iterator()
iterator_3_handle = iterator_3.string_handle()
@function.Defun(dtypes.string)
def _remote_fn(h):
remote_iterator = iterator_ops.Iterator.from_string_handle(
h, dataset_3.output_types, dataset_3.output_shapes)
return remote_iterator.get_next()
with ops.device("/job:localhost/replica:0/task:0/cpu:0"):
target_placeholder = array_ops.placeholder(dtypes.string, shape=[])
remote_op = functional_ops.remote_call(
args=[iterator_3_handle],
Tout=[dtypes.int32],
f=_remote_fn,
target=target_placeholder)
with self.test_session(config=worker_config) as sess:
elem = sess.run(
remote_op,
feed_dict={
target_placeholder: "/job:localhost/replica:0/task:0/cpu:1"
})
self.assertEqual(elem, [1])
# Fails when target is cpu:2 where the resource is not located.
with self.assertRaises(errors.InvalidArgumentError):
sess.run(
remote_op,
feed_dict={
target_placeholder: "/job:localhost/replica:0/task:0/cpu:2"
})
elem = sess.run(
remote_op,
feed_dict={
target_placeholder: "/job:localhost/replica:0/task:0/cpu:1"
})
self.assertEqual(elem, [2])
elem = sess.run(
remote_op,
feed_dict={
target_placeholder: "/job:localhost/replica:0/task:0/cpu:1"
})
self.assertEqual(elem, [3])
with self.assertRaises(errors.OutOfRangeError):
sess.run(
remote_op,
feed_dict={
target_placeholder: "/job:localhost/replica:0/task:0/cpu:1"
})
def testRemoteIteratorUsingRemoteCallOpDirectSessionGPUCPU(self):
if not test_util.is_gpu_available():
self.skipTest("No GPU available")
with ops.device("/job:localhost/replica:0/task:0/cpu:0"):
dataset_3 = dataset_ops.Dataset.from_tensor_slices([1, 2, 3])
iterator_3 = dataset_3.make_one_shot_iterator()
iterator_3_handle = iterator_3.string_handle()
def _encode_raw(byte_array):
return bytes(bytearray(byte_array))
@function.Defun(dtypes.uint8)
def _remote_fn(h):
handle = script_ops.py_func(_encode_raw, [h], dtypes.string)
remote_iterator = iterator_ops.Iterator.from_string_handle(
handle, dataset_3.output_types, dataset_3.output_shapes)
return remote_iterator.get_next()
with ops.device("/job:localhost/replica:0/task:0/device:GPU:0"):
target_placeholder = array_ops.placeholder(dtypes.string, shape=[])
iterator_3_handle_uint8 = parsing_ops.decode_raw(
bytes=iterator_3_handle, out_type=dtypes.uint8)
remote_op = functional_ops.remote_call(
args=[iterator_3_handle_uint8],
Tout=[dtypes.int32],
f=_remote_fn,
target=target_placeholder)
with self.test_session() as sess:
elem = sess.run(
remote_op,
feed_dict={
target_placeholder: "/job:localhost/replica:0/task:0/cpu:0"
})
self.assertEqual(elem, [1])
elem = sess.run(
remote_op,
feed_dict={
target_placeholder: "/job:localhost/replica:0/task:0/cpu:0"
})
self.assertEqual(elem, [2])
elem = sess.run(
remote_op,
feed_dict={
target_placeholder: "/job:localhost/replica:0/task:0/cpu:0"
})
self.assertEqual(elem, [3])
with self.assertRaises(errors.OutOfRangeError):
sess.run(
remote_op,
feed_dict={
target_placeholder: "/job:localhost/replica:0/task:0/cpu:0"
})
def testIncorrectIteratorRestore(self):
def _path():
return os.path.join(self.get_temp_dir(), "iterator")
def _save_op(iterator_resource):
iterator_state_variant = gen_dataset_ops.serialize_iterator(
iterator_resource)
save_op = io_ops.write_file(
_path(), parsing_ops.serialize_tensor(iterator_state_variant))
return save_op
def _restore_op(iterator_resource):
iterator_state_variant = parsing_ops.parse_tensor(
io_ops.read_file(_path()), dtypes.variant)
restore_op = gen_dataset_ops.deserialize_iterator(iterator_resource,
iterator_state_variant)
return restore_op
def _build_range_dataset_graph():
start = 1
stop = 10
iterator = dataset_ops.Dataset.range(start,
stop).make_initializable_iterator()
init_op = iterator.initializer
get_next = iterator.get_next()
save_op = _save_op(iterator._iterator_resource)
restore_op = _restore_op(iterator._iterator_resource)
return init_op, get_next, save_op, restore_op
def _build_reader_dataset_graph():
filenames = ["test"] # Does not exist but we don't care in this test.
iterator = readers.FixedLengthRecordDataset(
filenames, 1, 0, 0).make_initializable_iterator()
init_op = iterator.initializer
get_next_op = iterator.get_next()
save_op = _save_op(iterator._iterator_resource)
restore_op = _restore_op(iterator._iterator_resource)
return init_op, get_next_op, save_op, restore_op
# Saving iterator for RangeDataset graph.
with ops.Graph().as_default() as g:
init_op, _, save_op, _ = _build_range_dataset_graph()
with self.test_session(graph=g) as sess:
sess.run(init_op)
sess.run(save_op)
# Attempt to restore the saved iterator into an IteratorResource of
# incompatible type. An iterator of RangeDataset has output type int64,
# while an iterator of FixedLengthRecordDataset has output type string.
# So an InvalidArgumentError should be raised by
# IteratorResource::set_iterator.
with ops.Graph().as_default() as g:
_, _, _, restore_op = _build_reader_dataset_graph()
with self.test_session(graph=g) as sess:
with self.assertRaises(errors.InvalidArgumentError):
sess.run(restore_op)
def testToSingleElement(self):
skip_value = array_ops.placeholder(dtypes.int64, shape=[])
take_value = array_ops.placeholder_with_default(
constant_op.constant(1, dtype=dtypes.int64), shape=[])
dataset = (dataset_ops.Dataset.range(100)
.skip(skip_value)
.map(lambda x: x * x)
.take(take_value))
element = dataset_ops.get_single_element(dataset)
with self.test_session() as sess:
self.assertEqual(0, sess.run(element, feed_dict={skip_value: 0}))
self.assertEqual(25, sess.run(element, feed_dict={skip_value: 5}))
self.assertEqual(100, sess.run(element, feed_dict={skip_value: 10}))
with self.assertRaisesRegexp(errors.InvalidArgumentError,
"Dataset was empty."):
sess.run(element, feed_dict={skip_value: 100})
with self.assertRaisesRegexp(errors.InvalidArgumentError,
"Dataset had more than one element."):
sess.run(element, feed_dict={skip_value: 0, take_value: 2})
if __name__ == "__main__":
test.main()
| [
"[email protected]"
] | |
ec9661055806bf608d680c31602cddf4f16afaff | b42850bc3e36bbd1683070393582617f2b3cd8e6 | /Exam_16_08_20/project/software/express_software.py | db3c3bd039c77f3f14e0dd0f082a137f77802d9b | [] | no_license | marianidchenko/Python_OOP | aecca18be6df3850c0efbf2fa6d25bf3ff53ae96 | 547c12cbdad5b8c16fa55bba6c03b71db181ad2b | refs/heads/main | 2023-07-09T05:42:43.863681 | 2021-08-14T14:55:51 | 2021-08-14T14:55:51 | 381,572,168 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 361 | py | from project.software.software import Software
class ExpressSoftware(Software):
TYPE = "Express"
memory_factor = 2
def __init__(self, name, capacity_consumption, memory_consumption):
new_memory = int(memory_consumption * ExpressSoftware.memory_factor)
super().__init__(name, ExpressSoftware.TYPE, capacity_consumption, new_memory) | [
"[email protected]"
] | |
d1452747a62b51f3b744fe1e987a746fb2a010b1 | 342a1ec794df5424bfc4f6af2cb8de415068201b | /oscar_promotions/conf.py | 04a41e4cddd3b7b455d944474aaf7f48062c75e4 | [] | no_license | penta-srl/django-oscar-promotions | c5d0b159950189f23852665ce7e3b3a2fe248bd5 | 65bdf39b48409311e7284fc0a12e8b2e17f176dd | refs/heads/master | 2020-07-06T23:48:45.660316 | 2019-07-08T19:23:15 | 2019-07-08T19:23:15 | 203,176,440 | 0 | 0 | null | 2019-08-19T13:16:55 | 2019-08-19T13:16:55 | null | UTF-8 | Python | false | false | 717 | py | from oscar.core.loading import get_class
SingleProduct = get_class('oscar_promotions.models', 'SingleProduct')
RawHTML = get_class('oscar_promotions.models', 'RawHTML')
Image = get_class('oscar_promotions.models', 'Image')
PagePromotion = get_class('oscar_promotions.models', 'PagePromotion')
AutomaticProductList = get_class('oscar_promotions.models', 'AutomaticProductList')
HandPickedProductList = get_class('oscar_promotions.models', 'HandPickedProductList')
MultiImage = get_class('oscar_promotions.models', 'MultiImage')
def get_promotion_classes():
return (RawHTML, Image, SingleProduct, AutomaticProductList,
HandPickedProductList, MultiImage)
PROMOTION_CLASSES = get_promotion_classes()
| [
"[email protected]"
] | |
d906fab608afb5da22ad7279b6627e64d47fe827 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02686/s748344982.py | 09fa7e43a826d467f581733c8790139b8003bb68 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 862 | py | # input
N = int(input())
S = [input() for _ in range(N)]
# process
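# After removing matched "()" pairs, each string collapses to ")" * cl + "(" * op.
# Greedy order: net-opening strings (cl <= op) sorted by cl ascending go first,
# then net-closing strings sorted by op descending, checking that the running
# balance never goes negative.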
T = []
for s in S:
while '()' in s:
s = s.replace('()', '')
T.append(s)
l1 = []
l2 = []
for t in T:
op = t.find('(')
if op < 0:
op = len(t)
cl = len(t) - op
if cl+op != 0:
if cl <= op:
l1.append((cl, op))
else:
l2.append((op, cl))
l1.sort()
l2.sort(reverse=True)
result = False
x = 0
if len(l1)+len(l2) == 0:
result = True
elif len(l1)>0 and len(l2)>0 and l1[0][0]+l2[-1][0] == 0:
for cl, op in l1:
x -= cl
if x < 0:
break
x += op
if x >= 0:
for op, cl in l2:
x -= cl
if x < 0:
break
x += op
if x == 0:
result = True
# output
print("Yes" if result else "No")
| [
"[email protected]"
] | |
988a51826c16601bdc825af89f23a17af69d3daa | 9d9eb39e6adc35926d9ca5c38d5bbd05ccc6d15b | /python/binary_tree/create_balanced_btree.py | 5e53687e1bcacc998a3609384e40dcbd86054e01 | [
"Unlicense"
] | permissive | amitsaha/learning | 8121d60639f64a2a517ffb855d73de083ebfb445 | 4c1d85adf8018465716a1e8a74afadfe5f5528a2 | refs/heads/master | 2023-03-07T13:55:12.871756 | 2022-11-08T06:45:23 | 2022-11-08T06:45:23 | 25,386,786 | 6 | 4 | Unlicense | 2023-02-25T00:55:16 | 2014-10-18T04:03:55 | Python | UTF-8 | Python | false | false | 709 | py | '''
Create a balanced binary search tree from a given sorted array
'''
class Node:
def __init__(self, value):
self.value = value
self.left = None
self.right = None
def preorder(root):
print root.value
if root.left:
preorder(root.left)
if root.right:
preorder(root.right)
def create_tree(arr, start, end):
if start > end:
return
mid = int((start + end)/2.0)
root_val = arr[mid]
root = Node(root_val)
root.left = create_tree(arr, start, mid-1)
root.right = create_tree(arr, mid+1, end)
return root
root = create_tree([1, 2, 3], 0, 2)
preorder(root)
root = create_tree([1, 2, 3, 4], 0, 3)
preorder(root)
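
# Hedged addition (not in the original file): a sanity check that the result is
# a valid BST — the inorder traversal must reproduce the sorted input array.
def inorder(root, out):
    if root is None:
        return out
    inorder(root.left, out)
    out.append(root.value)
    inorder(root.right, out)
    return out

assert inorder(create_tree([1, 2, 3, 4], 0, 3), []) == [1, 2, 3, 4]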
| [
"[email protected]"
] | |
289e1088e4ac6e54a7bd6c0dc0eb8024313a147a | 333fac4c6a47b2448eca1017a794c386672a4aba | /neighbourapp/migrations/0003_auto_20201103_2036.py | b285ede772a02a3b392c5fce9cc0ac7e9b5d8708 | [
"MIT"
] | permissive | mornicamwende/neighbourhood | 106412d39b55b52f6cf4fb034e54e294f5de03f9 | bf85d7afdb77b059856dc7ecc695cb79d8d1ffd0 | refs/heads/master | 2023-01-06T01:33:27.812531 | 2020-11-04T13:29:31 | 2020-11-04T13:29:31 | 308,653,821 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 394 | py | # Generated by Django 3.1.2 on 2020-11-03 17:36
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('neighbourapp', '0002_auto_20201103_2026'),
]
operations = [
migrations.AlterField(
model_name='post',
name='image',
field=models.ImageField(upload_to='media/'),
),
]
| [
"[email protected]"
] | |
5cfe9617298832ed1cb529b6d28f06ad8c37988c | a9e3f3ad54ade49c19973707d2beb49f64490efd | /Part-03-Understanding-Software-Crafting-Your-Own-Tools/models/edx-platform/lms/djangoapps/survey/tests/test_views.py | bcbf6f04e6125996702cb113e23943703d10372e | [
"AGPL-3.0-only",
"AGPL-3.0-or-later",
"MIT"
] | permissive | luque/better-ways-of-thinking-about-software | 8c3dda94e119f0f96edbfe5ba60ca6ec3f5f625d | 5809eaca7079a15ee56b0b7fcfea425337046c97 | refs/heads/master | 2021-11-24T15:10:09.785252 | 2021-11-22T12:14:34 | 2021-11-22T12:14:34 | 163,850,454 | 3 | 1 | MIT | 2021-11-22T12:12:31 | 2019-01-02T14:21:30 | JavaScript | UTF-8 | Python | false | false | 5,465 | py | """
Python tests for the Survey views
"""
import json
from collections import OrderedDict
from django.test.client import Client
from django.urls import reverse
from common.djangoapps.student.tests.factories import UserFactory
from lms.djangoapps.survey.models import SurveyAnswer, SurveyForm
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory
class SurveyViewsTests(ModuleStoreTestCase):
"""
All tests for the views.py file
"""
def setUp(self):
"""
Set up the test data used in the specific tests
"""
super().setUp()
self.client = Client()
# Create two accounts
self.password = 'abc'
self.student = UserFactory.create(username='student', email='[email protected]', password=self.password)
self.test_survey_name = 'TestSurvey'
self.test_form = '''
<input name="field1" /><input name="field2" /><select name="ddl"><option>1</option></select>
<textarea name="textarea" />
'''
self.student_answers = OrderedDict({
'field1': 'value1',
'field2': 'value2',
'ddl': '1',
'textarea': 'textarea'
})
self.course = CourseFactory.create(
display_name='Test Course',
course_survey_required=True,
course_survey_name=self.test_survey_name
)
self.survey = SurveyForm.create(self.test_survey_name, self.test_form)
self.view_url = reverse('view_survey', args=[self.test_survey_name])
self.postback_url = reverse('submit_answers', args=[self.test_survey_name])
self.client.login(username=self.student.username, password=self.password)
def test_unauthenticated_survey_view(self):
"""
Asserts that an unauthenticated user cannot access a survey
"""
anon_user = Client()
resp = anon_user.get(self.view_url)
assert resp.status_code == 302
def test_survey_not_found(self):
"""
Asserts that if we ask for a Survey that does not exist, then we get a 302 redirect
"""
resp = self.client.get(reverse('view_survey', args=['NonExisting']))
assert resp.status_code == 302
def test_authenticated_survey_view(self):
"""
Asserts that an authenticated user can see the survey
"""
resp = self.client.get(self.view_url)
# is the SurveyForm html present in the HTML response?
self.assertContains(resp, self.test_form)
def test_unauthenticated_survey_postback(self):
"""
Asserts that an anonymous user cannot answer a survey
"""
anon_user = Client()
resp = anon_user.post(
self.postback_url,
self.student_answers
)
assert resp.status_code == 302
def test_survey_postback_to_nonexisting_survey(self):
"""
Asserts that any attempts to post back to a non existing survey returns a 404
"""
resp = self.client.post(
reverse('submit_answers', args=['NonExisting']),
self.student_answers
)
assert resp.status_code == 404
def test_survey_postback(self):
"""
Asserts that a well formed postback of survey answers is properly stored in the
database
"""
resp = self.client.post(
self.postback_url,
self.student_answers
)
assert resp.status_code == 200
data = json.loads(resp.content.decode('utf-8'))
assert 'redirect_url' in data
answers = self.survey.get_answers(self.student)
assert answers[self.student.id] == self.student_answers
def test_strip_extra_fields(self):
"""
Verify that any not expected field name in the post-back is not stored
in the database
"""
data = dict.copy(self.student_answers)
data['csrfmiddlewaretoken'] = 'foo'
data['_redirect_url'] = 'bar'
data['course_id'] = str(self.course.id)
resp = self.client.post(
self.postback_url,
data
)
assert resp.status_code == 200
answers = self.survey.get_answers(self.student)
assert 'csrfmiddlewaretoken' not in answers[self.student.id]
assert '_redirect_url' not in answers[self.student.id]
assert 'course_id' not in answers[self.student.id]
# however we want to make sure we persist the course_id
answer_objs = SurveyAnswer.objects.filter(
user=self.student,
form=self.survey
)
for answer_obj in answer_objs:
assert str(answer_obj.course_key) == data['course_id']
def test_encoding_answers(self):
"""
Verify that if some potentially harmful input data is sent, that is is properly HTML encoded
"""
data = dict.copy(self.student_answers)
data['field1'] = '<script type="javascript">alert("Deleting filesystem...")</script>'
resp = self.client.post(
self.postback_url,
data
)
assert resp.status_code == 200
answers = self.survey.get_answers(self.student)
assert '<script type="javascript">alert("Deleting filesystem...")</script>' ==\
answers[self.student.id]['field1']
| [
"[email protected]"
] | |
c182670425a76e290a3d131e32c3fce1768e54e4 | 9c718b8964d476db4728fc0cf18e24292dd8cf60 | /MxOnline/MxOnline/urls.py | b6f6b97c405f6cd71efc76a9ad34b390a91b32ce | [] | no_license | 1400720231/Django-Projects | 960f9226e0f5c01628afd65b9a78e810fdeb1b83 | 72f96788163f7ffe76e7599966ddbfa1d2199926 | refs/heads/master | 2021-06-25T17:41:14.147011 | 2019-04-03T02:24:38 | 2019-04-03T02:24:38 | 114,955,012 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,372 | py | """MxOnline URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.10/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
# from django.contrib import admin
import xadmin
from django.views.generic import TemplateView  # generic view for rendering static template pages
from django.views.static import serve  # serves static/media files
from users.views import LoginView, RegisterView, ActiveUserView, ForgetPwdViws, ResetView, ModifyPwdView, LogoutView
from users.views import IndexView
from MxOnline.settings import MEDIA_ROOT
# STATIC_ROOT
urlpatterns = [
url(r'^xadmin/', xadmin.site.urls),
url(r'^$', IndexView.as_view(), name='index'),
url(r'^logout/$', LogoutView, name='logout'),
url(r'^login/$', LoginView.as_view(), name='login'),
url(r'^register/$', RegisterView.as_view(), name='register'),
url(r'^captcha/', include('captcha.urls')),
    url(r'^active/(?P<active_code>.*)/$', ActiveUserView.as_view(), name='user_active'),  # email activation url
url(r'^forget/$', ForgetPwdViws.as_view(), name='forget_pwd'),
    url(r'^reset/(?P<active_code>.*)/$', ResetView.as_view(), name='reset_pwd'),  # GET url for the password reset page
url(r'^modify/$', ModifyPwdView.as_view(), name='modify_pwd'),
    # organization and course-provider urls
    url(r'^org/', include('organization.urls', namespace='org')),
    # course-related urls
    url(r'^course/', include('courses.urls', namespace='course')),
    # serve uploaded media files
    url(r'^media/(?P<path>.*)/$', serve, {'document_root': MEDIA_ROOT}),
    # static file url when DEBUG = False
    # url(r'^static/(?P<path>.*)/$', serve, {'document_root': STATIC_ROOT}),
# user.views
url(r'^users/', include('users.urls', namespace='users'))
]
"""
1> handler404配置
全局404 配置, 名字是固定写法,django会自动识别的:handler404
2> 处理404状态码的视图函数配置
def page_not_found(request):
from django.shortcuts import render_to_response
response = render_to_response('404.html')
response.status_code = 404
return response
3> setting.py下记得把DEBUG=True 改为False
不然输入不存在的访问地址的时候2>中的函数无效。返回不了2>中函数的'404.html页面'
4> ALLOWED_HOSTS = ['*']
DEBUG = False的时候必须设置ALLOWED_HOSTS参数(原来为ALLOWED_HOSTS=[]),
这里的'*'表示所有客户端都可以访问
5> 静态文件重新访问服务配置
当DEBUG=False的时候,你会发现所有没有了css样式,因为此时django不会再帮你默认管理
这些样式文件了,一般来讲都是配置是再Apache,或者nginx上面的,所以我们像meida_root那样
配置serve函数
"""
# global 404 page handler
handler404 = 'users.views.page_not_found'
# global 500 page handler
handler500 = 'users.views.page_error'
| [
"[email protected]"
] | |
e56150b56fb4a814390034b0c1b3f2d090ca6f21 | 91d1a6968b90d9d461e9a2ece12b465486e3ccc2 | /appsync_write_f/resource_tag.py | e11a58114a16cee65546f618dc4f14e7184cc18e | [] | no_license | lxtxl/aws_cli | c31fc994c9a4296d6bac851e680d5adbf7e93481 | aaf35df1b7509abf5601d3f09ff1fece482facda | refs/heads/master | 2023-02-06T09:00:33.088379 | 2020-12-27T13:38:45 | 2020-12-27T13:38:45 | 318,686,394 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 499 | py | #!/usr/bin/python
# -*- codding: utf-8 -*-
import os
import sys
sys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(__file__))))
from common.execute_command import write_parameter
if __name__ == '__main__':
"""
    tag-resource : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/appsync/tag-resource.html
"""
write_parameter("appsync", "tag-resource") | [
"[email protected]"
] | |
adc32cb60c226e739cab0037aec5df3d8b0ede09 | 0b5c6244ff93d6bac085fe0309961e0ce5e8c004 | /conanfile.py | 920fe9a2c6a788aa79bf1fd9229e60e1447b50ad | [
"MIT"
] | permissive | jgsogo/conan-cpython | b46ee2b82e157a5035caf0fa42bc8e1f420f0432 | 12526431898cc4002e78ac0f12a1c2c19c45fab6 | refs/heads/master | 2020-05-18T13:32:58.670959 | 2019-05-01T16:07:12 | 2019-05-01T16:07:12 | 184,443,371 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,232 | py | from conans import ConanFile, tools, AutoToolsBuildEnvironment
import shutil
import os
class CPython(ConanFile):
name = "cpython"
version = "3.7.3"
settings = "os", "arch", "compiler", "build_type"
def source(self):
url = "https://github.com/python/cpython/archive/v{v}.tar.gz".format(v=self.version)
tools.get(url)
shutil.move("cpython-{v}".format(v=self.version), self.name)
# Patch some Python modules to ensure 'is_python_build' returns True
tools.replace_in_file(os.path.join(self.source_folder, self.name, "Lib", "sysconfig.py"),
"_sys_home = getattr(sys, '_home', None)",
"_sys_home = None # Force it (we are calling this script from installed python)")
def build(self):
autotools = AutoToolsBuildEnvironment(self)
autotools.configure(configure_dir=self.name)
autotools.make()
#autotools.install()
def package(self):
autotools = AutoToolsBuildEnvironment(self)
autotools.install()
def package_info(self):
self.cpp_info.libs = ["python3.7m", "intl", ] # TODO: Handle intl
self.cpp_info.includedirs = ["include/python3.7m", ]
| [
"[email protected]"
] | |
606160379df1371f13cbf233b2360cdc0625e5e2 | 2a54e8d6ed124c64abb9e075cc5524bb859ba0fa | /.history/3-OO-Python/14-multiple-inheritance_20200417235022.py | fa05f62062a5c7bdc540b7d8c3135b15d4cb001f | [] | no_license | CaptainStorm21/Python-Foundation | 01b5fbaf7a913506518cf22e0339dd948e65cea1 | a385adeda74f43dd7fb2d99d326b0be23db25024 | refs/heads/master | 2021-05-23T01:29:18.885239 | 2020-04-23T19:18:06 | 2020-04-23T19:18:06 | 253,171,611 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 468 | py | #multiple inheritance
class User(object):
def __init__ (self, name, power):
self.name = name
self.power = power
print('init complete')
def attack(self):
print(f'attacking {self.power} power')
class Wizard(User):
    def __init__(self, name, power, email):
        super().__init__(name, power)
        self.email = email

wizard1 = Wizard('Merlin', 50, '[email protected]')
print(wizard1.email)
"[email protected]"
] | |
c78331a1949f81f08f8e919c0054d33c36185ece | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_272/ch25_2019_03_22_10_51_48_563237.py | edf748e7c17e8fc0272a7e4d4c199205c36332a8 | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 202 | py | distância=float(input("Digite a distância a percorrer:"))
if distância <= 200:
passagem = 0.5 * distância
else:
passagem = 0.45 * distância
print("Preço da passagem: R$ %7.2f" % passagem)
| [
"[email protected]"
] | |
fa2d8b37914f559d00865c70f5c4ec389187296c | ecf735569d6128e4ff4191335752385b47062084 | /app/urls.py | 8a23059cfa48b7a94f586908cf1571fd1d80455e | [
"Apache-2.0"
] | permissive | silverbackhq/silverback | 1e8dc26a22c9439a19e922e624dda546120b8262 | fcdeccd6f58abed2a10be7b34b2e2bfc954499bb | refs/heads/master | 2023-08-17T20:51:37.910446 | 2020-07-24T13:41:35 | 2020-07-24T13:41:35 | 163,096,706 | 44 | 11 | Apache-2.0 | 2023-05-28T11:37:48 | 2018-12-25T16:20:08 | Python | UTF-8 | Python | false | false | 18,468 | py | # Copyright 2019 Silverbackhq
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Third Party Library
from django.urls import include, path
# Local Library
from app.controllers.web.status_page import StatusPageIndex as StatusPageIndexView
from app.controllers.web.status_page import StatusPageHistory as StatusPageHistoryView
from app.controllers.web.status_page import StatusPageSingle as StatusPageSingleView
from app.controllers.web.install import Install as InstallView
from app.controllers.web.not_found import handler404 as handler404_view
from app.controllers.web.error import handler500 as handler500_view
from app.controllers.web.login import Login as LoginView
from app.controllers.web.register import Register as RegisterView
from app.controllers.web.forgot_password import ForgotPassword as ForgotPasswordView
from app.controllers.web.reset_password import ResetPassword as ResetPasswordView
from app.controllers.web.statistics import Statistics as StatisticsView
from app.controllers.web.history import AtomHistory as AtomHistoryView
from app.controllers.web.history import RssHistory as RssHistoryView
from app.controllers.web.health_check import HealthCheck as HealthCheckView
from app.controllers.web.admin.logout import Logout as LogoutView
from app.controllers.web.admin.dashboard import Dashboard as DashboardView
from app.controllers.web.admin.profile import Profile as ProfileView
from app.controllers.web.admin.settings import Settings as SettingsView
from app.controllers.web.admin.builder import Builder as BuilderView
from app.controllers.web.admin.activity import Activity as ActivityView
from app.controllers.web.admin.notification import Notification as NotificationView
from app.controllers.web.admin.user import UserList as UserListWeb
from app.controllers.web.admin.user import UserEdit as UserEditWeb
from app.controllers.web.admin.user import UserAdd as UserAddWeb
from app.controllers.web.admin.component import ComponentList as ComponentListView
from app.controllers.web.admin.component import ComponentAdd as ComponentAddView
from app.controllers.web.admin.component import ComponentEdit as ComponentEditView
from app.controllers.web.admin.component_group import ComponentGroupList as ComponentGroupListView
from app.controllers.web.admin.component_group import ComponentGroupAdd as ComponentGroupAddView
from app.controllers.web.admin.component_group import ComponentGroupEdit as ComponentGroupEditView
from app.controllers.web.admin.incident import IncidentList as IncidentListView
from app.controllers.web.admin.incident import IncidentAdd as IncidentAddView
from app.controllers.web.admin.incident import IncidentEdit as IncidentEditView
from app.controllers.web.admin.incident import IncidentView as IncidentViewView
from app.controllers.web.admin.incident_update import IncidentUpdateAdd as IncidentUpdateAddView
from app.controllers.web.admin.incident_update import IncidentUpdateEdit as IncidentUpdateEditView
from app.controllers.web.admin.incident_update import IncidentUpdateView as IncidentUpdateViewView
from app.controllers.web.admin.metric import MetricList as MetricListView
from app.controllers.web.admin.metric import MetricAdd as MetricAddView
from app.controllers.web.admin.metric import MetricEdit as MetricEditView
from app.controllers.web.admin.subscriber import SubscriberList as SubscriberListView
from app.controllers.web.admin.subscriber import SubscriberAdd as SubscriberAddView
from app.controllers.web.admin.subscriber import SubscriberEdit as SubscriberEditView
from app.controllers.api.private.v1.install import Install as InstallV1EndpointPrivate
from app.controllers.api.private.v1.status import StatusSubscribe as StatusSubscribeV1EndpointPrivate
from app.controllers.api.private.v1.login import Login as LoginV1EndpointPrivate
from app.controllers.api.private.v1.register import Register as RegisterV1EndpointPrivate
from app.controllers.api.private.v1.forgot_password import ForgotPassword as ForgotPasswordV1EndpointPrivate
from app.controllers.api.private.v1.reset_password import ResetPassword as ResetPasswordV1EndpointPrivate
from app.controllers.api.private.v1.admin.settings import Settings as SettingsAdminV1EndpointPrivate
from app.controllers.api.private.v1.admin.profile import Profile as ProfileAdminV1EndpointPrivate
from app.controllers.api.private.v1.admin.notifications import Notifications as NotificationsAdminV1EndpointPrivate
from app.controllers.api.private.v1.admin.notifications import LatestNotifications as LatestNotificationsAdminV1EndpointPrivate
from app.controllers.api.private.v1.admin.user import User as UserAdminV1EndpointPrivate
from app.controllers.api.private.v1.admin.user import Users as UsersAdminV1EndpointPrivate
from app.controllers.api.private.v1.admin.component_group import ComponentGroup as ComponentGroupAdminV1EndpointPrivate
from app.controllers.api.private.v1.admin.component_group import ComponentGroups as ComponentGroupsAdminV1EndpointPrivate
from app.controllers.api.private.v1.admin.component import Component as ComponentAdminV1EndpointPrivate
from app.controllers.api.private.v1.admin.component import Components as ComponentsAdminV1EndpointPrivate
from app.controllers.api.private.v1.admin.incident import Incident as IncidentAdminV1EndpointPrivate
from app.controllers.api.private.v1.admin.incident import Incidents as IncidentsAdminV1EndpointPrivate
from app.controllers.api.private.v1.admin.incident_update import IncidentUpdate as IncidentUpdateAdminV1EndpointPrivate
from app.controllers.api.private.v1.admin.incident_update import IncidentUpdates as IncidentUpdatesAdminV1EndpointPrivate
from app.controllers.api.private.v1.admin.incident_update import IncidentUpdatesNotify as IncidentUpdatesNotifyAdminV1EndpointPrivate
from app.controllers.api.private.v1.admin.incident_update import IncidentUpdatesComponents as IncidentUpdatesComponentsAdminV1EndpointPrivate
from app.controllers.api.private.v1.admin.incident_update import IncidentUpdatesComponent as IncidentUpdatesComponentAdminV1EndpointPrivate
from app.controllers.api.private.v1.admin.metric import Metric as MetricAdminV1EndpointPrivate
from app.controllers.api.private.v1.admin.metric import Metrics as MetricsAdminV1EndpointPrivate
from app.controllers.api.private.v1.admin.metric import NewRelicApps as NewRelicAppsAdminV1EndpointPrivate
from app.controllers.api.private.v1.admin.subscriber import Subscriber as SubscriberAdminV1EndpointPrivate
from app.controllers.api.private.v1.admin.subscriber import Subscribers as SubscribersAdminV1EndpointPrivate
from app.controllers.api.private.v1.admin.activity import Activities as ActivitiesAdminV1EndpointPrivate
from app.controllers.api.private.v1.admin.builder import BuilderSettings as BuilderSettingsAdminV1EndpointPrivate
from app.controllers.api.private.v1.admin.builder import BuilderComponents as BuilderComponentsAdminV1EndpointPrivate
from app.controllers.api.private.v1.admin.builder import BuilderSystemMetrics as BuilderSystemMetricsAdminV1EndpointPrivate
urlpatterns = [
# Public Views
path('', StatusPageIndexView.as_view(), name='app.web.status_page_index'),
path('history/<int:period>', StatusPageHistoryView.as_view(), name='app.web.status_page_history'),
path('incidents/<uri>', StatusPageSingleView.as_view(), name='app.web.status_page_single'),
path('install', InstallView.as_view(), name='app.web.install'),
path('login', LoginView.as_view(), name='app.web.login'),
path('register/<token>', RegisterView.as_view(), name='app.web.register'),
path('forgot-password', ForgotPasswordView.as_view(), name='app.web.forgot_password'),
path('reset-password/<token>', ResetPasswordView.as_view(), name='app.web.reset_password'),
path('statistics/<type>', StatisticsView.as_view(), name='app.web.statistics'),
path('history.atom', AtomHistoryView.as_view(), name='app.web.history_atom'),
path('history.rss', RssHistoryView.as_view(), name='app.web.history_rss'),
path('_healthcheck', HealthCheckView.as_view(), name='app.web.health_check'),
# Authenticated Users Views
path('admin/', include([
path('logout', LogoutView.as_view(), name='app.web.admin.logout'),
path('dashboard', DashboardView.as_view(), name='app.web.admin.dashboard'),
path('profile', ProfileView.as_view(), name='app.web.admin.profile'),
path('activity', ActivityView.as_view(), name='app.web.admin.activity.list'),
path('notifications', NotificationView.as_view(), name='app.web.admin.notification.list'),
path('settings', SettingsView.as_view(), name='app.web.admin.settings'),
path('builder', BuilderView.as_view(), name='app.web.admin.builder'),
path('users', UserListWeb.as_view(), name='app.web.admin.user.list'),
path('users/add', UserAddWeb.as_view(), name='app.web.admin.user.add'),
path('users/edit/<int:user_id>', UserEditWeb.as_view(), name='app.web.admin.user.edit'),
path('components', ComponentListView.as_view(), name='app.web.admin.component.list'),
path('components/add', ComponentAddView.as_view(), name='app.web.admin.component.add'),
path('components/edit/<int:component_id>', ComponentEditView.as_view(), name='app.web.admin.component.edit'),
path('component-groups', ComponentGroupListView.as_view(), name='app.web.admin.component_group.list'),
path('component-groups/add', ComponentGroupAddView.as_view(), name='app.web.admin.component_group.add'),
path('component-groups/edit/<int:group_id>', ComponentGroupEditView.as_view(), name='app.web.admin.component_group.edit'),
path('incidents', IncidentListView.as_view(), name='app.web.admin.incident.list'),
path('incidents/add', IncidentAddView.as_view(), name='app.web.admin.incident.add'),
path('incidents/edit/<int:incident_id>', IncidentEditView.as_view(), name='app.web.admin.incident.edit'),
path('incidents/view/<int:incident_id>', IncidentViewView.as_view(), name='app.web.admin.incident.view'),
path('incidents/view/<int:incident_id>/updates/add', IncidentUpdateAddView.as_view(), name='app.web.admin.incident_update.add'),
path('incidents/view/<int:incident_id>/updates/edit/<int:update_id>', IncidentUpdateEditView.as_view(), name='app.web.admin.incident_update.edit'),
path('incidents/view/<int:incident_id>/updates/view/<int:update_id>', IncidentUpdateViewView.as_view(), name='app.web.admin.incident_update.view'),
path('metrics', MetricListView.as_view(), name='app.web.admin.metric.list'),
path('metrics/add', MetricAddView.as_view(), name='app.web.admin.metric.add'),
path('metrics/edit/<int:metric_id>', MetricEditView.as_view(), name='app.web.admin.metric.edit'),
path('subscribers', SubscriberListView.as_view(), name='app.web.admin.subscriber.list'),
path('subscribers/add', SubscriberAddView.as_view(), name='app.web.admin.subscriber.add'),
path('subscribers/edit/<int:subscriber_id>', SubscriberEditView.as_view(), name='app.web.admin.subscriber.edit'),
])),
# Private API V1 Endpoints
path('api/private/v1/', include([
path('status_subscribe', StatusSubscribeV1EndpointPrivate.as_view(), name='app.api.private.v1.status_subscribe.endpoint'),
path('install', InstallV1EndpointPrivate.as_view(), name='app.api.private.v1.install.endpoint'),
path('login', LoginV1EndpointPrivate.as_view(), name='app.api.private.v1.login.endpoint'),
path('register', RegisterV1EndpointPrivate.as_view(), name='app.api.private.v1.register.endpoint'),
path('forgot-password', ForgotPasswordV1EndpointPrivate.as_view(), name='app.api.private.v1.forgot_password.endpoint'),
path('reset-password', ResetPasswordV1EndpointPrivate.as_view(), name='app.api.private.v1.reset_password.endpoint'),
path('admin/', include([
path(
'settings',
SettingsAdminV1EndpointPrivate.as_view(),
name='app.api.private.v1.admin.settings.endpoint'
),
path(
'profile',
ProfileAdminV1EndpointPrivate.as_view(),
name='app.api.private.v1.admin.profile.endpoint'
),
path(
'notification',
NotificationsAdminV1EndpointPrivate.as_view(),
name='app.api.private.v1.admin.notifications.endpoint'
),
path(
'latest_notifications',
LatestNotificationsAdminV1EndpointPrivate.as_view(),
name='app.api.private.v1.admin.latest_notifications.endpoint'
),
path(
'user',
UsersAdminV1EndpointPrivate.as_view(),
name='app.api.private.v1.admin.users.endpoint'
),
path(
'user/<int:user_id>',
UserAdminV1EndpointPrivate.as_view(),
name='app.api.private.v1.admin.user.endpoint'
),
path(
'component-group',
ComponentGroupsAdminV1EndpointPrivate.as_view(),
name='app.api.private.v1.admin.component_groups.endpoint'
),
path(
'component-group/<int:group_id>',
ComponentGroupAdminV1EndpointPrivate.as_view(),
name='app.api.private.v1.admin.component_group.endpoint'
),
path(
'component',
ComponentsAdminV1EndpointPrivate.as_view(),
name='app.api.private.v1.admin.components.endpoint'
),
path(
'component/<int:component_id>',
ComponentAdminV1EndpointPrivate.as_view(),
name='app.api.private.v1.admin.component.endpoint'
),
path(
'incident',
IncidentsAdminV1EndpointPrivate.as_view(),
name='app.api.private.v1.admin.incidents.endpoint'
),
path(
'incident/<int:incident_id>',
IncidentAdminV1EndpointPrivate.as_view(),
name='app.api.private.v1.admin.incident.endpoint'
),
path(
'incident-update/<int:incident_id>',
IncidentUpdatesAdminV1EndpointPrivate.as_view(),
name='app.api.private.v1.admin.incident_updates.endpoint'
),
path(
'incident-update/<int:incident_id>/<int:update_id>',
IncidentUpdateAdminV1EndpointPrivate.as_view(),
name='app.api.private.v1.admin.incident_update.endpoint'
),
path(
'incident-update/<int:incident_id>/<int:update_id>/notify',
IncidentUpdatesNotifyAdminV1EndpointPrivate.as_view(),
name='app.api.private.v1.admin.incident_update.notify.endpoint'
),
path(
'incident-update/<int:incident_id>/<int:update_id>/components',
IncidentUpdatesComponentsAdminV1EndpointPrivate.as_view(),
name='app.api.private.v1.admin.incident_update.components.endpoint'
),
path(
'incident-update/<int:incident_id>/<int:update_id>/component/<int:item_id>',
IncidentUpdatesComponentAdminV1EndpointPrivate.as_view(),
name='app.api.private.v1.admin.incident_update.component.endpoint'
),
path(
'metric',
MetricsAdminV1EndpointPrivate.as_view(),
name='app.api.private.v1.admin.metrics.endpoint'
),
path(
'metric/<int:metric_id>',
MetricAdminV1EndpointPrivate.as_view(),
name='app.api.private.v1.admin.metric.endpoint'
),
path(
'subscriber',
SubscribersAdminV1EndpointPrivate.as_view(),
name='app.api.private.v1.admin.subscribers.endpoint'
),
path(
'subscriber/<int:subscriber_id>',
SubscriberAdminV1EndpointPrivate.as_view(),
name='app.api.private.v1.admin.subscriber.endpoint'
),
path(
'activity',
ActivitiesAdminV1EndpointPrivate.as_view(),
name='app.api.private.v1.admin.activities.endpoint'
),
path(
'action/metric/new-relic-apps',
NewRelicAppsAdminV1EndpointPrivate.as_view(),
name='app.api.private.v1.admin.metric.action.new_relic_apps.endpoint'
),
path(
'builder/settings',
BuilderSettingsAdminV1EndpointPrivate.as_view(),
name='app.api.private.v1.admin.builder.settings.endpoint'
),
path(
'builder/component',
BuilderComponentsAdminV1EndpointPrivate.as_view(),
name='app.api.private.v1.admin.builder.components.endpoint'
),
path(
'builder/component/<component_id>',
BuilderComponentsAdminV1EndpointPrivate.as_view(),
name='app.api.private.v1.admin.builder.component.endpoint'
),
path(
'builder/metric',
BuilderSystemMetricsAdminV1EndpointPrivate.as_view(),
name='app.api.private.v1.admin.builder.metrics.endpoint'
),
path(
'builder/metric/<metric_id>',
BuilderSystemMetricsAdminV1EndpointPrivate.as_view(),
name='app.api.private.v1.admin.builder.metric.endpoint'
),
]))
])),
# Public API V1 Endpoints
path('api/public/v1/', include([
]))
]
handler404 = handler404_view
handler500 = handler500_view
| [
"[email protected]"
] | |
b25aabcca727963ff09e4b3871e95ec4ca64f57f | d7ccb4225f623139995a7039f0981e89bf6365a4 | /.history/carts/views_20211013002410.py | cfd3366c7921be447536d4c99af00755e78511bb | [] | no_license | tonnymuchui/django-mall | 64fd4abc3725c1bd0a3dcf20b93b490fe9307b37 | 55c083d8433be3c77adc61939cd197902de4ce76 | refs/heads/master | 2023-08-23T04:59:20.418732 | 2021-10-13T15:59:37 | 2021-10-13T15:59:37 | 415,668,388 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,133 | py | from django.shortcuts import render, redirect, get_object_or_404
from store.models import Product,Variation
from .models import Cart, CartItem
from django.core.exceptions import ObjectDoesNotExist
# Create your views here.
def _cart_id(request):
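    # reuse the session key as the cart id, creating a session if none exists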
cart = request.session.session_key
if not cart:
cart = request.session.create()
return cart
def add_cart(request, product_id):
product = Product.objects.get(id=product_id)
product_variation = []
if request.method == 'POST':
for item in request.POST:
key = item
value = request.POST[key]
try:
variation = Variation.objects.get(product=product, variation_category__iexact=key, variation_value__iexact=value)
product_variation.append(variation)
except:
pass
try:
cart = Cart.objects.get(cart_id=_cart_id(request)) # get the cart using the cart_id present in the session
except Cart.DoesNotExist:
cart = Cart.objects.create(
cart_id = _cart_id(request)
)
cart.save()
is_cart_item_exists = CartItem.objects.filter(product=product, cart=cart).exists()
if is_cart_item_exists:
cart_item = CartItem.objects.filter(product=product, cart=cart)
# existing_variations -> database
# current variation -> product_variation
# item_id -> database
ex_var_list = []
id = []
for item in cart_item:
existing_variation = item.variations.all()
ex_var_list.append(list(existing_variation))
id.append(item.id)
print(ex_var_list)
if product_variation in ex_var_list:
# increase the cart item quantity
index = ex_var_list.index(product_variation)
item_id = id[index]
item = CartItem.objects.get(product=product, id=item_id)
item.quantity += 1
item.save()
else:
item = CartItem.objects.create(product=product, quantity=1, cart=cart)
if len(product_variation) > 0:
item.variations.clear()
item.variations.add(*product_variation)
item.save()
else:
cart_item = CartItem.objects.create(
product = product,
quantity = 1,
cart = cart,
)
if len(product_variation) > 0:
cart_item.variations.clear()
cart_item.variations.add(*product_variation)
cart_item.save()
return redirect('cart')
def cart(request, total=0, quantity=0, cart_items=None):
try:
        tax = 0
        grand_total = 0
cart = Cart.objects.get(cart_id=_cart_id(request))
cart_items = CartItem.objects.filter(cart=cart, is_active=True)
for cart_item in cart_items:
total += (cart_item.product.price * cart_item.quantity)
quantity += cart_item.quantity
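        # a flat 2% tax is applied on the cart total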
tax = (2 * total)/100
grand_total = total + tax
except ObjectDoesNotExist:
pass
context = {
'total': total,
'quantity': quantity,
'cart_items': cart_items,
'tax': tax,
'grand_total': grand_total,
}
return render(request, 'store/cart.html', context)
def remove_cart(request, product_id, cart_item_id):
cart = Cart.objects.get(cart_id=_cart_id(request))
product = get_object_or_404(Product, id=product_id)
cart_item = CartItem.objects.get(product=product, cart=cart, id=cart_item_id)
if cart_item.quantity > 1:
cart_item.quantity -= 1
cart_item.save()
else:
cart_item.delete()
return redirect('cart')
def remove_cart_item(request, product_id):
product = get_object_or_404(Product, id=product_id)
cart = Cart.objects.get(cart_id=_cart_id(request))
cart_item = CartItem.objects.get(product=product, cart=cart)
cart_item.delete()
return redirect('cart')
| [
"[email protected]"
] | |
1c89973d235eef09b513d16069b27ad2c29ff8cd | b009fa634b04177b77a6cc7234dcc329d569e450 | /Connections/ConnectionGroup.py | 1a265ec27f14bbcbad6bed9a1c98f28388838b64 | [] | no_license | SimLeek/HTM-Filetype | 5b873c5c97bbe73d0876ad7c15ff90cf7a8d071c | 51b299c0aa59148d2f8b69bc540d096f5a9ad8f2 | refs/heads/master | 2020-12-09T00:11:24.430006 | 2016-11-25T19:07:28 | 2016-11-25T19:07:28 | 67,306,291 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 18,707 | py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2014-2016, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
from Columns.ColumnGroup import Column
from Columns.Util.Constants import EPSILON
from collections import defaultdict
import flatbuffers
import Buffers.connectiongroup.BBox as BBox
import Buffers.connectiongroup.ConnectionGroup as ConnectionGroup
import Buffers.connectiongroup.FloatBBox as FloatBBox
import Buffers.connectiongroup.IntBBox as IntBBox
import Buffers.Column.Column as neuralColumn
import Buffers.Column.Cell as neuronCell
import Buffers.Column.FloatBBox as ColumnFloatBBox
import Buffers.Column.IntBBox as ColumnIntBBox
import Buffers.Column.BBox as ColumnBBox
import Buffers.Column.Segment as CellSegment
import Buffers.Column.Synapse as CellSynapse
from n_d_point_field import n_dimensional_n_split, n_dimensional_n_split_float
# note: generate UIDs as randint(min_int,max_int) and check if in hash_table
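# A hedged sketch of that scheme (an assumption, not part of the original code):
# import random
# def _new_uid(used_uids, min_int=0, max_int=2 ** 31 - 1):
#     uid = random.randint(min_int, max_int)
#     while uid in used_uids:          # retry on collision with the hash table
#         uid = random.randint(min_int, max_int)
#     used_uids.add(uid)
#     return uid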
class Connections(object):
""" Class to hold data representing the connectivity of a collection of cells. """
def __init__(self,
num_columns,
bbox,
max_cells_per_column=16,
max_segments_per_cell=255,
max_synapses_per_segment=255,
location_type="int",
uid=0,
start_with_no_columns=False,
cell_learning_height=int("inf")):
""" @param num_columns (int) Number of cells in collection """
# check argument validity
assert max_cells_per_column > 0
assert max_segments_per_cell > 0
assert max_synapses_per_segment > 0
assert isinstance(max_cells_per_column, int)
assert isinstance(max_segments_per_cell, int)
assert isinstance(max_synapses_per_segment, int)
# save member variables
self.numColumns = num_columns
self.maxCellsPerColumn = max_cells_per_column
self.maxSegmentsPerCell = max_segments_per_cell
self.maxSynapsesPerSegment = max_synapses_per_segment
# save class variables
self.locationType = location_type
self.UID = uid
self.bbox = bbox
if not start_with_no_columns:
if location_type == "int":
self._columnLocations = n_dimensional_n_split(bbox, num_columns)
elif location_type == "float":
self._columnLocations = n_dimensional_n_split_float(bbox, num_columns)
points = (self._columnLocations.intersection(bbox, objects=True))
self._cells = dict([(point.id, Column(point.bbox, self)) for point in points])
self.columnUIDcounter = num_columns
else:
self._columnLocations = n_dimensional_n_split(bbox, 0)
self._cells = dict()
self.columnUIDcounter = 0
self._synapsesForPresynapticCell = defaultdict(set)
self._segmentForUID = dict()
self._numSynapses = 0
self._freeUIDs = []
self._nextUID = 0
self._iteration = 0
# Whenever creating a new Synapse or Segment, give it a unique ordinal.
# These can be used to sort synapses or segments by age.
self._nextSynapseOrdinal = long(0)
self._nextSegmentOrdinal = long(0)
def addColumn(self, columnUID, locationBBox):
        if columnUID not in self._cells:
self._cells[columnUID] = Column(locationBBox, self, self._iteration)
else:
raise ValueError("Cell location already used.")
self.columnUIDcounter = columnUID + 1
self._columnLocations.insert(columnUID, locationBBox, self._iteration)
self.numColumns += 1
def addColumnFromFile(self, filename):
flat_buffer = open(filename, 'rb').read()
flat_buffer = bytearray(flat_buffer)
self.addColumnFromBuf(flat_buffer)
def addColumnFromBuf(self, flat_buffer):
column = neuralColumn.Column.GetRootAsColumn(flat_buffer, 0)
if column.ConnectionsUID() != self.UID:
raise RuntimeWarning(
"Cell connections UID [" + str(cell.ConnectionsUID()) +
"] does not match connections UID [" + str(self.UID) + "].")
columnLocationBBox = None
if column.PositionType() == ColumnBBox.BBox.FloatBBox:
if self.locationType != "float":
raise ValueError (
"Column not of position type: "
+ self.locationType
+ ". Column may need to be projected.")
pos_union = ColumnFloatBBox.FloatBBox()
pos_union.Init(column.Position().Bytes, column.Position().Pos)
columnLocationBBox = []
for i in xrange(pos_union.CoordinatesLength()):
columnLocationBBox.append(pos_union.Coordinates(i))
elif column.PositionType() == ColumnBBox.BBox.IntBBox:
if self.locationType != "int":
raise ValueError("Neuron not of position type: "
+ self.locationType
+ ". Neuron may need to be projected.")
pos_union = ColumnIntBBox.IntBBox()
pos_union.Init(column.Position().Bytes, column.Position().Pos)
columnLocationBBox = []
for i in xrange(pos_union.CoordinatesLength()):
columnLocationBBox.append(pos_union.Coordinates(i))
#todo: add option to update iteration or keep old iteration
columnData = Column(columnLocationBBox, self, column.LastUsedIteration())
def addCellFromFile(self, filename):
buf = open(filename, 'rb').read()
buf = bytearray(buf)
cell = neuronCell.Cell.GetRootAsCell(buf, 0)
        # parse the cell position the same way addColumnFromBuf parses a column
        if cell.PositionType() == ColumnBBox.BBox.FloatBBox:
            pos_union = ColumnFloatBBox.FloatBBox()
        else:
            pos_union = ColumnIntBBox.IntBBox()
        pos_union.Init(cell.Position().Bytes, cell.Position().Pos)
        cellLocationBBox = [pos_union.Coordinates(i)
                            for i in xrange(pos_union.CoordinatesLength())]
        cellData = CellData(cellLocationBBox, cell.LastUsedIteration())
        self.addCell(cellData, cell.UID(), cellLocationBBox)
# todo: add option for breaking class limits for max synapses/segments
for i in xrange(cell.SegmentsLength()):
self.createSegment(cell.UID())
cellData._segments[-1]._lastUsedIteration = cell.Segments(i).LastUsedIteration()
for j in xrange(cell.Segments(i).SynapsesLength()):
syn = cell.Segments(i).Synapses(j)
self.createSynapse(cellData._segments[-1], syn.PresynapticCellUID(), syn.Permanence())
def writeCellToFile(self, cell, filename, destroy=True):
builder = flatbuffers.Builder(0)
# todo: refactor postype and other incorrectly cased includes
loc, postype = None
if self.locationType == "float":
FloatPos.FloatPosStartCoordinatesVector(builder, len(self._cells[cell]._bbox))
for i in reversed(range(0, len(self._cells[cell]._bbox))):
builder.PrependFloat32(self._cells[cell]._bbox[i])
loc = builder.EndVector(len(self._cells[cell]._bbox))
postype = posType.Pos.FloatPos
elif self.locationType == "int":
IntPos.IntPosStartCoordinatesVector(builder, len(self._cells[cell]._bbox))
for i in reversed(range(0, len(self._cells[cell]._bbox))):
builder.PrependUint32(self._cells[cell]._bbox[i])
loc = builder.EndVector(len(self._cells[cell]._bbox))
postype = posType.Pos.IntPos
segments = []
for segment in self._cells[cell]._segments:
synapses = []
for synapse in segment._synapses:
CellSynapse.SynapseStart(builder)
CellSynapse.SynapseAddPresynapticCellUID(builder, synapse.presynapticCell)
CellSynapse.SynapseAddPermanence(builder, synapse.permanence)
synapses.append(CellSynapse.SynapseEnd(builder))
CellSegment.SegmentStartSynapsesVector(builder, len(synapses))
for i in reversed(range(0, len(synapses))):
builder.PrependUOffsetTRelative(synapses[i])
segmentSynapses = builder.EndVector(len(synapses))
CellSegment.SegmentStart(builder)
CellSegment.SegmentAddSynapses(builder, segmentSynapses)
CellSegment.SegmentAddLastUsedIteration(builder, segment._lastUsedIteration)
segments.append(CellSegment.SegmentEnd(builder))
neuronCell.CellStartSegmentsVector(builder, len(segments))
for i in reversed(range(0, len(segments))):
builder.PrependUOffsetTRelative(segments[i])
cellSegments = builder.EndVector(len(segments))
neuronCell.CellStart(builder)
neuronCell.CellAddPositionType(builder, postype)
neuronCell.CellAddPosition(builder, loc)
neuronCell.CellAddLastUsedIteration(builder, self._cells[cell]._lastUsedIteration)
neuronCell.CellAddUID(builder, cell)
neuronCell.CellAddConnectionsUID(builder, self.UID)
neuronCell.CellAddSegments(builder, cellSegments)
neuron = neuronCell.CellEnd(builder)
builder.Finish(neuron)
buf = builder.Output()
open(filename, 'wb').write(buf)
if destroy:
self.destroyCell(cell)
def segmentsForCell(self, cell):
""" Returns the segments that belong to a cell.
@param cell (int) Cell UID
@return (list)
Segment objects representing segments on the given cell.
"""
return self._cells[cell]._segments
def synapsesForSegment(self, segment):
""" Returns the synapses on a segment.
@param segment (int) Segment index
@return (set)
Synapse objects representing synapses on the given segment.
"""
return segment._synapses
def dataForSynapse(self, synapse):
""" Returns the data for a synapse.
This method exists to match the interface of the C++ Connections. This
allows tests and tools to inspect the connections using a common interface.
@param synapse (Object) Synapse object
@return Synapse data
"""
return synapse
def dataForSegment(self, segment):
""" Returns the data for a segment.
This method exists to match the interface of the C++ Connections. This
allows tests and tools to inspect the connections using a common interface.
@param synapse (Object) Segment object
@return segment data
"""
return segment
def getSegment(self, cell, idx):
""" Returns a Segment object of the specified segment using data from the
self._cells array.
@param cell (int) cell index
@param idx (int) segment index on a cell
@return (Segment) Segment object with index idx on the specified cell
"""
return self._cells[cell]._segments[idx]
def segmentForUID(self, uid):
""" Get the segment with the specified flatIdx.
@param flatIdx (int) The segment's flattened dict index.
@return (Segment) segment object
"""
return self._segmentForUID[uid]
def segmentFlatListLength(self):
""" Get larger than the needed length for a list to hold a value for every segment's
UID.
@return (int) Required list length
Mostly deprecated due to dict usage, but useful for high perormance applications like gpu
"""
return self._nextUID
def synapsesForPresynapticCell(self, presynapticCell):
""" Returns the synapses for the source cell that they synapse on.
@param presynapticCell (int) Source cell index
@return (set) Synapse objects
"""
return self._synapsesForPresynapticCell[presynapticCell]
def numSegments(self, cell=None):
""" Returns the number of segments.
@param cell (int) optional parameter to get the number of segments on a cell
@retval (int) number of segments on all cells if cell is not specified,
or on a specific specified cell
"""
if cell is not None:
return len(self._cells[cell]._segments)
return self._nextUID - len(self._freeUIDs)
def _removeSynapseFromPresynapticMap(self, synapse):
inputSynapses = self._synapsesForPresynapticCell[synapse.presynapticCell]
inputSynapses.remove(synapse)
if len(inputSynapses) == 0:
del self._synapsesForPresynapticCell[synapse.presynapticCell]
def numSynapses(self, segment=None):
""" Returns the number of Synapses.
@param segment (Object) optional parameter to get the number of synapses on
a segment
@retval (int) number of synapses on all segments if segment is not
specified, or on a specified segment
"""
if segment is not None:
return len(segment._synapses)
return self._numSynapses
def _decrementNumSynapses(self):
self._numSynapses -= 1
def _incrementNumSynapses(self):
self._numSynapses += 1
def _next_synapse_ordinal(self):
self._nextSynapseOrdinal +=1
return self._nextSynapseOrdinal-1
def computeActivity(self, activePresynapticCells, connectedPermanence):
""" Compute each segment's number of active synapses for a given input.
In the returned lists, a segment's active synapse count is stored at index
`segment.UID`.
@param activePresynapticCells (iter) active cells
@param connectedPermanence (float) permanence threshold for a synapse
to be considered connected
@return (tuple) Contains:
`numActiveConnectedSynapsesForSegment` (list),
`numActivePotentialSynapsesForSegment` (list)
"""
numActiveConnectedSynapsesForSegment = [0] * self._nextUID
numActivePotentialSynapsesForSegment = [0] * self._nextUID
threshold = connectedPermanence - EPSILON
for cell in activePresynapticCells:
for synapse in self._synapsesForPresynapticCell[cell]:
UID = synapse.segment.UID
numActivePotentialSynapsesForSegment[UID] += 1
if synapse.permanence > threshold:
numActiveConnectedSynapsesForSegment[UID] += 1
return (numActiveConnectedSynapsesForSegment,
numActivePotentialSynapsesForSegment)
def startNewIteration(self):
""" Mark the passage of time. This information is used during segment
cleanup.
"""
self._iteration += 1
def writeToFile(self, filename):
""" Writes serialized data from group to flatbuffers
@param filename (string) save file name/location"""
builder = flatbuffers.Builder(0)
        bbox, bboxType = None, None
if self.locationType == "float":
FloatBBox.FloatBBoxStartCoordinatesVector(builder, len(self.bbox))
for i in reversed(range(0, len(self.bbox))):
builder.PrependFloat32(self.bbox[i])
bbox = builder.EndVector(len(self.bbox))
bboxType = BBox.BBox.FloatBBox
elif self.locationType == "int":
            IntBBox.IntBBoxStartCoordinatesVector(builder, len(self.bbox))
for i in reversed(range(0, len(self.bbox))):
builder.PrependUint32(self.bbox[i])
bbox = builder.EndVector(len(self.bbox))
            bboxType = BBox.BBox.IntBBox
ConnectionGroup.ConnectionGroupStart(builder)
ConnectionGroup.ConnectionGroupAddUID(builder, self.UID)
ConnectionGroup.ConnectionGroupAddNumCells(builder, self.numColumns)
ConnectionGroup.ConnectionGroupAddMaxSegmentsPerCell(builder, self.maxSegmentsPerCell)
ConnectionGroup.ConnectionGroupAddMaxSynapsesPerSegment(builder, self.maxSynapsesPerSegment)
ConnectionGroup.ConnectionGroupAddBboxType(builder, bboxType)
ConnectionGroup.ConnectionGroupAddBbox(builder, bbox)
connectionGroup = ConnectionGroup.ConnectionGroupEnd(builder)
builder.Finish(connectionGroup)
buf = builder.Output()
# todo: force these to write correct extension names
open(filename, 'wb').write(buf)
@classmethod
def readFromFile(cls, filename):
""" Reads deserialized data from proto object
@param proto (DynamicStructBuilder) Proto object
@return (Connections) Connections instance
"""
# pylint: disable=W0212
buf = open(filename, 'rb').read()
buf = bytearray(buf)
connection_group = ConnectionGroup.ConnectionGroup.GetRootAsConnectionGroup(buf, 0)
bbox = None
locationType = None
if connection_group.BboxType() == BBox.BBox.IntBBox:
locationType = "int"
bbox_union = IntBBox.IntBBox()
bbox_union.Init(connection_group.Bbox().Bytes, connection_group.Bbox().Pos)
bbox = []
for i in xrange(bbox_union.CoordinatesLength()):
bbox.append(bbox_union.Coordinates(i))
elif connection_group.BboxType() == BBox.BBox.FloatBBox:
locationType = "float"
bbox_union = FloatBBox.FloatBBox()
bbox_union.Init(connection_group.Bbox().Bytes, connection_group.Bbox().Pos)
bbox = []
for i in xrange(bbox_union.CoordinatesLength()):
bbox.append(bbox_union.Coordinates(i))
me = cls(
connection_group.NumCells(),
bbox,
connection_group.MaxSegmentsPerCell(),
connection_group.MaxSynapsesPerSegment(),
locationType,
connection_group.UID(),
startWithNoNeurons=True)
return me
class Cluster(object):
"""Class to hold data about connected groups of neurons with different properties."""
| [
"[email protected]"
] | |
674032ef2c7902a92fc86f8d1c731b4f2225bde8 | 717c07ef9f2192042dd850f916041404b2ab33f5 | /setup.py | 9182f67805f044bbcda64b333aad14127bf4e5c9 | [] | no_license | espenmn/bda.plone.shopviews | 38f6cc0057da4ab41d49ababd6a0b570376c78c5 | 36a2848839e7dea44c1020a98440707196719b99 | refs/heads/master | 2021-01-18T22:24:40.544985 | 2014-02-21T10:11:34 | 2014-02-21T10:11:34 | 10,687,852 | 0 | 0 | null | 2016-09-16T11:35:27 | 2013-06-14T11:48:36 | JavaScript | UTF-8 | Python | false | false | 1,256 | py | import os
from setuptools import (
setup,
find_packages,
)
version = '0.3'
shortdesc = " bda.plone.shop: Demo Views"
longdesc = open(os.path.join(os.path.dirname(__file__), 'README.rst')).read()
longdesc += open(os.path.join(os.path.dirname(__file__), 'CHANGES.rst')).read()
longdesc += open(os.path.join(os.path.dirname(__file__), 'LICENSE.rst')).read()
setup(name='bda.plone.shopviews',
version=version,
description=shortdesc,
long_description=longdesc,
classifiers=[
'Environment :: Web Environment',
'License :: OSI Approved :: GNU General Public License (GPL)',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
],
author='Espen Moe-Nilssenn',
author_email='[email protected]',
license='GNU General Public Licence',
packages=find_packages(exclude=['ez_setup']),
namespace_packages=['bda', 'bda.plone'],
include_package_data=True,
zip_safe=False,
install_requires=[
'setuptools',
'Plone',
'bda.plone.shop',
],
entry_points="""
[z3c.autoinclude.plugin]
target = plone
""",
) | [
"[email protected]"
] | |
84136442ecdc9dd8adb4d27a1c727a5b81d14a55 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03837/s721538098.py | 7ad462b06891930c96b5534664848c98d247d1a8 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 391 | py | from scipy.sparse.csgraph import floyd_warshall
N, M = map(int, input().split())
# No direct edge -> distance infinity.
edge = [[float("inf") for i in range(N)] for j in range(N)]
abc = []
for _ in range(M):
    a, b, c = map(int, input().split())
    abc.append((a, b, c))
    edge[a - 1][b - 1] = c
    edge[b - 1][a - 1] = c
# All-pairs shortest distances in the undirected graph.
dist = floyd_warshall(edge)
# Since dist[a-1][b-1] <= c always holds, dist[a-1][b-1] != c means some
# strictly shorter route exists, i.e. edge (a, b) lies on no shortest path.
ans = 0
for a, b, c in abc:
    if dist[a - 1][b - 1] != c:
        ans += 1
print(ans)
"[email protected]"
] | |
56b611cdd23a3eacfbe961dbbf80ce0bc18ede57 | 546b8c3e1b876aab272e587765951e8acd7b3122 | /irlc/ex04/pid_lunar.py | 7c78652995c803115c7a65deee9af271741577a6 | [] | no_license | natashanorsker/RL_snakes | 2b8a9da5dd1e794e832830ab64e57ab7d4b0d6c3 | be8c75d1aa7a5ba7a6af50a0a990a97b0242c49d | refs/heads/main | 2023-04-21T14:08:30.840757 | 2021-05-11T17:33:35 | 2021-05-11T17:33:35 | 358,572,447 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 4,817 | py | """
For information about the Apollo 11 lunar lander see:
https://eli40.com/lander/02-debrief/
For code for the Gym LunarLander environment see:
https://github.com/openai/gym/blob/master/gym/envs/box2d/lunar_lander.py
(although we will only need the time discretization of dt=1/50).
This implementation is inspired by:
https://github.com/wfleshman/PID_Control/blob/master/pid.py
But for some reason I had better success with different parameters for the PID controller.
"""
import gym
import matplotlib.pyplot as plt
import numpy as np
from irlc import VideoMonitor
from irlc import train
from irlc.ex04.pid import PID
from irlc import Agent
from irlc.ex04 import speech
from irlc import savepdf
class ApolloLunarAgent(Agent):
def __init__(self, env, dt, Kp_altitude=18, Kd_altitude=13, Kp_angle=-18, Kd_angle=-18): #Ki=0.0, Kd=0.0, target=0):
self.Kp_altitude = Kp_altitude
self.Kd_altitude = Kd_altitude
self.Kp_angle = Kp_angle
self.Kd_angle = Kd_angle
self.error_angle = []
self.error_altitude = []
self.dt = dt
super().__init__(env)
def pi(self, x, t=None):
""" From documentation: https://github.com/openai/gym/blob/master/gym/envs/box2d/lunar_lander.py
x (list): The state. Attributes:
x[0] is the horizontal coordinate
x[1] is the vertical coordinate
x[2] is the horizontal speed
x[3] is the vertical speed
x[4] is the angle
x[5] is the angular speed
x[6] 1 if first leg has contact, else 0
x[7] 1 if second leg has contact, else 0
Your implementation should follow what happens in:
https://github.com/wfleshman/PID_Control/blob/master/pid.py
I.e. you have to compute the target for the angle and altitude as done in the code (and explained in the documentation.
Note the target for the PID controllers is 0.
"""
if t == 0:
self.pid_alt = PID(dt=self.dt, Kp=self.Kp_altitude, Kd=self.Kd_altitude, Ki=0, target=0)
self.pid_ang = PID(dt=self.dt, Kp=self.Kp_angle, Kd=self.Kd_angle, Ki=0, target=0)
        # Setpoints follow the referenced PID_Control repo: target altitude
        # |x[0]| (descend as the pad gets closer) and target angle
        # 0.25*pi*(x[0] + x[2]) (lean toward the pad). Passing
        # (observed - target) to a zero-target PID yields e = target - observed;
        # the pi(x) call convention is an assumption about the course PID class.
        alt_adj = self.pid_alt.pi(x[1] - np.abs(x[0]))
        ang_adj = self.pid_ang.pi(x[4] - (0.25 * np.pi) * (x[0] + x[2]))
u = np.array([alt_adj, ang_adj])
u = np.clip(u, -1, +1)
# If the legs are on the ground we made it, kill engines
if (x[6] or x[7]):
u[:] = 0
# Record stats.
self.error_altitude.append(self.pid_alt.e_prior)
self.error_angle.append(self.pid_ang.e_prior)
return u
def get_lunar_lander(env):
# dt = 1. / env.metadata['video.frames_per_second']
from gym.envs.box2d.lunar_lander import FPS
dt = 1/FPS # Get time discretization from environment.
spars = ['Kp_altitude', 'Kd_altitude', 'Kp_angle', 'Kd_angle']
def x2pars(x2):
return {spars[i]: x2[i] for i in range(4)}
x_opt = np.asarray([52.23302414, 34.55938593, -80.68722976, -38.04571655])
env = VideoMonitor(env)
agent = ApolloLunarAgent(env, dt=dt, **x2pars(x_opt))
return agent
def lunar_single_mission():
env = gym.make('LunarLanderContinuous-v2')
env._max_episode_steps = 1000 # We don't want it to time out.
agent = get_lunar_lander(env)
env = VideoMonitor(env)
stats, traj = train(env, agent, return_trajectory=True, num_episodes=1)
env.close()
if traj[0].reward[-1] == 100:
print("A small step for man, a giant leap for mankind!")
elif traj[0].reward[-1] == -100:
print(speech)
else:
print("Environment timed out and the lunar module is just kind of floating around")
states = traj[0].state
plt.plot(states[:, 0], label='x')
plt.plot(states[:, 1], label='y')
plt.plot(states[:, 2], label='vx')
plt.plot(states[:, 3], label='vy')
plt.plot(states[:, 4], label='theta')
plt.plot(states[:, 5], label='vtheta')
plt.legend()
plt.grid()
plt.ylim(-1.1, 1.1)
plt.title('PID Control')
plt.ylabel('Value')
plt.xlabel('Steps')
savepdf("pid_lunar_trajectory")
plt.show()
def lunar_average_performance():
env = gym.make('LunarLanderContinuous-v2')
env._max_episode_steps = 1000 # We don't want it to time out.
agent = get_lunar_lander(env)
stats, traj = train(env, agent, return_trajectory=True, num_episodes=20)
env.close()
n_won = sum([np.sum(t.reward[-1] == 100) for t in traj])
n_lost = sum([np.sum(t.reward[-1] == -100) for t in traj])
print("Successfull landings: ", n_won, "of 20")
print("Unsuccessfull landings: ", n_lost, "of 20")
if __name__ == "__main__":
lunar_single_mission()
lunar_average_performance()
| [
"[email protected]"
] | |
7970f08b77e75e6b9e3a86cd87cc8b8901e86929 | 3a56e8eb8f2182eabe676c752f5d6629f6db1a9c | /py_gnome/tests/unit_tests/test_weatherer.py | 7a7fa9a7ad36c36222aacb0c4bec409112dd1329 | [] | no_license | kthyng/GNOME2 | ae12b706224762c63d385e303ff431dcac0f7822 | 81f0e73b50b83022bf8327e181a3799fa2d980c1 | refs/heads/master | 2021-01-22T01:51:34.116785 | 2014-03-15T12:49:27 | 2014-03-15T12:49:27 | 15,806,997 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,629 | py | #!/usr/bin/env python
'''
Unit tests for the Weatherer classes
'''
from datetime import datetime, timedelta
import pytest
from pytest import raises
from conftest import sample_sc_release
import numpy as np
from gnome.utilities.inf_datetime import InfDateTime
from gnome.array_types import mass, rise_vel, mass_components, half_lives
from gnome.elements import (ElementType,
InitMassFromTotalMass,
InitRiseVelFromDist,
InitMassComponentsFromOilProps,
InitHalfLivesFromOilProps
)
from gnome.weatherers import Weatherer
rel_time = datetime(2012, 8, 20, 13) # yyyy/month/day/hr/min/sec
arr_types = {'mass': mass,
'rise_vel': rise_vel,
'mass_components': mass_components,
'half_lives': half_lives
}
initializers = {'mass': InitMassFromTotalMass(),
'rise_vel': InitRiseVelFromDist(),
'mass_components': InitMassComponentsFromOilProps(),
'half_lives': InitHalfLivesFromOilProps()
}
sc = sample_sc_release(5, (3., 6., 0.),
rel_time,
uncertain=False,
arr_types=arr_types,
element_type=ElementType(initializers))
u_sc = sample_sc_release(5, (3., 6., 0.),
rel_time,
uncertain=True,
arr_types=arr_types,
element_type=ElementType(initializers))
secs_in_minute = 60
class TestWeatherer:
def test_init(self):
weatherer = Weatherer()
print weatherer
assert weatherer.on == True
assert weatherer.active == True
assert weatherer.active_start == InfDateTime('-inf')
assert weatherer.active_stop == InfDateTime('inf')
assert weatherer.array_types == {'mass_components': mass_components,
'half_lives': half_lives}
@pytest.mark.parametrize("test_sc", [sc, u_sc])
def test_one_move(self, test_sc):
'''
calls one get_move step and checks that we decayed at the expected
rate.
'''
weatherer = Weatherer()
print '\nsc["mass"]:\n', test_sc['mass']
model_time = rel_time
time_step = 15 * secs_in_minute
weatherer.prepare_for_model_run()
weatherer.prepare_for_model_step(test_sc, time_step, model_time)
decayed_mass = weatherer.get_move(test_sc, time_step, model_time)
weatherer.model_step_is_done()
print '\ndecayed_mass:\n', decayed_mass
assert np.allclose(decayed_mass.sum(1), 0.5)
@pytest.mark.parametrize("test_sc", [sc, u_sc])
def test_one_weather(self, test_sc):
'''
calls one weathering step and checks that we decayed at the expected
rate.
'''
saved_mass = np.copy(test_sc['mass'])
saved_components = np.copy(test_sc['mass_components'])
weatherer = Weatherer()
print '\nsc["mass"]:\n', test_sc['mass']
model_time = rel_time
time_step = 15 * secs_in_minute
weatherer.prepare_for_model_run()
weatherer.prepare_for_model_step(test_sc, time_step, model_time)
weatherer.weather_elements(test_sc, time_step, model_time)
weatherer.model_step_is_done()
print '\nsc["mass"]:\n', test_sc['mass']
assert np.allclose(test_sc['mass'], 0.5)
assert np.allclose(test_sc['mass_components'].sum(1), 0.5)
test_sc['mass'] = saved_mass
test_sc['mass_components'] = saved_components
@pytest.mark.parametrize("test_sc", [sc, u_sc])
def test_out_of_bounds_model_time(self, test_sc):
'''
Here we test the conditions where the model_time
is outside the range of the weatherer's active
start and stop times.
        1: (model_time >= active_stop)
           The active window has already ended before this step starts,
           so the effective duration for our calculation is zero
           and there should be no decay.
        2: (model_time < active_start) and (model_time + time_step <= active_start)
           The whole step ends before the active window begins, so the
           effective duration is zero and there should be no decay.
        3: (model_time < active_start) and (model_time + time_step > active_start)
           The step straddles the start of the active window, so the
           effective duration is (active_start --> model_time + time_step).
           The decay is calculated for this partial duration only.
'''
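        # Worked sketch (illustrative, assumed clipping logic rather than the
        # actual implementation):
        #   t0 = max(model_time, active_start)
        #   t1 = min(model_time + timedelta(seconds=time_step), active_stop)
        #   duration = max((t1 - t0).total_seconds(), 0)
        # These tests assume a half-life equal to the 15-minute step, so the
        # surviving fraction is 0.5 ** (duration / (15 * secs_in_minute)):
        # zero overlap leaves 1.0 (cases 1 and 2), a 15-minute overlap
        # leaves 0.5 (case 3).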
# rel_time = datetime(2012, 8, 20, 13)
stop_time = rel_time + timedelta(hours=1)
print '\nsc["mass"]:\n', test_sc['mass']
# setup test case 1
model_time = stop_time
time_step = 15 * secs_in_minute
weatherer = Weatherer(active_start=rel_time, active_stop=stop_time)
weatherer.prepare_for_model_run()
weatherer.prepare_for_model_step(test_sc, time_step, model_time)
decayed_mass = weatherer.get_move(test_sc, time_step, model_time)
weatherer.model_step_is_done()
print '\ndecayed_mass:\n', decayed_mass
assert np.allclose(decayed_mass.sum(1), 1.)
# setup test case 2
model_time = rel_time - timedelta(minutes=15)
time_step = 15 * secs_in_minute
weatherer.prepare_for_model_step(test_sc, time_step, model_time)
decayed_mass = weatherer.get_move(test_sc, time_step, model_time)
weatherer.model_step_is_done()
print '\ndecayed_mass:\n', decayed_mass
assert np.allclose(decayed_mass.sum(1), 1.)
# setup test case 3
model_time = rel_time - timedelta(minutes=15)
time_step = 30 * secs_in_minute
weatherer.prepare_for_model_step(test_sc, time_step, model_time)
decayed_mass = weatherer.get_move(test_sc, time_step, model_time)
weatherer.model_step_is_done()
print '\ndecayed_mass:\n', decayed_mass
assert np.allclose(decayed_mass.sum(1), 0.5)
@pytest.mark.parametrize("test_sc", [sc, u_sc])
def test_out_of_bounds_time_step(self, test_sc):
'''
Here we test the conditions where the time_step
is outside the range of the weatherer's active
start and stop times.
        4: (model_time < active_stop) and (model_time + time_step > active_stop)
           The step straddles the end of the active window, so the
           effective duration is (model_time --> active_stop).
           The decay is calculated for this partial duration only.
'''
# rel_time = datetime(2012, 8, 20, 13)
stop_time = rel_time + timedelta(hours=1)
print '\nsc["mass"]:\n', test_sc['mass']
# setup test case 4
model_time = stop_time - timedelta(minutes=15)
time_step = 30 * secs_in_minute
weatherer = Weatherer(active_start=rel_time, active_stop=stop_time)
weatherer.prepare_for_model_run()
weatherer.prepare_for_model_step(test_sc, time_step, model_time)
decayed_mass = weatherer.get_move(test_sc, time_step, model_time)
weatherer.model_step_is_done()
print '\ndecayed_mass:\n', decayed_mass
assert np.allclose(decayed_mass.sum(1), 0.5)
@pytest.mark.parametrize("test_sc", [sc, u_sc])
def test_model_time_range_surrounds_active_range(self, test_sc):
'''
Here we test the condition where the model_time and time_step
specify a time range that completely surrounds the range of the
weatherer's active start and stop times.
        5: (model_time < active_start) and (model_time + time_step > active_stop)
           The step covers the whole active window, so the effective
           duration is (active_start --> active_stop).
           The decay is calculated for this partial duration only.
'''
stop_time = rel_time + timedelta(minutes=15)
print '\nsc["mass"]:\n', test_sc['mass']
# setup test case 5
model_time = rel_time - timedelta(minutes=15)
time_step = 45 * secs_in_minute
weatherer = Weatherer(active_start=rel_time, active_stop=stop_time)
weatherer.prepare_for_model_run()
weatherer.prepare_for_model_step(test_sc, time_step, model_time)
decayed_mass = weatherer.get_move(test_sc, time_step, model_time)
weatherer.model_step_is_done()
print '\ndecayed_mass:\n', decayed_mass
assert np.allclose(decayed_mass.sum(1), 0.5)
| [
"[email protected]"
] | |
577f382690dff09da10759418a2580aa9d7df66f | aadcddb4daee7ae84cb0785f9b0e82d8b5f6a1af | /gemtown/users/migrations/0008_auto_20190424_2025.py | b75ca7729635aea695e0c8b10f8dd2bbd06538d3 | [
"MIT"
] | permissive | doramong0926/gemtown | 082d210493930312ad3ecf3e813f568204979387 | 2c39284e3c68f0cc11994bed0ee2abaad0ea06b6 | refs/heads/master | 2022-12-12T10:23:11.363452 | 2019-05-23T17:18:03 | 2019-05-23T17:18:03 | 183,075,120 | 0 | 0 | NOASSERTION | 2022-12-09T20:37:50 | 2019-04-23T18:37:03 | JavaScript | UTF-8 | Python | false | false | 1,538 | py | # Generated by Django 2.0.13 on 2019-04-24 11:25
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('users', '0007_auto_20190424_2011'),
]
operations = [
migrations.AlterField(
model_name='user',
name='country',
field=models.CharField(blank=True, choices=[('hk', 'Hong Kong'), ('kr', 'Korea'), ('id', 'Indonesia'), ('cn', 'China'), ('sg', 'Singapore'), ('vn', 'Viet Nam'), ('jp', 'Japan'), ('us', 'United States of America')], default='kr', max_length=80),
),
migrations.AlterField(
model_name='user',
name='gender',
field=models.CharField(blank=True, choices=[('female', 'Female'), ('not_specified', 'Not specified'), ('foregin_male', 'Foregin_Male'), ('foregin_female', 'Foregin_Female'), ('male', 'Male')], max_length=80),
),
migrations.AlterField(
model_name='user',
name='mobile_country',
field=models.CharField(blank=True, choices=[('hk', 'Hong Kong'), ('kr', 'Korea'), ('id', 'Indonesia'), ('cn', 'China'), ('sg', 'Singapore'), ('vn', 'Viet Nam'), ('jp', 'Japan'), ('us', 'United States of America')], default='kr', max_length=80),
),
migrations.AlterField(
model_name='user',
name='user_class',
field=models.CharField(blank=True, choices=[('artist', 'Artist'), ('common', 'Common'), ('company', 'Company')], default='nomal', max_length=80),
),
]
| [
"[email protected]"
] | |
dd6166e54ce3f918f2668c99ad1befd66efdcbb8 | 24fe1f54fee3a3df952ca26cce839cc18124357a | /servicegraph/lib/python2.7/site-packages/acimodel-4.0_3d-py2.7.egg/cobra/modelimpl/eqptcapacity/l3v4usage32hist15min.py | 08053946749e74e7db77d1a59df684661cebc61f | [] | no_license | aperiyed/servicegraph-cloudcenter | 4b8dc9e776f6814cf07fe966fbd4a3481d0f45ff | 9eb7975f2f6835e1c0528563a771526896306392 | refs/heads/master | 2023-05-10T17:27:18.022381 | 2020-01-20T09:18:28 | 2020-01-20T09:18:28 | 235,065,676 | 0 | 0 | null | 2023-05-01T21:19:14 | 2020-01-20T09:36:37 | Python | UTF-8 | Python | false | false | 32,808 | py | # coding=UTF-8
# **********************************************************************
# Copyright (c) 2013-2019 Cisco Systems, Inc. All rights reserved
# written by zen warriors, do not modify!
# **********************************************************************
from cobra.mit.meta import ClassMeta
from cobra.mit.meta import StatsClassMeta
from cobra.mit.meta import CounterMeta
from cobra.mit.meta import PropMeta
from cobra.mit.meta import Category
from cobra.mit.meta import SourceRelationMeta
from cobra.mit.meta import NamedSourceRelationMeta
from cobra.mit.meta import TargetRelationMeta
from cobra.mit.meta import DeploymentPathMeta, DeploymentCategory
from cobra.model.category import MoCategory, PropCategory, CounterCategory
from cobra.mit.mo import Mo
# ##################################################
class L3v4Usage32Hist15min(Mo):
"""
Mo doc not defined in techpub!!!
"""
meta = StatsClassMeta("cobra.model.eqptcapacity.L3v4Usage32Hist15min", "Layer3 v4 32 entries usage count")
counter = CounterMeta("v4Total", CounterCategory.COUNTER, "count", "Total v4 32 Routes")
counter._propRefs[PropCategory.IMPLICIT_CUMULATIVE] = "v4TotalCum"
counter._propRefs[PropCategory.IMPLICIT_PERIODIC] = "v4TotalPer"
counter._propRefs[PropCategory.IMPLICIT_MIN] = "v4TotalMin"
counter._propRefs[PropCategory.IMPLICIT_MAX] = "v4TotalMax"
counter._propRefs[PropCategory.IMPLICIT_AVG] = "v4TotalAvg"
counter._propRefs[PropCategory.IMPLICIT_SUSPECT] = "v4TotalSpct"
counter._propRefs[PropCategory.IMPLICIT_THRESHOLDED] = "v4TotalThr"
counter._propRefs[PropCategory.IMPLICIT_TREND] = "v4TotalTr"
counter._propRefs[PropCategory.IMPLICIT_RATE] = "v4TotalRate"
meta._counters.append(counter)
counter = CounterMeta("v4Mc", CounterCategory.COUNTER, "count", "Total v4 32 MC Routes")
counter._propRefs[PropCategory.IMPLICIT_CUMULATIVE] = "v4McCum"
counter._propRefs[PropCategory.IMPLICIT_PERIODIC] = "v4McPer"
counter._propRefs[PropCategory.IMPLICIT_MIN] = "v4McMin"
counter._propRefs[PropCategory.IMPLICIT_MAX] = "v4McMax"
counter._propRefs[PropCategory.IMPLICIT_AVG] = "v4McAvg"
counter._propRefs[PropCategory.IMPLICIT_SUSPECT] = "v4McSpct"
counter._propRefs[PropCategory.IMPLICIT_THRESHOLDED] = "v4McThr"
counter._propRefs[PropCategory.IMPLICIT_TREND] = "v4McTr"
counter._propRefs[PropCategory.IMPLICIT_RATE] = "v4McRate"
meta._counters.append(counter)
counter = CounterMeta("v4Uc", CounterCategory.COUNTER, "count", "Total v4 32 UC Routes")
counter._propRefs[PropCategory.IMPLICIT_CUMULATIVE] = "v4UcCum"
counter._propRefs[PropCategory.IMPLICIT_PERIODIC] = "v4UcPer"
counter._propRefs[PropCategory.IMPLICIT_MIN] = "v4UcMin"
counter._propRefs[PropCategory.IMPLICIT_MAX] = "v4UcMax"
counter._propRefs[PropCategory.IMPLICIT_AVG] = "v4UcAvg"
counter._propRefs[PropCategory.IMPLICIT_SUSPECT] = "v4UcSpct"
counter._propRefs[PropCategory.IMPLICIT_THRESHOLDED] = "v4UcThr"
counter._propRefs[PropCategory.IMPLICIT_TREND] = "v4UcTr"
counter._propRefs[PropCategory.IMPLICIT_RATE] = "v4UcRate"
meta._counters.append(counter)
counter = CounterMeta("v4Ep", CounterCategory.COUNTER, "count", "Total v4 32 Endpoints")
counter._propRefs[PropCategory.IMPLICIT_CUMULATIVE] = "v4EpCum"
counter._propRefs[PropCategory.IMPLICIT_PERIODIC] = "v4EpPer"
counter._propRefs[PropCategory.IMPLICIT_MIN] = "v4EpMin"
counter._propRefs[PropCategory.IMPLICIT_MAX] = "v4EpMax"
counter._propRefs[PropCategory.IMPLICIT_AVG] = "v4EpAvg"
counter._propRefs[PropCategory.IMPLICIT_SUSPECT] = "v4EpSpct"
counter._propRefs[PropCategory.IMPLICIT_THRESHOLDED] = "v4EpThr"
counter._propRefs[PropCategory.IMPLICIT_TREND] = "v4EpTr"
counter._propRefs[PropCategory.IMPLICIT_RATE] = "v4EpRate"
meta._counters.append(counter)
meta.moClassName = "eqptcapacityL3v4Usage32Hist15min"
meta.rnFormat = "HDeqptcapacityL3v4Usage3215min-%(index)s"
meta.category = MoCategory.STATS_HISTORY
meta.label = "historical Layer3 v4 32 entries usage count stats in 15 minute"
meta.writeAccessMask = 0x1
meta.readAccessMask = 0x1
meta.isDomainable = False
meta.isReadOnly = True
meta.isConfigurable = False
meta.isDeletable = False
meta.isContextRoot = True
meta.parentClasses.add("cobra.model.eqptcapacity.Entity")
meta.superClasses.add("cobra.model.eqptcapacity.L3v4Usage32Hist")
meta.superClasses.add("cobra.model.stats.Item")
meta.superClasses.add("cobra.model.stats.Hist")
meta.rnPrefixes = [
('HDeqptcapacityL3v4Usage3215min-', True),
]
prop = PropMeta("str", "childAction", "childAction", 4, PropCategory.CHILD_ACTION)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("deleteAll", "deleteall", 16384)
prop._addConstant("deleteNonPresent", "deletenonpresent", 8192)
prop._addConstant("ignore", "ignore", 4096)
meta.props.add("childAction", prop)
prop = PropMeta("str", "cnt", "cnt", 16212, PropCategory.REGULAR)
prop.label = "Number of Collections During this Interval"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("cnt", prop)
prop = PropMeta("str", "dn", "dn", 1, PropCategory.DN)
prop.label = "None"
prop.isDn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("dn", prop)
prop = PropMeta("str", "index", "index", 43928, PropCategory.REGULAR)
prop.label = "History Index"
prop.isConfig = True
prop.isAdmin = True
prop.isCreateOnly = True
prop.isNaming = True
meta.props.add("index", prop)
prop = PropMeta("str", "lastCollOffset", "lastCollOffset", 111, PropCategory.REGULAR)
prop.label = "Collection Length"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("lastCollOffset", prop)
prop = PropMeta("str", "modTs", "modTs", 7, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 0
prop.defaultValueStr = "never"
prop._addConstant("never", "never", 0)
meta.props.add("modTs", prop)
prop = PropMeta("str", "repIntvEnd", "repIntvEnd", 110, PropCategory.REGULAR)
prop.label = "Reporting End Time"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("repIntvEnd", prop)
prop = PropMeta("str", "repIntvStart", "repIntvStart", 109, PropCategory.REGULAR)
prop.label = "Reporting Start Time"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("repIntvStart", prop)
prop = PropMeta("str", "rn", "rn", 2, PropCategory.RN)
prop.label = "None"
prop.isRn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("rn", prop)
prop = PropMeta("str", "status", "status", 3, PropCategory.STATUS)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("created", "created", 2)
prop._addConstant("deleted", "deleted", 8)
prop._addConstant("modified", "modified", 4)
meta.props.add("status", prop)
prop = PropMeta("str", "v4EpAvg", "v4EpAvg", 43984, PropCategory.IMPLICIT_AVG)
prop.label = "Total v4 32 Endpoints average value"
prop.isOper = True
prop.isStats = True
meta.props.add("v4EpAvg", prop)
prop = PropMeta("str", "v4EpCum", "v4EpCum", 43980, PropCategory.IMPLICIT_CUMULATIVE)
prop.label = "Total v4 32 Endpoints cumulative"
prop.isOper = True
prop.isStats = True
meta.props.add("v4EpCum", prop)
prop = PropMeta("str", "v4EpMax", "v4EpMax", 43983, PropCategory.IMPLICIT_MAX)
prop.label = "Total v4 32 Endpoints maximum value"
prop.isOper = True
prop.isStats = True
meta.props.add("v4EpMax", prop)
prop = PropMeta("str", "v4EpMin", "v4EpMin", 43982, PropCategory.IMPLICIT_MIN)
prop.label = "Total v4 32 Endpoints minimum value"
prop.isOper = True
prop.isStats = True
meta.props.add("v4EpMin", prop)
prop = PropMeta("str", "v4EpPer", "v4EpPer", 43981, PropCategory.IMPLICIT_PERIODIC)
prop.label = "Total v4 32 Endpoints periodic"
prop.isOper = True
prop.isStats = True
meta.props.add("v4EpPer", prop)
prop = PropMeta("str", "v4EpRate", "v4EpRate", 43988, PropCategory.IMPLICIT_RATE)
prop.label = "Total v4 32 Endpoints rate"
prop.isOper = True
prop.isStats = True
meta.props.add("v4EpRate", prop)
prop = PropMeta("str", "v4EpSpct", "v4EpSpct", 43985, PropCategory.IMPLICIT_SUSPECT)
prop.label = "Total v4 32 Endpoints suspect count"
prop.isOper = True
prop.isStats = True
meta.props.add("v4EpSpct", prop)
prop = PropMeta("str", "v4EpThr", "v4EpThr", 43986, PropCategory.IMPLICIT_THRESHOLDED)
prop.label = "Total v4 32 Endpoints thresholded flags"
prop.isOper = True
prop.isStats = True
prop.defaultValue = 0
prop.defaultValueStr = "unspecified"
prop._addConstant("avgCrit", "avg-severity-critical", 2199023255552)
prop._addConstant("avgHigh", "avg-crossed-high-threshold", 68719476736)
prop._addConstant("avgLow", "avg-crossed-low-threshold", 137438953472)
prop._addConstant("avgMajor", "avg-severity-major", 1099511627776)
prop._addConstant("avgMinor", "avg-severity-minor", 549755813888)
prop._addConstant("avgRecovering", "avg-recovering", 34359738368)
prop._addConstant("avgWarn", "avg-severity-warning", 274877906944)
prop._addConstant("cumulativeCrit", "cumulative-severity-critical", 8192)
prop._addConstant("cumulativeHigh", "cumulative-crossed-high-threshold", 256)
prop._addConstant("cumulativeLow", "cumulative-crossed-low-threshold", 512)
prop._addConstant("cumulativeMajor", "cumulative-severity-major", 4096)
prop._addConstant("cumulativeMinor", "cumulative-severity-minor", 2048)
prop._addConstant("cumulativeRecovering", "cumulative-recovering", 128)
prop._addConstant("cumulativeWarn", "cumulative-severity-warning", 1024)
prop._addConstant("lastReadingCrit", "lastreading-severity-critical", 64)
prop._addConstant("lastReadingHigh", "lastreading-crossed-high-threshold", 2)
prop._addConstant("lastReadingLow", "lastreading-crossed-low-threshold", 4)
prop._addConstant("lastReadingMajor", "lastreading-severity-major", 32)
prop._addConstant("lastReadingMinor", "lastreading-severity-minor", 16)
prop._addConstant("lastReadingRecovering", "lastreading-recovering", 1)
prop._addConstant("lastReadingWarn", "lastreading-severity-warning", 8)
prop._addConstant("maxCrit", "max-severity-critical", 17179869184)
prop._addConstant("maxHigh", "max-crossed-high-threshold", 536870912)
prop._addConstant("maxLow", "max-crossed-low-threshold", 1073741824)
prop._addConstant("maxMajor", "max-severity-major", 8589934592)
prop._addConstant("maxMinor", "max-severity-minor", 4294967296)
prop._addConstant("maxRecovering", "max-recovering", 268435456)
prop._addConstant("maxWarn", "max-severity-warning", 2147483648)
prop._addConstant("minCrit", "min-severity-critical", 134217728)
prop._addConstant("minHigh", "min-crossed-high-threshold", 4194304)
prop._addConstant("minLow", "min-crossed-low-threshold", 8388608)
prop._addConstant("minMajor", "min-severity-major", 67108864)
prop._addConstant("minMinor", "min-severity-minor", 33554432)
prop._addConstant("minRecovering", "min-recovering", 2097152)
prop._addConstant("minWarn", "min-severity-warning", 16777216)
prop._addConstant("periodicCrit", "periodic-severity-critical", 1048576)
prop._addConstant("periodicHigh", "periodic-crossed-high-threshold", 32768)
prop._addConstant("periodicLow", "periodic-crossed-low-threshold", 65536)
prop._addConstant("periodicMajor", "periodic-severity-major", 524288)
prop._addConstant("periodicMinor", "periodic-severity-minor", 262144)
prop._addConstant("periodicRecovering", "periodic-recovering", 16384)
prop._addConstant("periodicWarn", "periodic-severity-warning", 131072)
prop._addConstant("rateCrit", "rate-severity-critical", 36028797018963968)
prop._addConstant("rateHigh", "rate-crossed-high-threshold", 1125899906842624)
prop._addConstant("rateLow", "rate-crossed-low-threshold", 2251799813685248)
prop._addConstant("rateMajor", "rate-severity-major", 18014398509481984)
prop._addConstant("rateMinor", "rate-severity-minor", 9007199254740992)
prop._addConstant("rateRecovering", "rate-recovering", 562949953421312)
prop._addConstant("rateWarn", "rate-severity-warning", 4503599627370496)
prop._addConstant("trendCrit", "trend-severity-critical", 281474976710656)
prop._addConstant("trendHigh", "trend-crossed-high-threshold", 8796093022208)
prop._addConstant("trendLow", "trend-crossed-low-threshold", 17592186044416)
prop._addConstant("trendMajor", "trend-severity-major", 140737488355328)
prop._addConstant("trendMinor", "trend-severity-minor", 70368744177664)
prop._addConstant("trendRecovering", "trend-recovering", 4398046511104)
prop._addConstant("trendWarn", "trend-severity-warning", 35184372088832)
prop._addConstant("unspecified", None, 0)
meta.props.add("v4EpThr", prop)
prop = PropMeta("str", "v4EpTr", "v4EpTr", 43987, PropCategory.IMPLICIT_TREND)
prop.label = "Total v4 32 Endpoints trend"
prop.isOper = True
prop.isStats = True
meta.props.add("v4EpTr", prop)
prop = PropMeta("str", "v4McAvg", "v4McAvg", 44005, PropCategory.IMPLICIT_AVG)
prop.label = "Total v4 32 MC Routes average value"
prop.isOper = True
prop.isStats = True
meta.props.add("v4McAvg", prop)
prop = PropMeta("str", "v4McCum", "v4McCum", 44001, PropCategory.IMPLICIT_CUMULATIVE)
prop.label = "Total v4 32 MC Routes cumulative"
prop.isOper = True
prop.isStats = True
meta.props.add("v4McCum", prop)
prop = PropMeta("str", "v4McMax", "v4McMax", 44004, PropCategory.IMPLICIT_MAX)
prop.label = "Total v4 32 MC Routes maximum value"
prop.isOper = True
prop.isStats = True
meta.props.add("v4McMax", prop)
prop = PropMeta("str", "v4McMin", "v4McMin", 44003, PropCategory.IMPLICIT_MIN)
prop.label = "Total v4 32 MC Routes minimum value"
prop.isOper = True
prop.isStats = True
meta.props.add("v4McMin", prop)
prop = PropMeta("str", "v4McPer", "v4McPer", 44002, PropCategory.IMPLICIT_PERIODIC)
prop.label = "Total v4 32 MC Routes periodic"
prop.isOper = True
prop.isStats = True
meta.props.add("v4McPer", prop)
prop = PropMeta("str", "v4McRate", "v4McRate", 44009, PropCategory.IMPLICIT_RATE)
prop.label = "Total v4 32 MC Routes rate"
prop.isOper = True
prop.isStats = True
meta.props.add("v4McRate", prop)
prop = PropMeta("str", "v4McSpct", "v4McSpct", 44006, PropCategory.IMPLICIT_SUSPECT)
prop.label = "Total v4 32 MC Routes suspect count"
prop.isOper = True
prop.isStats = True
meta.props.add("v4McSpct", prop)
prop = PropMeta("str", "v4McThr", "v4McThr", 44007, PropCategory.IMPLICIT_THRESHOLDED)
prop.label = "Total v4 32 MC Routes thresholded flags"
prop.isOper = True
prop.isStats = True
prop.defaultValue = 0
prop.defaultValueStr = "unspecified"
prop._addConstant("avgCrit", "avg-severity-critical", 2199023255552)
prop._addConstant("avgHigh", "avg-crossed-high-threshold", 68719476736)
prop._addConstant("avgLow", "avg-crossed-low-threshold", 137438953472)
prop._addConstant("avgMajor", "avg-severity-major", 1099511627776)
prop._addConstant("avgMinor", "avg-severity-minor", 549755813888)
prop._addConstant("avgRecovering", "avg-recovering", 34359738368)
prop._addConstant("avgWarn", "avg-severity-warning", 274877906944)
prop._addConstant("cumulativeCrit", "cumulative-severity-critical", 8192)
prop._addConstant("cumulativeHigh", "cumulative-crossed-high-threshold", 256)
prop._addConstant("cumulativeLow", "cumulative-crossed-low-threshold", 512)
prop._addConstant("cumulativeMajor", "cumulative-severity-major", 4096)
prop._addConstant("cumulativeMinor", "cumulative-severity-minor", 2048)
prop._addConstant("cumulativeRecovering", "cumulative-recovering", 128)
prop._addConstant("cumulativeWarn", "cumulative-severity-warning", 1024)
prop._addConstant("lastReadingCrit", "lastreading-severity-critical", 64)
prop._addConstant("lastReadingHigh", "lastreading-crossed-high-threshold", 2)
prop._addConstant("lastReadingLow", "lastreading-crossed-low-threshold", 4)
prop._addConstant("lastReadingMajor", "lastreading-severity-major", 32)
prop._addConstant("lastReadingMinor", "lastreading-severity-minor", 16)
prop._addConstant("lastReadingRecovering", "lastreading-recovering", 1)
prop._addConstant("lastReadingWarn", "lastreading-severity-warning", 8)
prop._addConstant("maxCrit", "max-severity-critical", 17179869184)
prop._addConstant("maxHigh", "max-crossed-high-threshold", 536870912)
prop._addConstant("maxLow", "max-crossed-low-threshold", 1073741824)
prop._addConstant("maxMajor", "max-severity-major", 8589934592)
prop._addConstant("maxMinor", "max-severity-minor", 4294967296)
prop._addConstant("maxRecovering", "max-recovering", 268435456)
prop._addConstant("maxWarn", "max-severity-warning", 2147483648)
prop._addConstant("minCrit", "min-severity-critical", 134217728)
prop._addConstant("minHigh", "min-crossed-high-threshold", 4194304)
prop._addConstant("minLow", "min-crossed-low-threshold", 8388608)
prop._addConstant("minMajor", "min-severity-major", 67108864)
prop._addConstant("minMinor", "min-severity-minor", 33554432)
prop._addConstant("minRecovering", "min-recovering", 2097152)
prop._addConstant("minWarn", "min-severity-warning", 16777216)
prop._addConstant("periodicCrit", "periodic-severity-critical", 1048576)
prop._addConstant("periodicHigh", "periodic-crossed-high-threshold", 32768)
prop._addConstant("periodicLow", "periodic-crossed-low-threshold", 65536)
prop._addConstant("periodicMajor", "periodic-severity-major", 524288)
prop._addConstant("periodicMinor", "periodic-severity-minor", 262144)
prop._addConstant("periodicRecovering", "periodic-recovering", 16384)
prop._addConstant("periodicWarn", "periodic-severity-warning", 131072)
prop._addConstant("rateCrit", "rate-severity-critical", 36028797018963968)
prop._addConstant("rateHigh", "rate-crossed-high-threshold", 1125899906842624)
prop._addConstant("rateLow", "rate-crossed-low-threshold", 2251799813685248)
prop._addConstant("rateMajor", "rate-severity-major", 18014398509481984)
prop._addConstant("rateMinor", "rate-severity-minor", 9007199254740992)
prop._addConstant("rateRecovering", "rate-recovering", 562949953421312)
prop._addConstant("rateWarn", "rate-severity-warning", 4503599627370496)
prop._addConstant("trendCrit", "trend-severity-critical", 281474976710656)
prop._addConstant("trendHigh", "trend-crossed-high-threshold", 8796093022208)
prop._addConstant("trendLow", "trend-crossed-low-threshold", 17592186044416)
prop._addConstant("trendMajor", "trend-severity-major", 140737488355328)
prop._addConstant("trendMinor", "trend-severity-minor", 70368744177664)
prop._addConstant("trendRecovering", "trend-recovering", 4398046511104)
prop._addConstant("trendWarn", "trend-severity-warning", 35184372088832)
prop._addConstant("unspecified", None, 0)
meta.props.add("v4McThr", prop)
prop = PropMeta("str", "v4McTr", "v4McTr", 44008, PropCategory.IMPLICIT_TREND)
prop.label = "Total v4 32 MC Routes trend"
prop.isOper = True
prop.isStats = True
meta.props.add("v4McTr", prop)
prop = PropMeta("str", "v4TotalAvg", "v4TotalAvg", 44026, PropCategory.IMPLICIT_AVG)
prop.label = "Total v4 32 Routes average value"
prop.isOper = True
prop.isStats = True
meta.props.add("v4TotalAvg", prop)
prop = PropMeta("str", "v4TotalCum", "v4TotalCum", 44022, PropCategory.IMPLICIT_CUMULATIVE)
prop.label = "Total v4 32 Routes cumulative"
prop.isOper = True
prop.isStats = True
meta.props.add("v4TotalCum", prop)
prop = PropMeta("str", "v4TotalMax", "v4TotalMax", 44025, PropCategory.IMPLICIT_MAX)
prop.label = "Total v4 32 Routes maximum value"
prop.isOper = True
prop.isStats = True
meta.props.add("v4TotalMax", prop)
prop = PropMeta("str", "v4TotalMin", "v4TotalMin", 44024, PropCategory.IMPLICIT_MIN)
prop.label = "Total v4 32 Routes minimum value"
prop.isOper = True
prop.isStats = True
meta.props.add("v4TotalMin", prop)
prop = PropMeta("str", "v4TotalPer", "v4TotalPer", 44023, PropCategory.IMPLICIT_PERIODIC)
prop.label = "Total v4 32 Routes periodic"
prop.isOper = True
prop.isStats = True
meta.props.add("v4TotalPer", prop)
prop = PropMeta("str", "v4TotalRate", "v4TotalRate", 44030, PropCategory.IMPLICIT_RATE)
prop.label = "Total v4 32 Routes rate"
prop.isOper = True
prop.isStats = True
meta.props.add("v4TotalRate", prop)
prop = PropMeta("str", "v4TotalSpct", "v4TotalSpct", 44027, PropCategory.IMPLICIT_SUSPECT)
prop.label = "Total v4 32 Routes suspect count"
prop.isOper = True
prop.isStats = True
meta.props.add("v4TotalSpct", prop)
prop = PropMeta("str", "v4TotalThr", "v4TotalThr", 44028, PropCategory.IMPLICIT_THRESHOLDED)
prop.label = "Total v4 32 Routes thresholded flags"
prop.isOper = True
prop.isStats = True
prop.defaultValue = 0
prop.defaultValueStr = "unspecified"
prop._addConstant("avgCrit", "avg-severity-critical", 2199023255552)
prop._addConstant("avgHigh", "avg-crossed-high-threshold", 68719476736)
prop._addConstant("avgLow", "avg-crossed-low-threshold", 137438953472)
prop._addConstant("avgMajor", "avg-severity-major", 1099511627776)
prop._addConstant("avgMinor", "avg-severity-minor", 549755813888)
prop._addConstant("avgRecovering", "avg-recovering", 34359738368)
prop._addConstant("avgWarn", "avg-severity-warning", 274877906944)
prop._addConstant("cumulativeCrit", "cumulative-severity-critical", 8192)
prop._addConstant("cumulativeHigh", "cumulative-crossed-high-threshold", 256)
prop._addConstant("cumulativeLow", "cumulative-crossed-low-threshold", 512)
prop._addConstant("cumulativeMajor", "cumulative-severity-major", 4096)
prop._addConstant("cumulativeMinor", "cumulative-severity-minor", 2048)
prop._addConstant("cumulativeRecovering", "cumulative-recovering", 128)
prop._addConstant("cumulativeWarn", "cumulative-severity-warning", 1024)
prop._addConstant("lastReadingCrit", "lastreading-severity-critical", 64)
prop._addConstant("lastReadingHigh", "lastreading-crossed-high-threshold", 2)
prop._addConstant("lastReadingLow", "lastreading-crossed-low-threshold", 4)
prop._addConstant("lastReadingMajor", "lastreading-severity-major", 32)
prop._addConstant("lastReadingMinor", "lastreading-severity-minor", 16)
prop._addConstant("lastReadingRecovering", "lastreading-recovering", 1)
prop._addConstant("lastReadingWarn", "lastreading-severity-warning", 8)
prop._addConstant("maxCrit", "max-severity-critical", 17179869184)
prop._addConstant("maxHigh", "max-crossed-high-threshold", 536870912)
prop._addConstant("maxLow", "max-crossed-low-threshold", 1073741824)
prop._addConstant("maxMajor", "max-severity-major", 8589934592)
prop._addConstant("maxMinor", "max-severity-minor", 4294967296)
prop._addConstant("maxRecovering", "max-recovering", 268435456)
prop._addConstant("maxWarn", "max-severity-warning", 2147483648)
prop._addConstant("minCrit", "min-severity-critical", 134217728)
prop._addConstant("minHigh", "min-crossed-high-threshold", 4194304)
prop._addConstant("minLow", "min-crossed-low-threshold", 8388608)
prop._addConstant("minMajor", "min-severity-major", 67108864)
prop._addConstant("minMinor", "min-severity-minor", 33554432)
prop._addConstant("minRecovering", "min-recovering", 2097152)
prop._addConstant("minWarn", "min-severity-warning", 16777216)
prop._addConstant("periodicCrit", "periodic-severity-critical", 1048576)
prop._addConstant("periodicHigh", "periodic-crossed-high-threshold", 32768)
prop._addConstant("periodicLow", "periodic-crossed-low-threshold", 65536)
prop._addConstant("periodicMajor", "periodic-severity-major", 524288)
prop._addConstant("periodicMinor", "periodic-severity-minor", 262144)
prop._addConstant("periodicRecovering", "periodic-recovering", 16384)
prop._addConstant("periodicWarn", "periodic-severity-warning", 131072)
prop._addConstant("rateCrit", "rate-severity-critical", 36028797018963968)
prop._addConstant("rateHigh", "rate-crossed-high-threshold", 1125899906842624)
prop._addConstant("rateLow", "rate-crossed-low-threshold", 2251799813685248)
prop._addConstant("rateMajor", "rate-severity-major", 18014398509481984)
prop._addConstant("rateMinor", "rate-severity-minor", 9007199254740992)
prop._addConstant("rateRecovering", "rate-recovering", 562949953421312)
prop._addConstant("rateWarn", "rate-severity-warning", 4503599627370496)
prop._addConstant("trendCrit", "trend-severity-critical", 281474976710656)
prop._addConstant("trendHigh", "trend-crossed-high-threshold", 8796093022208)
prop._addConstant("trendLow", "trend-crossed-low-threshold", 17592186044416)
prop._addConstant("trendMajor", "trend-severity-major", 140737488355328)
prop._addConstant("trendMinor", "trend-severity-minor", 70368744177664)
prop._addConstant("trendRecovering", "trend-recovering", 4398046511104)
prop._addConstant("trendWarn", "trend-severity-warning", 35184372088832)
prop._addConstant("unspecified", None, 0)
meta.props.add("v4TotalThr", prop)
prop = PropMeta("str", "v4TotalTr", "v4TotalTr", 44029, PropCategory.IMPLICIT_TREND)
prop.label = "Total v4 32 Routes trend"
prop.isOper = True
prop.isStats = True
meta.props.add("v4TotalTr", prop)
prop = PropMeta("str", "v4UcAvg", "v4UcAvg", 44047, PropCategory.IMPLICIT_AVG)
prop.label = "Total v4 32 UC Routes average value"
prop.isOper = True
prop.isStats = True
meta.props.add("v4UcAvg", prop)
prop = PropMeta("str", "v4UcCum", "v4UcCum", 44043, PropCategory.IMPLICIT_CUMULATIVE)
prop.label = "Total v4 32 UC Routes cumulative"
prop.isOper = True
prop.isStats = True
meta.props.add("v4UcCum", prop)
prop = PropMeta("str", "v4UcMax", "v4UcMax", 44046, PropCategory.IMPLICIT_MAX)
prop.label = "Total v4 32 UC Routes maximum value"
prop.isOper = True
prop.isStats = True
meta.props.add("v4UcMax", prop)
prop = PropMeta("str", "v4UcMin", "v4UcMin", 44045, PropCategory.IMPLICIT_MIN)
prop.label = "Total v4 32 UC Routes minimum value"
prop.isOper = True
prop.isStats = True
meta.props.add("v4UcMin", prop)
prop = PropMeta("str", "v4UcPer", "v4UcPer", 44044, PropCategory.IMPLICIT_PERIODIC)
prop.label = "Total v4 32 UC Routes periodic"
prop.isOper = True
prop.isStats = True
meta.props.add("v4UcPer", prop)
prop = PropMeta("str", "v4UcRate", "v4UcRate", 44051, PropCategory.IMPLICIT_RATE)
prop.label = "Total v4 32 UC Routes rate"
prop.isOper = True
prop.isStats = True
meta.props.add("v4UcRate", prop)
prop = PropMeta("str", "v4UcSpct", "v4UcSpct", 44048, PropCategory.IMPLICIT_SUSPECT)
prop.label = "Total v4 32 UC Routes suspect count"
prop.isOper = True
prop.isStats = True
meta.props.add("v4UcSpct", prop)
prop = PropMeta("str", "v4UcThr", "v4UcThr", 44049, PropCategory.IMPLICIT_THRESHOLDED)
prop.label = "Total v4 32 UC Routes thresholded flags"
prop.isOper = True
prop.isStats = True
prop.defaultValue = 0
prop.defaultValueStr = "unspecified"
prop._addConstant("avgCrit", "avg-severity-critical", 2199023255552)
prop._addConstant("avgHigh", "avg-crossed-high-threshold", 68719476736)
prop._addConstant("avgLow", "avg-crossed-low-threshold", 137438953472)
prop._addConstant("avgMajor", "avg-severity-major", 1099511627776)
prop._addConstant("avgMinor", "avg-severity-minor", 549755813888)
prop._addConstant("avgRecovering", "avg-recovering", 34359738368)
prop._addConstant("avgWarn", "avg-severity-warning", 274877906944)
prop._addConstant("cumulativeCrit", "cumulative-severity-critical", 8192)
prop._addConstant("cumulativeHigh", "cumulative-crossed-high-threshold", 256)
prop._addConstant("cumulativeLow", "cumulative-crossed-low-threshold", 512)
prop._addConstant("cumulativeMajor", "cumulative-severity-major", 4096)
prop._addConstant("cumulativeMinor", "cumulative-severity-minor", 2048)
prop._addConstant("cumulativeRecovering", "cumulative-recovering", 128)
prop._addConstant("cumulativeWarn", "cumulative-severity-warning", 1024)
prop._addConstant("lastReadingCrit", "lastreading-severity-critical", 64)
prop._addConstant("lastReadingHigh", "lastreading-crossed-high-threshold", 2)
prop._addConstant("lastReadingLow", "lastreading-crossed-low-threshold", 4)
prop._addConstant("lastReadingMajor", "lastreading-severity-major", 32)
prop._addConstant("lastReadingMinor", "lastreading-severity-minor", 16)
prop._addConstant("lastReadingRecovering", "lastreading-recovering", 1)
prop._addConstant("lastReadingWarn", "lastreading-severity-warning", 8)
prop._addConstant("maxCrit", "max-severity-critical", 17179869184)
prop._addConstant("maxHigh", "max-crossed-high-threshold", 536870912)
prop._addConstant("maxLow", "max-crossed-low-threshold", 1073741824)
prop._addConstant("maxMajor", "max-severity-major", 8589934592)
prop._addConstant("maxMinor", "max-severity-minor", 4294967296)
prop._addConstant("maxRecovering", "max-recovering", 268435456)
prop._addConstant("maxWarn", "max-severity-warning", 2147483648)
prop._addConstant("minCrit", "min-severity-critical", 134217728)
prop._addConstant("minHigh", "min-crossed-high-threshold", 4194304)
prop._addConstant("minLow", "min-crossed-low-threshold", 8388608)
prop._addConstant("minMajor", "min-severity-major", 67108864)
prop._addConstant("minMinor", "min-severity-minor", 33554432)
prop._addConstant("minRecovering", "min-recovering", 2097152)
prop._addConstant("minWarn", "min-severity-warning", 16777216)
prop._addConstant("periodicCrit", "periodic-severity-critical", 1048576)
prop._addConstant("periodicHigh", "periodic-crossed-high-threshold", 32768)
prop._addConstant("periodicLow", "periodic-crossed-low-threshold", 65536)
prop._addConstant("periodicMajor", "periodic-severity-major", 524288)
prop._addConstant("periodicMinor", "periodic-severity-minor", 262144)
prop._addConstant("periodicRecovering", "periodic-recovering", 16384)
prop._addConstant("periodicWarn", "periodic-severity-warning", 131072)
prop._addConstant("rateCrit", "rate-severity-critical", 36028797018963968)
prop._addConstant("rateHigh", "rate-crossed-high-threshold", 1125899906842624)
prop._addConstant("rateLow", "rate-crossed-low-threshold", 2251799813685248)
prop._addConstant("rateMajor", "rate-severity-major", 18014398509481984)
prop._addConstant("rateMinor", "rate-severity-minor", 9007199254740992)
prop._addConstant("rateRecovering", "rate-recovering", 562949953421312)
prop._addConstant("rateWarn", "rate-severity-warning", 4503599627370496)
prop._addConstant("trendCrit", "trend-severity-critical", 281474976710656)
prop._addConstant("trendHigh", "trend-crossed-high-threshold", 8796093022208)
prop._addConstant("trendLow", "trend-crossed-low-threshold", 17592186044416)
prop._addConstant("trendMajor", "trend-severity-major", 140737488355328)
prop._addConstant("trendMinor", "trend-severity-minor", 70368744177664)
prop._addConstant("trendRecovering", "trend-recovering", 4398046511104)
prop._addConstant("trendWarn", "trend-severity-warning", 35184372088832)
prop._addConstant("unspecified", None, 0)
meta.props.add("v4UcThr", prop)
prop = PropMeta("str", "v4UcTr", "v4UcTr", 44050, PropCategory.IMPLICIT_TREND)
prop.label = "Total v4 32 UC Routes trend"
prop.isOper = True
prop.isStats = True
meta.props.add("v4UcTr", prop)
meta.namingProps.append(getattr(meta.props, "index"))
def __init__(self, parentMoOrDn, index, markDirty=True, **creationProps):
namingVals = [index]
Mo.__init__(self, parentMoOrDn, markDirty, *namingVals, **creationProps)
# End of package file
# ##################################################
| [
"[email protected]"
] | |
bc14781f6113bc5ea4c576963d9b7ad2dc85d784 | 05e2277cf1af409123f43fc0a3226014dd170556 | /5338.py | 9b83d26fcb85b3f1b1a009a2a3cfa9816de6a4b9 | [] | no_license | 2021-01-06/baekjoon | 4dec386574ce9f51f589a944b71436ce1eb2521e | ca8f02ecbed11fe98adfd1c18ce265b10f1298bc | refs/heads/main | 2023-05-06T08:19:53.943479 | 2021-05-14T03:25:55 | 2021-05-14T03:25:55 | 327,730,237 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 109 | py |
r = " _.-;;-._\n'-..-'| || |\n'-..-'|_.-;;-._|\n'-..-'| || |\n'-..-'|_.-''-._|"
print(r) | [
"[email protected]"
] | |
2126d0e6f819d2f8228404e6da813259e36d4d9d | 23611933f0faba84fc82a1bc0a85d97cf45aba99 | /google-cloud-sdk/.install/.backup/lib/surface/dataflow/logs/list.py | f28762d652662cd185af353118e8e3f26c73be04 | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive | KaranToor/MA450 | 1f112d1caccebdc04702a77d5a6cee867c15f75c | c98b58aeb0994e011df960163541e9379ae7ea06 | refs/heads/master | 2021-06-21T06:17:42.585908 | 2020-12-24T00:36:28 | 2020-12-24T00:36:28 | 79,285,433 | 1 | 1 | Apache-2.0 | 2020-12-24T00:38:09 | 2017-01-18T00:05:44 | Python | UTF-8 | Python | false | false | 4,207 | py | # Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implementation of gcloud dataflow logs list command.
"""
from googlecloudsdk.api_lib.dataflow import apis
from googlecloudsdk.calliope import base
from googlecloudsdk.command_lib.dataflow import dataflow_util
from googlecloudsdk.command_lib.dataflow import job_utils
from googlecloudsdk.command_lib.dataflow import time_util
from googlecloudsdk.core.resource import resource_projection_spec
class List(base.ListCommand):
"""Retrieve the job logs for a specific job.
Retrieves the job logs from a specified job using the Dataflow Messages API
with at least the specified importance level. Can also be used to display
logs between a given time period using the --before and --after flags. These
logs are produced by the service and are distinct from worker logs. Worker
logs can be found in Cloud Logging.
## EXAMPLES
Retrieve only error logs:
$ {command} --importance=error
Retrieve all logs after some date:
$ {command} --after="2016-08-12 00:00:00"
"""
@staticmethod
def Args(parser):
job_utils.ArgsForJobRef(parser)
base.SORT_BY_FLAG.RemoveFromParser(parser)
base.URI_FLAG.RemoveFromParser(parser)
base.ASYNC_FLAG.RemoveFromParser(parser)
base.PAGE_SIZE_FLAG.RemoveFromParser(parser)
parser.add_argument(
'--after',
type=time_util.ParseTimeArg,
help='Only display messages logged after the given time. Time format is'
' yyyy-mm-dd hh-mm-ss')
parser.add_argument(
'--before',
type=time_util.ParseTimeArg,
help='Only display messages logged before the given time. Time format'
' is yyyy-mm-dd hh-mm-ss')
parser.add_argument(
'--importance',
choices=['debug', 'detailed', 'warning', 'error'],
default='warning',
help='Minimum importance a message must have to be displayed.')
def Collection(self):
return 'dataflow.logs'
def Defaults(self):
importances = {
'JOB_MESSAGE_DETAILED': 'd',
'JOB_MESSAGE_DEBUG': 'D',
'JOB_MESSAGE_WARNING': 'W',
'JOB_MESSAGE_ERROR': 'E',
}
symbols = {'dataflow.JobMessage::enum': importances}
return resource_projection_spec.ProjectionSpec(symbols=symbols)
def Run(self, args):
"""This is what gets called when the user runs this command.
Args:
args: all the arguments that were provided to this command invocation.
Returns:
None on success, or a string containing the error message.
"""
job_ref = job_utils.ExtractJobRef(args.job)
importance_enum = (
apis.Messages.LIST_REQUEST.MinimumImportanceValueValuesEnum)
importance_map = {
'debug': importance_enum.JOB_MESSAGE_DEBUG,
'detailed': importance_enum.JOB_MESSAGE_DETAILED,
'error': importance_enum.JOB_MESSAGE_ERROR,
'warning': importance_enum.JOB_MESSAGE_WARNING,
}
request = apis.Messages.LIST_REQUEST(
projectId=job_ref.projectId,
jobId=job_ref.jobId,
minimumImportance=(args.importance and importance_map[args.importance]),
        # Note: if both are present, startTime > endTime, because we will
# return messages with actual time [endTime, startTime).
startTime=args.after and time_util.Strftime(args.after),
endTime=args.before and time_util.Strftime(args.before))
return dataflow_util.YieldFromList(
job_id=job_ref.jobId,
project_id=job_ref.projectId,
service=apis.Messages.GetService(),
request=request,
batch_size=args.limit,
batch_size_attribute='pageSize',
field='jobMessages')
| [
"[email protected]"
] | |
a1a5d6d35688b5d03aa7b87e61ffdde734cd42d9 | 80568d6f2ae7d811d12492998edac55c74d07b08 | /src/backend/app/tests/__init__.py | 7ad6d08bb9c719982929d19d29077c2cdc95f986 | [
"MIT"
] | permissive | skaersoe/web-app-skeleton | 166aec034111aaa21d03386c1699317ce3bc1621 | af7580822b18e714d7c0b54f6fdcd11bf57f16e0 | refs/heads/master | 2023-04-22T10:01:16.986724 | 2020-11-06T23:20:04 | 2020-11-06T23:20:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 76 | py | # -*- coding: utf-8 -*-
"""
Unit tests & utilities for the application.
"""
| [
"[email protected]"
] | |
9bf73ed630402dac7ae78a0d8024344fb7bc36c1 | a20cb5dfd6ae2e5067a822f3b828a7c72e55489a | /7_Reverse_Integer.py | 3ff33ebfc65f4d84fb3ae102c4fc7428f7f587d1 | [
"MIT"
] | permissive | rpm1995/LeetCode | 51f6325cf77be95bb1106d18de75974e03dba9b7 | 147d99e273bc398c107f2aef73aba0d6bb88dea0 | refs/heads/master | 2021-12-07T12:00:59.386002 | 2021-08-12T02:55:19 | 2021-08-12T02:55:19 | 193,178,028 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 742 | py | class Solution:
def reverse(self, x):
"""
:type x: int
:rtype: int
"""
negative = False
if x < 0:
negative = True
# x = str(abs(x))
# y = ""
# for i in range(len(x) - 1, -1 ,-1):
# y += x[i]
# if negative is True:
# y = "-" + y
# y.strip()
# if int(y) < -2**31 or int(y) > (2**31) -1:
# return 0
# return int(y)
x = abs(x)
y = 0
while x:
y = (y * 10) + (x % 10)
x = x // 10
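        # Illustrative trace for x = 123:
        #   y = 0 * 10 + 3 = 3,    x = 12
        #   y = 3 * 10 + 2 = 32,   x = 1
        #   y = 32 * 10 + 1 = 321, x = 0  (loop ends)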
if negative:
y *= -1
        # Keep the result within the signed 32-bit range [-2**31, 2**31 - 1].
        return y if -2 ** 31 <= y <= 2 ** 31 - 1 else 0
| [
"[email protected]"
] | |
ce6e545dd6aeacb825dbc02fce6b0fdcd0e09478 | c9ddbdb5678ba6e1c5c7e64adf2802ca16df778c | /cases/pa3/sample/stmt_for_list_nonlocal-83.py | 700f6f1b7fdff2447bb0082637f068e893d2663f | [] | no_license | Virtlink/ccbench-chocopy | c3f7f6af6349aff6503196f727ef89f210a1eac8 | c7efae43bf32696ee2b2ee781bdfe4f7730dec3f | refs/heads/main | 2023-04-07T15:07:12.464038 | 2022-02-03T15:42:39 | 2022-02-03T15:42:39 | 451,969,776 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 311 | py | x:int = 0
def crunch(zz:[[int]]) -> object:
z:[int] = None
global x
def make_z() -> object:
nonlocal z
for z in zz:
pass # Set z to last element in zz
make_z()
for x in z:
pass # Set x to last element in z
crunch([[$Exp,2],[2,3],[4,5],[6,7]])
print(x)
| [
"[email protected]"
] | |
1873a83d2c391a13a16edf28969ffb67056d7544 | 54c7e0d5c63246c46652292f3817fa6d46512fa8 | /apps/base/models.py | 5efae51e43b1c0788011dfc7ca040c98509e0f35 | [
"Apache-2.0"
] | permissive | helianthus1997/BlogBackendProject | 4360d3837fa27b78c1e57c84d94f1055333ae15c | d4c0ee0bf19e5578e07425465930e1004cbe16d7 | refs/heads/master | 2020-03-14T11:29:53.918567 | 2018-04-14T17:00:39 | 2018-04-14T17:00:39 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,048 | py | import hashlib
from django.db import models
from material.models import MaterialSocial, MaterialMaster
class NavigationLink(models.Model):
"""
自定义导航
"""
TARGET_TYPE = (
("_blank", "blank - 浏览器总在一个新打开、未命名的窗口中载入目标文档。"),
("_self", "self - 这个目标的值对所有没有指定目标的 <a> 标签是默认目标,它使得目标文档载入并显示在相同的框架或者窗口中作为源文档。这个目标是多余且不必要的,除非和文档标题 <base> 标签中的 target 属性一起使用。"),
("_parent", "parent - 这个目标使得文档载入父窗口或者包含来超链接引用的框架的框架集。如果这个引用是在窗口或者在顶级框架中,那么它与目标 _self 等效。"),
("_top", "top - 这个目标使得文档载入包含这个超链接的窗口,用 _top 目标将会清除所有被包含的框架并将文档载入整个浏览器窗口。")
)
name = models.CharField(max_length=30, verbose_name="名称", help_text="名称")
desc = models.CharField(max_length=100, verbose_name="简介", help_text="简介")
image = models.ImageField(upload_to="base/friendlink/image/%y/%m", null=True, blank=True, verbose_name="图片", help_text="图片")
url = models.CharField(max_length=200, verbose_name="链接", help_text="链接")
target = models.CharField(max_length=10, choices=TARGET_TYPE, null=True, blank=True, verbose_name="Target类别",
help_text="对应于a标签中的target属性")
add_time = models.DateTimeField(auto_now_add=True, null=True, blank=True, verbose_name="添加时间", help_text="添加时间")
class Meta:
verbose_name = "自定义导航"
verbose_name_plural = verbose_name + '列表'
def __str__(self):
return self.name
class SiteInfo(models.Model):
name = models.CharField(default="", max_length=20, verbose_name="名称", help_text="名称")
name_en = models.CharField(default="", max_length=20, verbose_name="名称英文", help_text="名称英文")
desc = models.CharField(default="", max_length=20, verbose_name="简介", help_text="简介")
icon = models.ImageField(upload_to="base/site/image/%y/%m", null=True, blank=True, verbose_name="图标", help_text="图标")
api_base_url = models.URLField(max_length=30, null=False, blank=False, verbose_name='API接口BaseURL')
navigations = models.ManyToManyField(NavigationLink, through="SiteInfoNavigation", through_fields=(
'site', 'navigation'), verbose_name='自定义导航', help_text='自定义导航')
copyright = models.CharField(default="", max_length=100, verbose_name="版权", help_text="版权")
icp = models.CharField(default="", max_length=20, verbose_name="ICP", help_text="ICP")
is_live = models.BooleanField(default=False, verbose_name="是否激活", help_text="是否激活")
is_force_refresh = models.BooleanField(default=False, verbose_name="是否强制刷新", help_text="是否强制刷新")
access_password = models.CharField(max_length=20, null=True, blank=True, verbose_name="访问密码", help_text="浏览密码")
access_password_encrypt = models.CharField(max_length=100, null=True, blank=True, verbose_name="浏览密码加密",
help_text="访问密码加密")
add_time = models.DateTimeField(auto_now_add=True, null=True, blank=True, verbose_name="添加时间", help_text="添加时间")
def __str__(self):
return self.name
def save(self, *args, **kwargs):
if self.access_password:
md5 = hashlib.md5()
md5.update(self.access_password.encode('utf8'))
self.access_password_encrypt = md5.hexdigest()
else:
self.access_password_encrypt = ''
super(SiteInfo, self).save(*args, **kwargs)
class Meta:
verbose_name = "网站信息"
verbose_name_plural = verbose_name + '列表'
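# --- Illustrative sketch (added; not part of the original app) ---
# SiteInfo.save() stores the MD5 hex digest of access_password in
# access_password_encrypt.  A minimal, hypothetical check of that behaviour:
#
#     import hashlib
#     site = SiteInfo(name="demo", access_password="secret")
#     site.save()
#     assert site.access_password_encrypt == hashlib.md5(b"secret").hexdigest()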
class BloggerInfo(models.Model):
name = models.CharField(default="", max_length=20, verbose_name="名称", help_text="名称")
name_en = models.CharField(default="", max_length=20, verbose_name="名称英文", help_text="名称英文")
desc = models.CharField(default="", max_length=300, verbose_name="简介", help_text="简介")
avatar = models.ImageField(upload_to="base/avatar/image/%y/%m", null=True, blank=True, verbose_name="头像", help_text="100*100")
background = models.ImageField(upload_to="base/background/image/%y/%m", null=True, blank=True, verbose_name="背景图", help_text="333*125")
socials = models.ManyToManyField(MaterialSocial, through='BloggerSocial', through_fields=('blogger', 'social'))
masters = models.ManyToManyField(MaterialMaster, through='BloggerMaster', through_fields=('blogger', 'master'))
add_time = models.DateTimeField(auto_now_add=True, null=True, blank=True, verbose_name="添加时间", help_text="添加时间")
class Meta:
verbose_name = "个人信息"
verbose_name_plural = verbose_name + '列表'
def __str__(self):
return self.name
class BloggerSocial(models.Model):
name = models.CharField(default="", max_length=20, verbose_name="名称", help_text="名称")
blogger = models.ForeignKey(BloggerInfo, verbose_name="个人", help_text="个人")
social = models.ForeignKey(MaterialSocial, verbose_name="社交平台", help_text="社交平台")
index = models.IntegerField(default=0, verbose_name="顺序", help_text="顺序")
add_time = models.DateTimeField(auto_now_add=True, null=True, blank=True, verbose_name="添加时间", help_text="添加时间")
class Meta:
verbose_name = "社交信息"
verbose_name_plural = verbose_name + '列表'
def __str__(self):
return self.name
class BloggerMaster(models.Model):
name = models.CharField(default="", max_length=20, verbose_name="名称", help_text="名称")
blogger = models.ForeignKey(BloggerInfo, verbose_name="个人", help_text="个人")
master = models.ForeignKey(MaterialMaster, verbose_name="技能", help_text="技能")
index = models.IntegerField(default=0, verbose_name="顺序", help_text="顺序")
add_time = models.DateTimeField(auto_now_add=True, null=True, blank=True, verbose_name="添加时间", help_text="添加时间")
class Meta:
verbose_name = "技能信息"
verbose_name_plural = verbose_name + '列表'
def __str__(self):
return self.name
class SiteInfoNavigation(models.Model):
name = models.CharField(default="", max_length=20, verbose_name="名称", help_text="名称")
site = models.ForeignKey(SiteInfo, verbose_name="网站", help_text="网站")
navigation = models.ForeignKey(NavigationLink, verbose_name="导航", help_text="导航")
index = models.IntegerField(default=0, verbose_name="顺序", help_text="顺序")
add_time = models.DateTimeField(auto_now_add=True, null=True, blank=True, verbose_name="添加时间", help_text="添加时间")
class Meta:
verbose_name = "导航信息"
verbose_name_plural = verbose_name + '列表'
def __str__(self):
return self.name
class FriendLink(models.Model):
"""
    Friend links (blogroll).
"""
name = models.CharField(max_length=30, verbose_name="名称", help_text="名称")
desc = models.CharField(max_length=100, verbose_name="简介", help_text="简介")
image = models.ImageField(upload_to="base/friendlink/image/%y/%m", null=True, blank=True, verbose_name="图片", help_text="图片")
url = models.URLField(max_length=200, verbose_name="链接", help_text="链接")
add_time = models.DateTimeField(auto_now_add=True, null=True, blank=True, verbose_name="添加时间", help_text="添加时间")
class Meta:
verbose_name = "友情链接"
verbose_name_plural = verbose_name + '列表'
def __str__(self):
return self.name
| [
"[email protected]"
] | |
2d24fc4a0d81288c02b7170e0b960d7804117ead | a807ce0fa3e3e9c3b558b2e977c05e60c3a667b1 | /scripts/speaker_recognition/rttm_to_manifest.py | 3d7b772b05b94d534edf34d0698165658d9e6063 | [
"Apache-2.0"
] | permissive | blisc/NeMo | 630376e7555c0face994da2f6f9af5d8d31243c3 | fadeb45c84d6b323d78e30475538455a88b7c151 | refs/heads/rework_reqs | 2023-08-17T00:03:39.248669 | 2021-08-12T15:15:06 | 2021-08-12T15:15:06 | 208,142,160 | 2 | 0 | Apache-2.0 | 2022-02-03T16:30:33 | 2019-09-12T20:37:24 | Jupyter Notebook | UTF-8 | Python | false | false | 1,666 | py | # Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from nemo.collections.asr.parts.utils.speaker_utils import write_rttm2manifest
from nemo.utils import logging
"""
This script converts the VAD (voice activity detection) output files present in
the VAD output directory into a single manifest file for speaker diarization.
Every VAD line consists of start_time, end_time, and a speech/non-speech label.
"""
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--paths2rttm_files", help="path to vad output rttm-like files. Could be a list or a text file", required=True
)
parser.add_argument(
"--paths2audio_files",
help="path to audio files that vad was computed. Could be a list or a text file",
required=True,
)
parser.add_argument("--manifest_file", help="output manifest file name", type=str, required=True)
args = parser.parse_args()
write_rttm2manifest(args.paths2audio_files, args.paths2rttm_files, args.manifest_file)
logging.info("wrote {} file from vad output files present in {}".format(args.manifest_file, args.paths2rttm_files))
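# Example invocation (added; paths are hypothetical, flags are the ones declared above):
#
#   python rttm_to_manifest.py \
#       --paths2rttm_files=/data/vad_rttm_files.txt \
#       --paths2audio_files=/data/audio_files.txt \
#       --manifest_file=vad_manifest.json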
| [
"[email protected]"
] | |
91ac240c83a14f13f239bfa42d189a7ab771a61f | e71fa62123b2b8f7c1a22acb1babeb6631a4549b | /xlsxwriter/test/comparison/test_chart_pie02.py | da43f3a95439dfc445a57e81a063b68c6b55a26e | [
"BSD-2-Clause"
] | permissive | timgates42/XlsxWriter | 40480b6b834f28c4a7b6fc490657e558b0a466e5 | 7ad2541c5f12b70be471b447ab709c451618ab59 | refs/heads/main | 2023-03-16T14:31:08.915121 | 2022-07-13T23:43:45 | 2022-07-13T23:43:45 | 242,121,381 | 0 | 0 | NOASSERTION | 2020-02-21T11:14:55 | 2020-02-21T11:14:55 | null | UTF-8 | Python | false | false | 1,229 | py | ###############################################################################
#
# Tests for XlsxWriter.
#
# SPDX-License-Identifier: BSD-2-Clause
# Copyright (c), 2013-2022, John McNamara, [email protected]
#
from ..excel_comparison_test import ExcelComparisonTest
from ...workbook import Workbook
class TestCompareXLSXFiles(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename('chart_pie02.xlsx')
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
chart = workbook.add_chart({'type': 'pie'})
data = [
[2, 4, 6],
[60, 30, 10],
]
worksheet.write_column('A1', data[0])
worksheet.write_column('B1', data[1])
chart.add_series({
'categories': '=Sheet1!$A$1:$A$3',
'values': '=Sheet1!$B$1:$B$3',
})
chart.set_legend({'font': {'bold': 1, 'italic': 1, 'baseline': -1}})
worksheet.insert_chart('E9', chart)
workbook.close()
self.assertExcelEqual()
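        # Note (added): assertExcelEqual() compares the workbook written above
        # against the stored reference copy of chart_pie02.xlsx created in Excel.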
| [
"[email protected]"
] | |
8915d6ab351b062c940bd3ec98322f3559a95024 | 1546f4ebbb639fddf05d4aebea263931681eaeeb | /code/app/simulation/action/base.py | f63f1c57de9de7e666e7fd5ee0f69e77221c0e48 | [] | no_license | ferdn4ndo/the-train-app | 34e9c885a658f51e42ec6184ca8058872b266244 | 4650433f7f860df3de1f7502cb052891c410618d | refs/heads/master | 2023-01-21T07:15:02.984308 | 2020-11-22T23:57:28 | 2020-11-22T23:57:28 | 315,156,946 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,143 | py | from app.common.logger import generate_logger, LoggerFolders
from app.simulation.exception.error import ConflictConditionError
class BaseAction:
name = "none"
abbrev = "---"
def __init__(self):
"""Class constructor"""
self.moving_towards_section = ""
self.lookup_train_prefix = ""
self.executed = False
@staticmethod
def is_applicable(dispatcher, train):
"""Define the criteria for the action application (should be overridden by children)"""
return True
def was_executed(self, train):
"""Define the criteria for the action to be considered executed (could be overridden by children)"""
return self.executed
def serialize(self):
return {
"name": self.name,
"abbrev": self.abbrev,
"executed": self.executed,
"description": self.describe()
}
def describe(self):
"""Define the message to describe the action (should be overridden by children)"""
return "No action (idle)"
def execute(self, dispatcher, train):
"""Define the action execution method (should be overridden by children)"""
self.executed = True
def move_to(self, dispatcher, train, next_section=None):
"""Helper function to be used by functions that moves a train from a section to another"""
self.moving_towards_section = next_section.name if next_section is not None else ''
if not train.is_at_section_end():
train.go_at_maximum_speed()
return
train.stop()
# if reached section end and there's no next straight section to move, raise error
if next_section is None:
raise ConflictConditionError("Tried to move into a non-existing section")
# if section is not occupied, move the train to it
if not dispatcher.is_section_occupied(next_section, train.is_reversed):
dispatcher.move_train_to_section(train, next_section)
# in any case (being moved to the new section or not due to its occupancy), mark the action as executed
self.executed = True
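# --- Illustrative sketch (added): a minimal concrete action built on
# BaseAction.move_to().  `dispatcher.get_next_section` is a hypothetical
# helper standing in for however the dispatcher resolves the next section.
class MoveForwardAction(BaseAction):
    name = "move_forward"
    abbrev = "MVF"

    def describe(self):
        return "Moving towards section " + (self.moving_towards_section or "?")

    def execute(self, dispatcher, train):
        next_section = dispatcher.get_next_section(train)  # hypothetical API
        self.move_to(dispatcher, train, next_section)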
| [
"[email protected]"
] | |
71a7bfb7356831f49556629bc9fe1232ac1a10ec | 1b862f34c125ce200244dd79e4fda4b5b605ce2e | /.history/ML_T2_Validation_20210612130104.py | 0801f4946cabc4c874db2864078408c2e39b6465 | [] | no_license | edwino26/CoreImages | 26085a49cf1cb79442ae563a88354b2fdceace87 | 6bf6e68cac8ab36c87b1e6ea702bfe6882b0f40e | refs/heads/master | 2023-06-22T12:53:37.344895 | 2021-07-21T04:31:44 | 2021-07-21T04:31:44 | 309,553,247 | 0 | 4 | null | 2021-04-29T23:23:15 | 2020-11-03T02:45:07 | Lasso | UTF-8 | Python | false | false | 9,227 | py | #T2 TEST DATA
# %%
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import pickle
from scipy import interpolate
from scipy.integrate import simps
from numpy import trapz
# %%
#Load Stack
UVStack = pd.read_excel('./ML_Results/T2_test/ImgStack.xls')
ImgStackk = UVStack.copy().to_numpy()
# %%
def integrate(y_vals, h):
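    # Composite Simpson's rule over samples spaced h apart (assumes an odd
    # number of samples, i.e. an even number of sub-intervals).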
i = 1
total = y_vals[0] + y_vals[-1]
for y in y_vals[1:-1]:
if i % 2 == 0:
total += 2 * y
else:
total += 4 * y
i += 1
return total * (h / 3.0)
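# %% Illustrative check (added): Simpson's rule is exact for quadratics, so
# integrating y = x**2 over [0, 1] with step 0.25 reproduces 1/3.
_ys = [0.0, 0.0625, 0.25, 0.5625, 1.0]  # x**2 at x = 0, 0.25, 0.5, 0.75, 1
assert abs(integrate(_ys, 0.25) - 1.0 / 3.0) < 1e-12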
# %% Load and resample "results" (res) file
sub = pd.read_excel('./ML_Results/T2_test/sub.xls')
res = pd.read_excel('./ML_Results/T2_test/Results.xls')
res = res[res.Well == 'T2']
res = res.sort_values(by=['DEPT'])  # sort_values returns a copy, so keep the result
res.drop(['Unnamed: 0', 'Set'], axis=1, inplace=True)
res.reset_index(inplace=True, drop=True)
dep = np.arange(min(res.DEPT), max(res.DEPT),0.5) #res is not at 0.5 thanks to balancing
res_rs = pd.DataFrame(columns=[res.columns])
res_rs.DEPT = dep
for i in range(len(res.columns)):
if i != 8:
f = interpolate.interp1d(res.DEPT, res.iloc[:,i])
res_rs.iloc[:,i] =f(dep)
else:
res_rs.iloc[:,i] = res.Well[0]
#T2_rs.dropna(inplace=True)
res = res_rs.copy()
difference = res.DEPT.diff()
difference.describe()
# %%
TT = pd.read_excel('./ML_Results/Train_Test_Results.xls')
istr = 0
iend = 42344
dplot_o = 3671
dplot_n = 3750
shading = 'bone'
# %% Load Log Calculations
T2_x = pd.read_excel('./Excel_Files/T2.xls',sheet_name='T2_data')
T2_x = T2_x[['DEPTH','GR_EDTC','RHOZ','AT90','NPHI','Vsh','Vclay','grain_density','porosity',
'RW2','Sw_a','Sw_a1','Sw_p','Sw_p1','SwWS','Swsim','Swsim1','PAY_archie',
'PAY_poupon','PAY_waxman','PAY_simandoux']]
# %%
T2_rs = pd.DataFrame(columns=[T2_x.columns])
T2_rs.iloc[:,0] = dep
for i in range(len(T2_x.columns)):
f = interpolate.interp1d(T2_x.DEPTH, T2_x.iloc[:,i])
T2_rs.iloc[:,i] =f(dep)
#T2_rs.dropna(inplace=True)
T2_x = T2_rs.copy()
difference_T2 = T2_x.DEPTH.diff()
difference_T2.describe()
# %%
plt.figure()
plt.subplot2grid((1, 10), (0, 0), colspan=3)
plt.plot(sub['GRAY'], sub['DEPTH'], 'mediumseagreen', linewidth=0.5);
plt.axis([50, 250, dplot_o, dplot_n]);
plt.gca().invert_yaxis();
plt.fill_between(sub['GRAY'], 0, sub['DEPTH'], facecolor='green', alpha=0.5)
plt.xlabel('Gray Scale RGB')
plt.subplot2grid((1, 10), (0, 3), colspan=7)
plt.imshow(ImgStackk[istr:iend,80:120], aspect='auto', origin='upper', extent=[0,1,dplot_n,dplot_o], cmap=shading);
plt.axis([0, 1, dplot_o, dplot_n]);
plt.gca().invert_yaxis()
plt.xlabel('Processed Image')
plt.colorbar()
p_50 = np.percentile(sub['DEPTH'], 50)
plt.yticks([]); plt.xticks([])
plt.subplots_adjust(wspace = 20, left = 0.1, right = 0.9, bottom = 0.1, top = 0.9)
plt.show()
# %%
CORE = pd.read_excel('./CORE/CORE.xlsx', sheet_name='XRD')
mask = CORE.Well.isin(['T2'])
T2_Core = CORE[mask]
prof = T2_Core['Depth']
clays = T2_Core['Clays']
xls1 = pd.read_excel('./CORE/CORE.xlsx', sheet_name='Saturation')
mask = xls1.Well.isin(['T2'])
T2_sat = xls1[mask]
long = T2_sat['Depth']
poro = T2_sat['PHIT']
grain = T2_sat['RHOG']
sw_core = T2_sat['Sw']
klinkenberg = T2_sat['K']
minimo = grain.min()
maximo = grain.max()
c = 2.65
d = 2.75
norm = ((grain - minimo) * (d - c) / (maximo - minimo)) + c
xls2 = pd.read_excel('./CORE/CORE.xlsx', sheet_name='Gamma')
mask = xls2.Well.isin(['T2'])
T2_GR = xls2[mask]
h = T2_GR['Depth']
cg1 = T2_GR['GR_Scaled']
# %%
# ~~~~~~~~~~~~~~~~~~ Plot Results ~~~~~~~~~~~~~~~~~~~~~~
ct = 0
top= dplot_o
bottom= dplot_n
no_plots = 9
ct+=1
plt.figure(figsize=(10,9))
plt.subplot(1,no_plots,ct)
plt.plot (T2_x.GR_EDTC,T2_x.DEPTH,'g', lw=3)
#plt.fill_between(T2_x.GR_EDTC.values.reshape(-1), T2_x.DEPTH.values.reshape(-1), y2=0,color='g', alpha=0.8)
plt.title('$Gamma Ray$',fontsize=8)
plt.axis([40,130,top,bottom])
plt.xticks(fontsize=8)
plt.yticks(fontsize=8)
plt.xlabel('Gamma Ray ',fontsize=6)
plt.gca().invert_yaxis()
plt.grid(True)
plt.hlines(y=3665.65, xmin=0, xmax=130)
plt.hlines(y=3889.5, xmin=0, xmax=130)
ct+=1
plt.subplot(1,no_plots,ct)
plt.plot (T2_x.PAY_poupon,T2_x.DEPTH,'r',lw=0.5)
h_P = integrate(T2_x.PAY_poupon.values, 0.5)
plt.title('$PAY_Poupon$',fontsize=8)
plt.fill_between(T2_x.PAY_poupon.values.reshape(-1),T2_x.DEPTH.values.reshape(-1), color='r', alpha=0.8)
plt.axis([0.01,0.0101,top,bottom])
plt.xticks(fontsize=8)
plt.gca().invert_yaxis()
plt.gca().xaxis.set_visible(False)
plt.gca().yaxis.set_visible(False)
plt.grid(True)
plt.hlines(y=3665.65, xmin=0, xmax=130)
plt.hlines(y=3889.5, xmin=0, xmax=130)
#Waxman-Smits
ct+=1
plt.subplot(1,no_plots,ct)
plt.plot (T2_x.PAY_waxman,T2_x.DEPTH,'g',lw=0.5)
h_WS = integrate(T2_x.PAY_waxman.values, 0.5)
plt.title('$PAY_W$',fontsize=8)
plt.fill_between(T2_x.PAY_waxman.values.reshape(-1),T2_x.DEPTH.values.reshape(-1), color='g', alpha=0.8)
plt.axis([0.01,0.0101,top,bottom])
plt.xticks(fontsize=8)
plt.gca().invert_yaxis()
plt.gca().xaxis.set_visible(False)
plt.gca().yaxis.set_visible(False)
plt.grid(True)
plt.hlines(y=3665.65, xmin=0, xmax=130)
plt.hlines(y=3889.5, xmin=0, xmax=130)
#Simandoux
ct+=1
plt.subplot(1,no_plots,ct)
plt.plot (T2_x.PAY_simandoux,T2_x.DEPTH,'y',lw=0.5)
h_S = integrate(T2_x.PAY_simandoux.values, 0.5)
plt.title('$PAY_S$',fontsize=8)
plt.fill_between(T2_x.PAY_simandoux.values.reshape(-1),T2_x.DEPTH.values.reshape(-1), color='y', alpha=0.8)
plt.axis([0.01,0.0101,top,bottom])
plt.xticks(fontsize=8)
plt.gca().invert_yaxis()
plt.gca().xaxis.set_visible(False)
plt.gca().yaxis.set_visible(False)
plt.grid(True)
plt.hlines(y=3665.65, xmin=0, xmax=130)
plt.hlines(y=3889.5, xmin=0, xmax=130)
ct+=1 #RGB Gray from Image
plt.subplot(1,no_plots,ct)
plt.plot(sub['GRAY'], sub['DEPTH'], 'mediumseagreen', linewidth=0.5);
plt.axis([50, 250, dplot_o, dplot_n]);
plt.xticks(fontsize=8)
#plt.title('$Core Img$',fontsize=8)
plt.gca().invert_yaxis();
plt.gca().yaxis.set_visible(False)
plt.fill_between(sub['GRAY'], 0, sub['DEPTH'], facecolor='green', alpha=0.5)
plt.xlabel('Gray Scale RGB', fontsize=7)
ct+=1 # True UV from Image
plt.subplot(1,no_plots,ct, facecolor='#302f43')
corte= 170
PAY_Gray_scale = res['GRAY'].copy()
PAY_Gray_scale.GRAY[PAY_Gray_scale.GRAY<corte] = 0
PAY_Gray_scale.GRAY[PAY_Gray_scale.GRAY>=corte] = 1
h_TRUE_UV = integrate(PAY_Gray_scale.values, 0.5)
plt.plot (PAY_Gray_scale,res.DEPT,'#7d8d9c',lw=0.5)
plt.title('$TARGET (smoothed to 2.5 ft)$',fontsize=10)
plt.fill_between(PAY_Gray_scale.values.reshape(-1),res.DEPT.values.reshape(-1), color='#7d8d9c', alpha=0.8)
plt.axis([0.01,0.0101,top,bottom])
plt.xticks(fontsize=8)
plt.gca().invert_yaxis()
plt.gca().xaxis.set_visible(False)
plt.gca().yaxis.set_visible(False)
plt.grid(True)
ct+=1
plt.subplot(1,no_plots,ct)
plt.imshow(ImgStackk[istr:iend,80:120], aspect='auto', origin='upper', extent=[0,1,dplot_n,dplot_o], cmap=shading);
plt.axis([0, 1, dplot_o, dplot_n]);
plt.xticks(fontsize=8)
plt.gca().invert_yaxis()
plt.xlabel('Stacked UV Photos', fontsize=7)
plt.colorbar()
p_50 = np.percentile(sub['DEPTH'], 50)
plt.yticks([]); plt.xticks([])
ct+=1
plt.subplot(1,no_plots,ct)
plt.plot (res['RandomForest'],res.DEPT,'r',lw=1)
plt.plot (res.GRAY,res.DEPT,'k',lw=0.5)
plt.title('Machine Learning',fontsize=8)
plt.axis([0,2,top,bottom])
plt.xticks(fontsize=8)
plt.xlabel('RandomForest',fontsize=7)
plt.gca().invert_yaxis()
plt.gca().invert_xaxis()
plt.gca().yaxis.set_visible(False)
plt.grid(True)
plt.xlim(0, 255)
plt.hlines(y=3665.65, xmin=0, xmax=130)
plt.hlines(y=3889.5, xmin=0, xmax=130)
ct+=1
plt.subplot(1,no_plots,ct, facecolor='#302f43')
PAY_Gray_scale2 = res['RandomForest'].copy().rename(columns={'RandomForest':'GRAY'})
PAY_Gray_scale2.GRAY[PAY_Gray_scale2.GRAY<corte] = 0
PAY_Gray_scale2.GRAY[PAY_Gray_scale2.GRAY>=corte] = 1
h_ML = integrate(PAY_Gray_scale2.values, 0.5)
plt.plot (PAY_Gray_scale2, res.DEPT,'#7d8d9c',lw=0.5)
plt.title('$RESULT: TEST Set$',fontsize=8)
plt.fill_between(PAY_Gray_scale2.values.reshape(-1),res.DEPT.values.reshape(-1), color='#7d8d9c', alpha=0.8)
plt.axis([0.01,0.0101,top,bottom])
plt.xticks(fontsize=8)
plt.gca().invert_yaxis()
plt.gca().xaxis.set_visible(False)
plt.gca().yaxis.set_visible(False)
plt.grid(True)
plt.suptitle('Well T2: Final Comparison')
plt.show()
# %%
plt.figure(figsize=(10,9))
plt.subplot(1,1,1)
plt.plot(res.GRAY, res['RandomForest'], 'ko')
plt.plot(res.GRAY, res.GRAY, 'r')
plt.xlim(0, 255)
plt.ylim(0, 255)
plt.xlabel('Gray-scale value smoothed to log resolution',fontsize=17)
plt.ylabel('Gray-scale prediction using Random Forest',fontsize=17)
plt.show()
# %% Error Calculation
# T2_x.PAY_poupon,T2_x.DEPTH
# T2_x.PAY_waxman
# T2_x.PAY_simandoux
# %%
# Build the pay summary as a single row; assigning scalars to the columns of an
# empty DataFrame would leave them empty.
pay = pd.DataFrame([{'Poupon': h_P, 'Waxman_Smits': h_WS, 'Simandoux': h_S,
                     'Machine_L': h_ML, 'True_UV': h_TRUE_UV}])
pay.head()
#rmse['Poupon'] = mean_squared_error(y_test, y_pred_test, squared=False)
# %%
| [
"[email protected]"
] | |
e0d03d82a89f95990dd13ba64cb7019fc71d2dd3 | 3b786d3854e830a4b46ee55851ca186becbfa650 | /SystemTesting/pylib/vmware/nsx/manager/bridge_endpoint/api/nsx70_crud_impl.py | 008596b40abee0735845510a687241e799f38991 | [] | no_license | Cloudxtreme/MyProject | d81f8d38684333c22084b88141b712c78b140777 | 5b55817c050b637e2747084290f6206d2e622938 | refs/heads/master | 2021-05-31T10:26:42.951835 | 2015-12-10T09:57:04 | 2015-12-10T09:57:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 667 | py | import vmware.nsx_api.manager.bridgeendpoint.bridgeendpoint\
as bridgeendpoint
import vmware.nsx_api.manager.bridgeendpoint.schema.bridgeendpoint_schema\
as bridgeendpoint_schema
import vmware.nsx.manager.api.base_crud_impl as base_crud_impl
class NSX70CRUDImpl(base_crud_impl.BaseCRUDImpl):
_attribute_map = {
'id_': 'id',
'name': 'display_name',
'summary': 'description',
'guest_vlan': 'guest_vlan_tag',
'node_id': 'bridge_cluster_id',
'vlan_id': 'vlan',
'ha': 'ha_enable'
}
_client_class = bridgeendpoint.BridgeEndpoint
_schema_class = bridgeendpoint_schema.BridgeEndpointSchema
| [
"[email protected]"
] | |
a4129e72e7a4b57f2a59c688c107e9ab48afe0a9 | 57845ff6759377884092d8d1c5fe82244e30b108 | /code/era5_heat_comp/bias_correction.py | df5ba6a607bd21e394286839459eee3bba4e7534 | [
"CC-BY-4.0"
] | permissive | l5d1l5/project10 | c11424c7329cdc264f6fedd974a8f887fe6c8cf8 | ded1ef7ce04573d669c2f0352b03317f64f1f967 | refs/heads/main | 2023-06-10T17:17:01.748249 | 2021-06-09T13:20:35 | 2021-06-09T13:20:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,728 | py | #!/usr/bin/env python
"""
Bias correction for the UTCI dataset
Both the climate model data and the ERA5-HEAT data have been regridded to 1x1 degree and uploaded to JASMIN. Here we use the ERA5-HEAT dataset from 1985 to 2014 and compare this to the derived UTCI from each climate model.
Therefore, instead of bias-correcting temperature or any other variables, we bias correct the derived UTCI.
We therefore assume that ERA5-HEAT is "Truth"! To be fair, I would probably bias correct the individual variables against their ERA5 counterparts. Additionally, for all except temperature this becomes a little tricky and subjective.
"""
import iris
from iris.experimental.equalise_cubes import equalise_attributes
from iris.util import unify_time_units
import iris.analysis.cartography
import iris.coord_categorisation
import matplotlib.pyplot as pl
from climateforcing.utils import mkdir_p
import numpy as np
#import pickle
import scipy.stats as st
from tqdm import tqdm
# ## Obtain historical "training" distributions
era5heatdir = '/gws/pw/j05/cop26_hackathons/bristol/project10/era5-heat_1deg/'
modeldir = '/gws/pw/j05/cop26_hackathons/bristol/project10/utci_projections_1deg/HadGEM3-GC31-LL/historical/r1i1p1f3/'
## just 30 years for now
## load up the regridded annual chunks and concatenate
cube_era5 = iris.load(era5heatdir + 'ECMWF_utci_*_v1.0_con.nc')
equalise_attributes(cube_era5)
unify_time_units(cube_era5)
for cu in cube_era5:
cu.coord('time').points = cu.coord('time').points.astype(int)
cube_era5 = cube_era5.concatenate_cube()
## also 30 years of HadGEM3 historical
cube_model = iris.load(modeldir + 'utci_3hr_HadGEM3-GC31-LL_historical_r1i1p1f3_gn_*.nc')
cube_model = cube_model.concatenate_cube()
# generalise this
leeds_model = cube_model[:,143,178]
leeds_era5 = cube_era5[:,143,178]
model_params = {}
model_params['a'] = np.zeros((cube_model.shape[1:3]))
model_params['loc'] = np.zeros((cube_model.shape[1:3]))
model_params['scale'] = np.zeros((cube_model.shape[1:3]))
model_params['lat'] = cube_model.coord('latitude').points
model_params['lon'] = cube_model.coord('longitude').points
era5_params = {}
era5_params['a'] = np.zeros((cube_era5.shape[1:3]))
era5_params['loc'] = np.zeros((cube_era5.shape[1:3]))
era5_params['scale'] = np.zeros((cube_era5.shape[1:3]))
era5_params['lat'] = cube_era5.coord('latitude').points
era5_params['lon'] = cube_era5.coord('longitude').points
model_params['a'][143,178], model_params['loc'][143,178], model_params['scale'][143,178] = st.skewnorm.fit(leeds_model.data)
era5_params['a'][143,178], era5_params['loc'][143,178], era5_params['scale'][143,178] = st.skewnorm.fit(leeds_era5.data)
# ## How to bias correct
#
# $\hat{x}_{m,p}(t) = F^{-1}_{o,h} ( F_{m,h} (x_{m,p}(t)) )$
#
# - $x_{m,p}$ is the future predicted variable, i.e. the SSP value from the climate model
# - $F_{m,h}$ is the CDF of the historical period in the climate model
# - $F_{o,h}$ is the CDF of the historical period in the observations (or in this case, ERA5)
# F_{m,h}
# In: st.skewnorm.cdf(290, model_params['a'][143,178], model_params['loc'][143,178], model_params['scale'][143,178])
# Out: 0.4921534798137802 # percentile of 290 K in HadGEM3 climate
# F^{-1}_{o,h}
# In: st.skewnorm.ppf(0.4921534798137802, era5_params['a'][143,178], era5_params['loc'][143,178], era5_params['scale'][143,178])
# Out: 290.57999427509816 # UTCI in ERA5 corresponding to this percentile.
# transfer function
def bias_correct(x, model_params, obs_params, ilat, ilon):
cdf = st.skewnorm.cdf(x, model_params['a'][ilat, ilon], model_params['loc'][ilat, ilon], model_params['scale'][ilat, ilon])
x_hat = st.skewnorm.ppf(cdf, obs_params['a'][ilat, ilon], obs_params['loc'][ilat, ilon], obs_params['scale'][ilat, ilon])
return x_hat
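# Illustrative round trip (added), reusing the Leeds grid-cell worked example
# from the comments above: 290 K sits at the ~0.492 percentile of the HadGEM3
# historical climate, which maps to ~290.58 K in the ERA5-HEAT distribution.
#
#     bias_correct(290.0, model_params, era5_params, 143, 178)  # ~290.58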
# ## Bias correct future simulations
#
# For now, just use 2100
modelfuturedir = '/gws/pw/j05/cop26_hackathons/bristol/project10/utci_projections_1deg/HadGEM3-GC31-LL/ssp585/r1i1p1f3/'
cube_model_future = iris.load(modelfuturedir + 'utci_3hr_HadGEM3-GC31-LL_ssp585_r1i1p1f3_gn_210001010300-210101010000.nc')
cube_model_future = cube_model_future.concatenate_cube()
leeds_model_future = cube_model_future[:,143,178]
model_future_params = {}
model_future_params['a'] = np.zeros((cube_model_future.shape[1:3]))
model_future_params['loc'] = np.zeros((cube_model_future.shape[1:3]))
model_future_params['scale'] = np.zeros((cube_model_future.shape[1:3]))
model_future_params['lat'] = cube_model_future.coord('latitude').points
model_future_params['lon'] = cube_model_future.coord('longitude').points
model_future_params['a'][143,178], model_future_params['loc'][143,178], model_future_params['scale'][143,178] = st.skewnorm.fit(leeds_model_future.data)
#pl.hist(leeds_model.data, density=True, label='HadGEM3-GC31-LL 1985', alpha=0.3, bins=50)
#pl.hist(leeds_era5.data, density=True, label='ERA5-HEAT', alpha=0.3, bins=50)
#pl.hist(leeds_model_future.data, density=True, label='HadGEM3-GC31-LL 2100', alpha=0.3, bins=50)
#pl.plot(np.arange(240, 320), st.skewnorm.pdf(np.arange(240, 320), model_params['a'][143,178], model_params['loc'][143,178], model_params['scale'][143,178]), color='tab:blue')
#pl.plot(np.arange(240, 320), st.skewnorm.pdf(np.arange(240, 320), era5_params['a'][143,178], era5_params['loc'][143,178], era5_params['scale'][143,178]), color='tab:orange')
#pl.plot(np.arange(240, 320), st.skewnorm.pdf(np.arange(240, 320), model_future_params['a'][143,178], model_future_params['loc'][143,178], model_future_params['scale'][143,178]), color='tab:green')
#pl.legend()
#pl.title('Leeds grid cell')
#pl.show()
# bias correct the Leeds 2100 projections
leeds_model_future_biascorrected = bias_correct(leeds_model_future.data, model_params, era5_params, 143, 178)
pl.hist(leeds_model.data, density=True, label='HadGEM3-GC31-LL 1985', alpha=0.3, bins=50)
pl.hist(leeds_era5.data, density=True, label='ERA5-HEAT', alpha=0.3, bins=50)
pl.hist(leeds_model_future.data, density=True, label='HadGEM3-GC31-LL 2100', alpha=0.3, bins=50)
pl.hist(leeds_model_future_biascorrected, density=True, label='Bias-corrected 2100', alpha=0.3, bins=50)
pl.plot(np.arange(240, 320), st.skewnorm.pdf(np.arange(240, 320), model_params['a'][143,178], model_params['loc'][143,178], model_params['scale'][143,178]), color='tab:blue')
pl.plot(np.arange(240, 320), st.skewnorm.pdf(np.arange(240, 320), era5_params['a'][143,178], era5_params['loc'][143,178], era5_params['scale'][143,178]), color='tab:orange')
pl.plot(np.arange(240, 320), st.skewnorm.pdf(np.arange(240, 320), model_future_params['a'][143,178], model_future_params['loc'][143,178], model_future_params['scale'][143,178]), color='tab:green')
pl.legend()
pl.title('Leeds grid cell')
pl.show()
| [
"[email protected]"
] | |
951acdaacbf96a5af43073fe36dba77c68a2eb14 | 71c7683331a9037fda7254b3a7b1ffddd6a4c4c8 | /Phys/BsJPsiKst/python/BsJPsiKst/GetTristanWeights_paramAc.py | de47f925bc07c284ed7678d0bb08603b73f108b6 | [] | no_license | pseyfert-cern-gitlab-backup/Urania | edc58ba4271089e55900f8bb4a5909e9e9c12d35 | 1b1c353ed5f1b45b3605990f60f49881b9785efd | refs/heads/master | 2021-05-18T13:33:22.732970 | 2017-12-15T14:42:04 | 2017-12-15T14:42:04 | 251,259,622 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,884 | py | from ROOT import *
from math import *
from array import *
from Urania import PDG
from Urania.Helicity import *
from Urania import RooInterfaces as D
from Urania import *
AccessPackage("Bs2MuMu")
from smartpyROOT import *
from OurSites import *
from sympy.utilities.lambdify import lambdify
from parameters import KpiBins4 as Kpibins
#neim = sys.argv[1]
#neim = "2011p_826_861"
spins = [0,1]
## ### Generate the pdf using the tools in Urania.Helicity
A = doB2VX(spins, helicities = [1,-1], transAmp = 1)#0)
### masage a bit the expression to make it more suitable for fitting
pdf_split = DecomposeAmplitudes(A,TransAmplitudes.values())#H.values())
phys = 0
TristanIntegral = 0
TristanWeights = {}
#BREAK
x = Symbol("helcosthetaK",real = True)
y = Symbol("helcosthetaL", real = True)
z = Symbol("helphi", real = True)
CThL = Cos(ThetaL)
CThK = Cos(ThetaK)
def changeFreeVars(function):
function = function.subs( Sin(2*ThetaK), 2*Sin(ThetaK)*Cos(ThetaK))
function = function.subs( Cos(2*ThetaK), 2*Cos(ThetaK)**2 - 1)
function = function.subs( Sin(ThetaK), Sqrt(1-Cos(ThetaK)**2))
function = function.subs( Sin(ThetaL), Sqrt(1-Cos(ThetaL)**2))
function = function.subs([(CThK,x),(CThL,y), (Phi,-z)])
return function
lam_pdf_split = {}
for key in pdf_split:
pdf_split[key] = changeFreeVars(pdf_split[key])
phys += StrongPhases(key)*pdf_split[key]
if pdf_split[key]:
lam_pdf_split[key] = lambdify((x,y,z), pdf_split[key], ("numpy")) ### Lambdify it to make it faster.
TristanWeights[key] = 0# Symbol("w_" + str(list(key.atoms())[0]) + str(list(key.atoms())[1]), positive = True)
#TristanIntegral += StrongPhases(key) * TristanWeights[key]
T = TransAmpModuli
P = TransAmpPhases
##c1_psi = Symbol("c1_psi",real = True)
##c2_psi = Symbol("c2_psi",real = True)
##c3_psi = Symbol("c3_psi",real = True)
##c4_psi = Symbol("c4_psi",real = True)
##y_acc = Symbol("y_acc", positive = True)
##c2_theta = Symbol("c2_theta", real = True)
##c5_psi = -1-c1_psi - c2_psi - c3_psi - c4_psi + y_acc
##acc = (1. + c1_psi*x + c2_psi*x*x + c3_psi*x*x*x + c4_psi*x*x*x*x + c5_psi*x*x*x*x*x)*(1. + c2_theta*y*y)
##acc = acc.subs([( c1_psi, -5.20101e-01),(c2_psi, -7.33299e-01), (c3_psi, -2.90606e-01), (c4_psi, 2.69475e-01), (c2_theta, 2.76201e-01), (y_acc,0)])
def CalculateWeights(acc):
out = {}
for key in TristanWeights.keys():
TristanWeights[key] = iter_integrate(acc*pdf_split[key],(z,-Pi,Pi),(x,-1,1), (y, -1,1)).n()
if "Abs" in str(key): out[str(key).replace("Abs(A_","").replace(")**2","")+str(key).replace("Abs(A_","").replace(")**2","")]=TristanWeights[key]
else: out[str(key).replace("re(","").replace("im(","").replace("A_","").replace("*conjugate(","").replace("))","")]=TristanWeights[key]
den = out['00']
for key in out.keys(): out[key] = out[key]/den
return out
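# --- Illustrative usage sketch (added; not part of the original script) ---
# With a flat (unit) acceptance, the weights reduce to the analytic angular
# integrals of each amplitude term, normalised so the |A_00|^2 weight is 1:
#
#     weights = CalculateWeights(1)  # sympy happily multiplies the pdf terms by 1
#     # weights['00'] == 1 by construction (every entry is divided by out['00'])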
| [
"[email protected]"
] | |
a5ab4535550b8fb055e694138a48dab497767a30 | d22f8cd1a834f706d2c0cd77a814414cb4650265 | /login/login/middleware/checksum.py | 2a6998d66cfd6febc69caf3468ff43ac487ed4c2 | [
"MIT"
] | permissive | teris1994/L2py | 9e7535935f58d729453f39bee998f21240b85e8b | 07cc5d7c5d52ac4179378b29ef4873b11f6daa0c | refs/heads/master | 2023-09-01T06:21:10.625029 | 2021-10-24T12:48:18 | 2021-10-24T13:21:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,559 | py | from common import exceptions
from common.middleware.middleware import Middleware
from common.ctype import Int32  # assumed import path for the project's Int32 wrapper
from login.packets.init import Init
class ChecksumMiddleware(Middleware):
@staticmethod
def verify_checksum(data):
if len(data) % 4 != 0:
return False
checksum = Int32(0)
for i in range(0, len(data) - 4, 4):
check = Int32(data[i]) & 0xFF
check |= Int32(data[i + 1]) << 8 & 0xFF00
check |= Int32(data[i + 2]) << 0x10 & 0xFF0000
check |= Int32(data[i + 3]) << 0x18 & 0xFF000000
checksum ^= check
check = Int32(data[-4:])
return check == checksum
@staticmethod
def add_checksum(response_data):
"""Adds checksum to response."""
checksum = Int32(0)
for i in range(0, len(response_data) - 4, 4):
check = Int32(response_data[i]) & 0xFF
check |= Int32(response_data[i + 1]) << 8 & 0xFF00
check |= Int32(response_data[i + 2]) << 0x10 & 0xFF0000
check |= Int32(response_data[i + 3]) << 0x18 & 0xFF000000
checksum ^= check
response_data[-4:] = checksum
@classmethod
def before(cls, session, request):
"""Checks that requests checksum match."""
if not cls.verify_checksum(request.data):
raise exceptions.ChecksumMismatch()
@classmethod
def after(cls, client, response):
"""Adds checksum to response data."""
if not isinstance(response.packet, Init):
cls.add_checksum(response.data)
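# --- Illustrative sketch (added; not part of the original middleware) ---
# add_checksum() XORs the payload in 4-byte words and writes the running
# checksum into the trailing 4 bytes, which verify_checksum() recomputes and
# compares.  A hypothetical round trip, assuming Int32 wraps a 32-bit value:
#
#     data = bytearray(b"\x01\x02\x03\x04\x05\x06\x07\x08" + b"\x00" * 4)
#     ChecksumMiddleware.add_checksum(data)
#     assert ChecksumMiddleware.verify_checksum(data)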
| [
"[email protected]"
] | |
f27d8b36f9217b65566c972c890743a9021a09c8 | 48e124e97cc776feb0ad6d17b9ef1dfa24e2e474 | /sdk/python/pulumi_azure_native/m365securityandcompliance/v20210325preview/private_endpoint_connections_for_scc_powershell.py | 4a495f7d094a2ede5756d18b1db3a816e0a007c7 | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | bpkgoud/pulumi-azure-native | 0817502630062efbc35134410c4a784b61a4736d | a3215fe1b87fba69294f248017b1591767c2b96c | refs/heads/master | 2023-08-29T22:39:49.984212 | 2021-11-15T12:43:41 | 2021-11-15T12:43:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,923 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
from ._enums import *
from ._inputs import *
__all__ = ['PrivateEndpointConnectionsForSCCPowershellArgs', 'PrivateEndpointConnectionsForSCCPowershell']
@pulumi.input_type
class PrivateEndpointConnectionsForSCCPowershellArgs:
def __init__(__self__, *,
private_link_service_connection_state: pulumi.Input['PrivateLinkServiceConnectionStateArgs'],
resource_group_name: pulumi.Input[str],
resource_name: pulumi.Input[str],
private_endpoint_connection_name: Optional[pulumi.Input[str]] = None):
"""
The set of arguments for constructing a PrivateEndpointConnectionsForSCCPowershell resource.
:param pulumi.Input['PrivateLinkServiceConnectionStateArgs'] private_link_service_connection_state: A collection of information about the state of the connection between service consumer and provider.
:param pulumi.Input[str] resource_group_name: The name of the resource group that contains the service instance.
:param pulumi.Input[str] resource_name: The name of the service instance.
:param pulumi.Input[str] private_endpoint_connection_name: The name of the private endpoint connection associated with the Azure resource
"""
pulumi.set(__self__, "private_link_service_connection_state", private_link_service_connection_state)
pulumi.set(__self__, "resource_group_name", resource_group_name)
pulumi.set(__self__, "resource_name", resource_name)
if private_endpoint_connection_name is not None:
pulumi.set(__self__, "private_endpoint_connection_name", private_endpoint_connection_name)
@property
@pulumi.getter(name="privateLinkServiceConnectionState")
def private_link_service_connection_state(self) -> pulumi.Input['PrivateLinkServiceConnectionStateArgs']:
"""
A collection of information about the state of the connection between service consumer and provider.
"""
return pulumi.get(self, "private_link_service_connection_state")
@private_link_service_connection_state.setter
def private_link_service_connection_state(self, value: pulumi.Input['PrivateLinkServiceConnectionStateArgs']):
pulumi.set(self, "private_link_service_connection_state", value)
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> pulumi.Input[str]:
"""
The name of the resource group that contains the service instance.
"""
return pulumi.get(self, "resource_group_name")
@resource_group_name.setter
def resource_group_name(self, value: pulumi.Input[str]):
pulumi.set(self, "resource_group_name", value)
@property
@pulumi.getter(name="resourceName")
def resource_name(self) -> pulumi.Input[str]:
"""
The name of the service instance.
"""
return pulumi.get(self, "resource_name")
@resource_name.setter
def resource_name(self, value: pulumi.Input[str]):
pulumi.set(self, "resource_name", value)
@property
@pulumi.getter(name="privateEndpointConnectionName")
def private_endpoint_connection_name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the private endpoint connection associated with the Azure resource
"""
return pulumi.get(self, "private_endpoint_connection_name")
@private_endpoint_connection_name.setter
def private_endpoint_connection_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "private_endpoint_connection_name", value)
class PrivateEndpointConnectionsForSCCPowershell(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
private_endpoint_connection_name: Optional[pulumi.Input[str]] = None,
private_link_service_connection_state: Optional[pulumi.Input[pulumi.InputType['PrivateLinkServiceConnectionStateArgs']]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
resource_name_: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
The Private Endpoint Connection resource.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] private_endpoint_connection_name: The name of the private endpoint connection associated with the Azure resource
:param pulumi.Input[pulumi.InputType['PrivateLinkServiceConnectionStateArgs']] private_link_service_connection_state: A collection of information about the state of the connection between service consumer and provider.
:param pulumi.Input[str] resource_group_name: The name of the resource group that contains the service instance.
:param pulumi.Input[str] resource_name_: The name of the service instance.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: PrivateEndpointConnectionsForSCCPowershellArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
The Private Endpoint Connection resource.
:param str resource_name: The name of the resource.
:param PrivateEndpointConnectionsForSCCPowershellArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(PrivateEndpointConnectionsForSCCPowershellArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
private_endpoint_connection_name: Optional[pulumi.Input[str]] = None,
private_link_service_connection_state: Optional[pulumi.Input[pulumi.InputType['PrivateLinkServiceConnectionStateArgs']]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
resource_name_: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = PrivateEndpointConnectionsForSCCPowershellArgs.__new__(PrivateEndpointConnectionsForSCCPowershellArgs)
__props__.__dict__["private_endpoint_connection_name"] = private_endpoint_connection_name
if private_link_service_connection_state is None and not opts.urn:
raise TypeError("Missing required property 'private_link_service_connection_state'")
__props__.__dict__["private_link_service_connection_state"] = private_link_service_connection_state
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__.__dict__["resource_group_name"] = resource_group_name
if resource_name_ is None and not opts.urn:
raise TypeError("Missing required property 'resource_name_'")
__props__.__dict__["resource_name"] = resource_name_
__props__.__dict__["name"] = None
__props__.__dict__["private_endpoint"] = None
__props__.__dict__["provisioning_state"] = None
__props__.__dict__["system_data"] = None
__props__.__dict__["type"] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-native:m365securityandcompliance:PrivateEndpointConnectionsForSCCPowershell")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(PrivateEndpointConnectionsForSCCPowershell, __self__).__init__(
'azure-native:m365securityandcompliance/v20210325preview:PrivateEndpointConnectionsForSCCPowershell',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'PrivateEndpointConnectionsForSCCPowershell':
"""
Get an existing PrivateEndpointConnectionsForSCCPowershell resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = PrivateEndpointConnectionsForSCCPowershellArgs.__new__(PrivateEndpointConnectionsForSCCPowershellArgs)
__props__.__dict__["name"] = None
__props__.__dict__["private_endpoint"] = None
__props__.__dict__["private_link_service_connection_state"] = None
__props__.__dict__["provisioning_state"] = None
__props__.__dict__["system_data"] = None
__props__.__dict__["type"] = None
return PrivateEndpointConnectionsForSCCPowershell(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
The name of the resource
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="privateEndpoint")
def private_endpoint(self) -> pulumi.Output[Optional['outputs.PrivateEndpointResponse']]:
"""
The resource of private end point.
"""
return pulumi.get(self, "private_endpoint")
@property
@pulumi.getter(name="privateLinkServiceConnectionState")
def private_link_service_connection_state(self) -> pulumi.Output['outputs.PrivateLinkServiceConnectionStateResponse']:
"""
A collection of information about the state of the connection between service consumer and provider.
"""
return pulumi.get(self, "private_link_service_connection_state")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> pulumi.Output[str]:
"""
The provisioning state of the private endpoint connection resource.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter(name="systemData")
def system_data(self) -> pulumi.Output['outputs.SystemDataResponse']:
"""
Required property for system data
"""
return pulumi.get(self, "system_data")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts"
"""
return pulumi.get(self, "type")
| [
"[email protected]"
] | |
7854b547da510307bdd078230c768958d415bf82 | 17e31331b7bf66ce11b77ff26a4ddbeb8355c53b | /2M1207ANALYSIS/plotTinyTimResult.py~ | 14780eb0b19495045732afd7925ceec7476793c6 | [] | no_license | YifZhou/Exoplanet-Patchy-Clouds | 06e314b941055b2a758c081d5b169f5b909b416c | 31c52938b22187182475872fd1550e3b9d384bf2 | refs/heads/master | 2020-12-24T16:43:02.882067 | 2015-12-11T22:26:30 | 2015-12-11T22:26:30 | 25,367,950 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 497 | #! /usr/bin/env python
from __future__ import print_function
import matplotlib.pyplot as plt
import pandas as pd
plt.style.use('ggplot')
fn = '2M1207B_flt_F125W_fileInfo.csv'
df = pd.read_csv(fn, parse_dates = {'datetime':['obs date', 'obs time']}, index_col = 'datetime')
plt.plot(df.index, df['fluxA'], 's', label = '2M1207 A')
plt.plot(df.index, df['fluxB'], 'o', label = '2M1207 B')
plt.gcf().autofmt_xdate()
plt.legend(loc = 'best')
plt.xlabel('UT')
plt.ylabel('Normalized flux')
plt.show() | [
"[email protected]"
] | ||
8a3a42371a8d7d3f73a4cbf063670af54642286d | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03805/s696591698.py | 98b40232bec4242d8b740de8f5e409457aaeddf4 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 571 | py |
#入力
N,M = map(int,input().split())
graph = [ ]
for _ in range(N+1):
graph.append([])
for _ in range(M):
a,b = map(int,input().split())
graph[a].append(b)
graph[b].append(a)
visited = []
for _ in range(N+1):
visited.append(False)
def dfs(dep,cur):
global N,visited,graph
if dep == N:
return 1
ans = 0
for dist in graph[cur]:
if visited[dist] == False:
visited[dist] = True
ans += dfs(dep + 1,dist)
visited[dist] = False
return ans
visited[1] = True
print(dfs(1,1))
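# Note (added): dfs(dep, cur) counts the simple paths that start at node 1 and
# visit all N nodes exactly once (Hamiltonian paths from node 1), backtracking
# by resetting the visited[] flags.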
| [
"[email protected]"
] | |
71766a3c7b60cb8ab16636b0b4ab111ebb538092 | 586c97e81b448d9f4c1525205eaccc727f789ee7 | /src/buildercore/bluegreen_v2.py | ccd36a3370ccf250b7b6d2e1a6a5ecf89f323b6e | [
"MIT"
] | permissive | elifesciences/builder | 33542171fd43a454d8c45feae181037ff414874d | 7de9bb53c7e6a447a075a66023815166ea54092f | refs/heads/master | 2023-08-16T11:22:40.684539 | 2023-08-15T08:30:31 | 2023-08-15T08:30:31 | 56,778,863 | 12 | 14 | MIT | 2023-09-10T04:06:16 | 2016-04-21T14:08:05 | Python | UTF-8 | Python | false | false | 8,715 | py | """Performs blue-green actions over a load-balanced stack (ElasticLoadBalancer v2).
The nodes inside a stack are divided into two groups: blue and green.
Actions are performed separately on the two groups while they are detached from the load balancer."""
import logging
from . import core, utils, cloudformation, trop
LOG = logging.getLogger(__name__)
class SomeOutOfServiceInstances(RuntimeError):
pass
def conn(stackname):
"returns an ELBv2 connection"
return core.boto_conn(stackname, 'elbv2', client=True)
def find_load_balancer(stackname):
"returns name of the ELBv2 resource in the cloudformation template Outputs"
return cloudformation.read_output(stackname, trop.ALB_TITLE)
def info(msg, stackname, node_params):
kwargs = {'elb_name': find_load_balancer(stackname),
'iid_list': ", ".join(node_params['nodes'].keys())}
LOG.info(msg.format(**kwargs))
def divide_by_colour(node_params):
def is_blue(node):
return node % 2 == 1
def is_green(node):
return node % 2 == 0
def subset(is_subset):
subset = node_params.copy()
subset['nodes'] = {id: node for (id, node) in node_params['nodes'].items() if is_subset(node)}
subset['public_ips'] = {id: ip for (id, ip) in node_params['public_ips'].items() if id in subset['nodes'].keys()}
return subset
return subset(is_blue), subset(is_green)
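# --- Illustrative sketch (added): node numbers decide the colour, odd -> blue,
# even -> green.  Instance ids and IPs below are made up.
#
#     params = {'stackname': 'demo--prod',
#               'nodes': {'i-aaa': 1, 'i-bbb': 2},
#               'public_ips': {'i-aaa': '10.0.0.1', 'i-bbb': '10.0.0.2'}}
#     blue, green = divide_by_colour(params)
#     # blue['nodes']  == {'i-aaa': 1}
#     # green['nodes'] == {'i-bbb': 2}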
def _target_group_arn_list(stackname):
"returns a list of `TargetGroup` ARNs for given `stackname`."
return [val for key, val in cloudformation.outputs_map(stackname).items() if key.startswith('ELBv2TargetGroup')]
def _target_group_health(stackname, target_group_arn):
"returns a map of target data for the given `target_group_arn`, keyed by the Target's ID (ec2 ARN)"
result = conn(stackname).describe_target_health(
TargetGroupArn=target_group_arn
)
return {target['Target']['Id']: target for target in result['TargetHealthDescriptions']}
def _target_group_nodes(stackname, node_params=None):
"""returns a map of {target-group-arn: [{'Id': target}, ...], ...} for Targets in `node_params`.
if a `Target` isn't registered with the `TargetGroup` a synthetic result is returned instead.
if `node_params` is `None` then *all* nodes are considered."""
node_params = node_params or core.all_node_params(stackname)
ec2_arns = sorted(node_params['nodes'].keys()) # predictable testing
target_groups = {}
for target_group_arn in _target_group_arn_list(stackname):
target_groups[target_group_arn] = [{'Id': ec2_arn} for ec2_arn in ec2_arns]
return target_groups
def _target_groups(stackname):
"returns a map of `{target-group-arn: [{target}, ...], ...}` for all TargetGroups attached to `stackname`"
results = {}
for target_group_arn, target_list in _target_group_nodes(stackname).items():
target_health = _target_group_health(stackname, target_group_arn)
target_results = []
for target in target_list:
ec2_arn = target['Id']
# synthetic response. actual valid response structure:
# https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/elbv2.html#ElasticLoadBalancingv2.Client.describe_target_health
unknown_health = {
'Target': {
"Id": ec2_arn,
"Port": "~"
},
"TargetHealth": {
'State': 'no-health-data',
# health data is not returned for unregistered targets, so how this valid state ever gets returned I don't know ...
'Reason': 'Target.NotRegistered',
'Description': 'synthetic response, given target not registered.'
}
}
health = target_health.get(ec2_arn) or unknown_health
target_results.append(health)
results[target_group_arn] = target_results
return results
def _registered(stackname, node_params):
"returns a map of {target-arn: healthy?}"
ec2_arns = node_params['nodes'].keys()
result = {}
for target_group_arn, target_list in _target_groups(stackname).items():
for target in target_list:
if target['Target']['Id'] not in ec2_arns:
continue
# key has to be complex because target is present across multiple target-groups/ports
key = (target_group_arn, target['Target']['Id'])
# if there is a 'Reason' key then it isn't healthy/registered.
result[key] = 'Reason' not in target['TargetHealth']
return result
# ---
def register(stackname, node_params):
"register all targets in all target groups that are in node_params"
c = conn(stackname)
for target_group_arn, target_list in _target_group_nodes(stackname, node_params).items():
LOG.info("registering targets: %s", target_list)
if target_list:
c.register_targets(TargetGroupArn=target_group_arn, Targets=target_list)
def deregister(stackname, node_params):
"deregister all targets in all target groups"
c = conn(stackname)
for target_group_arn, target_list in _target_group_nodes(stackname, node_params).items():
LOG.info("deregistering targets: %s", target_list)
if target_list:
c.deregister_targets(TargetGroupArn=target_group_arn, Targets=target_list)
def wait_registered_any(stackname, node_params):
info("Waiting for registration of any on {elb_name}: {iid_list}", stackname, node_params)
def condition():
registered = _registered(stackname, node_params)
LOG.info("InService: %s", registered)
return not any(registered.values())
# needs to be as responsive as possible,
# to start deregistering the green group as soon as a blue server becomes available
utils.call_while(condition, interval=1, timeout=600)
def wait_registered_all(stackname, node_params):
info("Waiting for registration of all on {elb_name}: {iid_list}", stackname, node_params)
def condition():
registered = _registered(stackname, node_params)
LOG.info("InService: %s", registered)
return not all(registered.values())
utils.call_while(condition, interval=5, timeout=600)
def wait_deregistered_all(stackname, node_params):
info("Waiting for deregistration of all on {elb_name}: {iid_list}", stackname, node_params)
def condition():
registered = _registered(stackname, node_params)
LOG.info("InService: %s", registered)
        # keep waiting while *any* target is still registered; only when every
        # value is False have all targets actually been deregistered (this is
        # what the v1 implementation's `True in registered.values()` did).
        return any(registered.values())
utils.call_while(condition, interval=5, timeout=600)
def wait_all_in_service(stackname):
"behaves similarly to `wait_registered_all`, but doesn't filter nodes, has a shorter timeout and more output."
def some_not_in_service():
target_status_by_arn = {}
for target_group in _target_groups(stackname).values():
for target in target_group:
target_status_by_arn[target['Target']['Id']] = target['TargetHealth']['State']
LOG.info("Instance statuses on %s: %s", find_load_balancer(stackname), target_status_by_arn)
return [status for status in target_status_by_arn.values() if status != 'healthy']
utils.call_while(
some_not_in_service,
interval=5,
timeout=60,
update_msg='Waiting for all instances to be in service...',
exception_class=SomeOutOfServiceInstances
)
def do(single_node_work_fn, node_params):
"""`node_params` is a dictionary:
{'stackname': ...,
'nodes': {
node-id: 0,
node-id: 1,
...,
},
'public_ips': {
node-id: ip,
node-id: ip,
...
}
"""
stackname = node_params['stackname']
wait_all_in_service(stackname)
blue, green = divide_by_colour(node_params)
info("Blue phase on {elb_name}: {iid_list}", stackname, blue)
deregister(stackname, blue)
wait_deregistered_all(stackname, blue)
core.parallel_work(single_node_work_fn, blue)
# this is the window of time in which old and new servers overlap
register(stackname, blue)
wait_registered_any(stackname, blue)
info("Green phase on {elb_name}: {iid_list}", stackname, green)
deregister(stackname, green)
wait_deregistered_all(stackname, green)
core.parallel_work(single_node_work_fn, green)
register(stackname, green)
wait_registered_all(stackname, node_params)
| [
"[email protected]"
] | |
df5293357794e1ccbcb5ff320b79d5c8e310eda0 | 2063a4c153b380c1d6d25f0ece6fb514389f9b1d | /mock/buildbot_secret.py | f9a64aec05765a936f22e9ece4058f6782435778 | [
"MIT"
] | permissive | musm/julia-buildbot | 1d097f709d3f75c0becd46a9075fa52638f745d7 | 653746334ba7106cde197c910f4125a3f2930fc0 | refs/heads/master | 2021-06-24T08:15:38.373394 | 2020-07-15T06:10:41 | 2020-07-15T06:10:41 | 211,406,721 | 0 | 0 | MIT | 2019-09-27T21:48:59 | 2019-09-27T21:48:59 | null | UTF-8 | Python | false | false | 515 | py | GITHUB_WEBHOOK_SECRET="nothing to see here"
GITHUB_OAUTH_CLIENT_ID="nothing to see here"
GITHUB_OAUTH_CLIENT_SECRET="nothing to see here"
GITHUB_STATUS_OAUTH_TOKEN="nothing to see here"
COVERALLS_REPO_TOKEN="nothing to see here"
CODECOV_REPO_TOKEN="nothing to see here"
FREEBSDCI_OAUTH_TOKEN="nothing to see here"
FQDN="buildog.julialang.org"
BUILDBOT_BRANCH="master"
db_user="nothing to see here"
db_password="nothing to see here"
DOCUMENTER_KEY="nothing to see here"
MACOS_CODESIGN_IDENTITY="nothing to see here"
| [
"[email protected]"
] | |
b1d83eb193e2280bb822881484407fa574b2b1dd | 03ec2daac0989f9b6936b1e87d8ca1b0d99f1bce | /optfn/local_attention.py | cd07243576469d8196efc9d4b5eacfb79f74b7cb | [] | no_license | SSS135/optfn | f7364dce8c1857baa90d2d6564316762c574a9ba | 48ae4f5439daa89ac54921a7642e612838c724eb | refs/heads/master | 2020-05-29T15:21:38.827291 | 2020-04-29T17:51:09 | 2020-04-29T17:51:09 | 189,217,719 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,861 | py | import torch
import torch.nn as nn
import torch.nn.functional as F
from .tile_2d import Tile2d
import math
from torch.utils.checkpoint import checkpoint
class LocalAttention2d(nn.Module):
def __init__(self, in_channels, num_heads, key_size, kernel_size, stride=1, padding=0,
conv_kernel_size=1, conv_stride=1, conv_padding=0):
super().__init__()
self.in_channels = in_channels
self.key_size = key_size
self.num_heads = num_heads
self.kernel_size = kernel_size
self.stride = stride
self.padding = padding
self.out_channels = key_size * num_heads
self.attn_conv = nn.Conv2d(in_channels, 3 * key_size * num_heads, conv_kernel_size, conv_stride, conv_padding)
self.tiler = Tile2d(self.attn_conv.out_channels, kernel_size, stride, padding)
self.norm = nn.GroupNorm(3 * num_heads, 3 * key_size * num_heads)
def run_tiled(self, attn):
# (B, C, K, K, OH, OW)
tiles = self.tiler(attn)
B, C, K, _, OH, OW = tiles.shape
# (B, OH, OW, C, K, K)
tiles = tiles.permute(0, 4, 5, 1, 2, 3)
assert tiles.shape == (B, OH, OW, C, K, K)
# (B * OH * OW, NH, KS + QS + VS, K * K)
VS, KS, NH = self.key_size, self.key_size, self.num_heads
tiles = tiles.contiguous().view(B * OH * OW, NH, KS * 2 + VS, K * K)
# (B * OH * OW, NH, KS, K * K)
key, query, value = tiles.split([KS, KS, VS], dim=2)
# # (B * OH * OW, NH, KS, 1)
# query = query.mean(3, keepdim=True)
# (B * OH * OW, NH, 1, K * K)
saliency = query.transpose(-1, -2) @ key / math.sqrt(KS)
assert saliency.shape == (B * OH * OW, NH, K * K, K * K)
# (B * OH * OW, NH, 1, K * K)
mask = F.softmax(saliency, dim=-1)
# (B * OH * OW, NH, VS, 1)
out = value @ mask.transpose(-1, -2)
assert out.shape == (B * OH * OW, NH, VS, K * K)
# (B, NH, VS, OH, OW)
out = out.mean(-1).view(B, OH, OW, NH, VS).permute(0, 3, 4, 1, 2)
# (B, NH * VS, OH, OW)
out = out.view(B, NH * VS, OH, OW)
return out.contiguous()
def forward(self, input):
# (B, (KS + QS + VS) * NH, H, W)
attn = self.attn_conv(input)
attn = self.norm(attn)
return checkpoint(self.run_tiled, attn) if attn.requires_grad else self.run_tiled(attn)
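
# Usage sketch (hedged): assuming Tile2d extracts K x K windows like a
# same-padded convolution, kernel_size=3 / stride=1 / padding=1 preserves the
# spatial dims and the output carries num_heads * key_size channels:
#
#   attn = LocalAttention2d(in_channels=32, num_heads=4, key_size=16,
#                           kernel_size=3, stride=1, padding=1)
#   y = attn(torch.randn(2, 32, 16, 16))   # -> (2, 64, 16, 16)
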
class AddLocationInfo2d(nn.Module):
def __init__(self, config=((0.5, 0, 0), (1, 0, 0), (2, 0, 0), (4, 0, 0))):
super().__init__()
self.register_buffer('config', None)
self.register_buffer('harr', None)
self.register_buffer('warr', None)
self.config = torch.tensor(config, dtype=torch.float32)
self.harr = None
self.warr = None
def forward(self, input):
with torch.no_grad():
b, _, h, w = input.shape
targs = dict(device=input.device, dtype=input.dtype)
# if self.harr is None or self.harr.shape[2] != h or self.warr.shape[3] != w:
harr = torch.arange(h, **targs).div_(h - 1).view(1, 1, h, 1)
warr = torch.arange(w, **targs).div_(w - 1).view(1, 1, 1, w)
scale, hoffset, woffset = [x.view(1, -1, 1, 1) for x in torch.unbind(self.config, -1)]
harr, warr = [x.repeat(b, len(self.config), 1, 1).mul_(scale) for x in (harr, warr)]
self.harr = harr.add_(hoffset).mul_(2 * math.pi)
self.warr = warr.add_(woffset).mul_(2 * math.pi)
# else:
# harr, warr = self.harr, self.warr
# scale = self.config[:, 0].view(1, -1, 1, 1)
hrand, wrand = torch.empty((b, 2, 1, 1), **targs).uniform_(-1000, 1000).chunk(2, dim=1)
loc = (harr + hrand).sin_() + (warr + wrand).sin_()
loc.mul_(0.5)
return torch.cat([input, loc], 1)
| [
"[email protected]"
] | |
28e50a72627709735aaa8070033c738ca8ed1c72 | 3cb3702d2f3fb6729f1ea685d8439b7c2ad4f069 | /work22/q1_sol.py | c7c5943f9db4e95834bb8b91f23bdecf41c4985e | [] | no_license | ysmintor/MLAlgorithm | 95d6ceea5b16f94a039a2a5014f78ba3cdbd49d6 | 0ac119eacca336dbc9a1c22ea8a558c1761a08f4 | refs/heads/master | 2020-04-02T01:59:12.813910 | 2019-05-31T05:06:15 | 2019-05-31T05:06:15 | 153,885,300 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,005 | py | """
Solution to simple exercises to get used to TensorFlow API
You should thoroughly test your code.
TensorFlow's official documentation should be your best friend here
CS20: "TensorFlow for Deep Learning Research"
cs20.stanford.edu
Created by Chip Huyen ([email protected])
"""
import os
os.environ['TF_CPP_MIN_LOG_LEVEL']='2'
import tensorflow as tf
sess = tf.InteractiveSession()
###############################################################################
# 1a: Create two random 0-d tensors x and y of any distribution.
# Create a TensorFlow object that returns x + y if x > y, and x - y otherwise.
# Hint: look up tf.cond()
# I do the first problem for you
###############################################################################
x = tf.random_uniform([]) # Empty array as shape creates a scalar.
y = tf.random_uniform([])
out = tf.cond(tf.greater(x, y), lambda: tf.add(x, y), lambda: tf.subtract(x, y))
###############################################################################
# 1b: Create two 0-d tensors x and y randomly selected from the range [-1, 1).
# Return x + y if x < y, x - y if x > y, 0 otherwise.
# Hint: Look up tf.case().
###############################################################################
x = tf.random_uniform([], -1, 1, dtype=tf.float32)
y = tf.random_uniform([], -1, 1, dtype=tf.float32)
out = tf.case({tf.less(x, y): lambda: tf.add(x, y),
tf.greater(x, y): lambda: tf.subtract(x, y)},
default=lambda: tf.constant(0.0), exclusive=True)
###############################################################################
# 1c: Create the tensor x of the value [[0, -2, -1], [0, 1, 2]]
# and y as a tensor of zeros with the same shape as x.
# Return a boolean tensor that yields Trues if x equals y element-wise.
# Hint: Look up tf.equal().
###############################################################################
x = tf.constant([[0, -2, -1], [0, 1, 2]])
y = tf.zeros_like(x)
out = tf.equal(x, y)
###############################################################################
# 1d: Create the tensor x of value
# [29.05088806, 27.61298943, 31.19073486, 29.35532951,
# 30.97266006, 26.67541885, 38.08450317, 20.74983215,
# 34.94445419, 34.45999146, 29.06485367, 36.01657104,
# 27.88236427, 20.56035233, 30.20379066, 29.51215172,
# 33.71149445, 28.59134293, 36.05556488, 28.66994858].
# Get the indices of elements in x whose values are greater than 30.
# Hint: Use tf.where().
# Then extract elements whose values are greater than 30.
# Hint: Use tf.gather().
###############################################################################
x = tf.constant([29.05088806, 27.61298943, 31.19073486, 29.35532951,
30.97266006, 26.67541885, 38.08450317, 20.74983215,
34.94445419, 34.45999146, 29.06485367, 36.01657104,
27.88236427, 20.56035233, 30.20379066, 29.51215172,
33.71149445, 28.59134293, 36.05556488, 28.66994858])
indices = tf.where(x > 30)
out = tf.gather(x, indices)
###############################################################################
# 1e: Create a diagonal 2-d tensor of size 6 x 6 with the diagonal values of 1,
# 2, ..., 6
# Hint: Use tf.range() and tf.diag().
###############################################################################
values = tf.range(1, 7)
out = tf.diag(values)
###############################################################################
# 1f: Create a random 2-d tensor of size 10 x 10 from any distribution.
# Calculate its determinant.
# Hint: Look at tf.matrix_determinant().
###############################################################################
m = tf.random_normal([10, 10], mean=10, stddev=1)
out = tf.matrix_determinant(m)
###############################################################################
# 1g: Create tensor x with value [5, 2, 3, 5, 10, 6, 2, 3, 4, 2, 1, 1, 0, 9].
# Return the unique elements in x
# Hint: use tf.unique(). Keep in mind that tf.unique() returns a tuple.
###############################################################################
x = tf.constant([5, 2, 3, 5, 10, 6, 2, 3, 4, 2, 1, 1, 0, 9])
unique_values, indices = tf.unique(x)
###############################################################################
# 1h: Create two tensors x and y of shape 300 from any normal distribution,
# as long as they are from the same distribution.
# Use tf.cond() to return:
# - The mean squared error of (x - y) if the average of all elements in (x - y)
# is negative, or
# - The sum of absolute value of all elements in the tensor (x - y) otherwise.
# Hint: see the Huber loss function in the lecture slides 3.
###############################################################################
x = tf.random_normal([300], mean=5, stddev=1)
y = tf.random_normal([300], mean=5, stddev=1)
average = tf.reduce_mean(x - y)
def f1(): return tf.reduce_mean(tf.square(x - y))
def f2(): return tf.reduce_sum(tf.abs(x - y))
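# Note: every `out` built in this file is a graph tensor; to inspect one,
# evaluate it with the InteractiveSession created near the top of the file,
# e.g. sess.run(out) or out.eval().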
out = tf.cond(average < 0, f1, f2) | [
"[email protected]"
] | |
3365218943d3ae9ecc58ae8e412764c6fc07228b | deca929038a88ced836ede461b3cdd951b02dfd6 | /file_upload_project/file_upload_project/settings.py | 22f59090d0178ccac5b3a733a7deb59b9992133e | [] | no_license | JeffLawrence1/Python-Django-Beginner | cff08ff5ab167ff82987b2c4fb1e33d37b1876a9 | 49c0f270f61ae31cf39562bb63c2facf7a443b8d | refs/heads/master | 2020-03-09T03:36:25.347060 | 2018-04-07T21:16:54 | 2018-04-07T21:16:54 | 128,567,549 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,163 | py | """
Django settings for file_upload_project project.
Generated by 'django-admin startproject' using Django 1.11.4.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'y39if6%-3f7(u_bjoxw#%wmt82xdgd%%q2^%y0wedt)$gsc$oc'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'apps.file_upload_app',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'file_upload_project.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'file_upload_project.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
| [
"[email protected]"
] | |
49ff2f595084eee22a7e11f6c1d3bc65f80831bb | 57b53d243be373a7dda59eba57f7e5f315b42f29 | /tf_agents/agents/behavioral_cloning/behavioral_cloning_agent.py | d09ec695703ed7b57acde4d48b7a7fb4a309f671 | [
"Apache-2.0"
] | permissive | ageron/agents | 1d43dac37af95ca7b6772babbf5060fa1c5ec501 | af0b45c73301e0223d982b7323363c381aea2f78 | refs/heads/master | 2020-05-26T00:47:26.470180 | 2019-05-21T20:14:40 | 2019-05-21T20:15:05 | 188,054,825 | 5 | 0 | Apache-2.0 | 2019-05-22T14:25:46 | 2019-05-22T14:25:46 | null | UTF-8 | Python | false | false | 10,291 | py | # coding=utf-8
# Copyright 2018 The TF-Agents Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Behavioral Cloning Agents.
Implements generic form of behavioral cloning.
Users must provide their own loss functions.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import tensorflow as tf
from tf_agents.agents import tf_agent
from tf_agents.policies import epsilon_greedy_policy
from tf_agents.policies import greedy_policy
from tf_agents.policies import q_policy
from tf_agents.utils import common
from tf_agents.utils import eager_utils
from tf_agents.utils import nest_utils
import gin.tf
class BehavioralCloningLossInfo(collections.namedtuple(
'BehavioralCloningLossInfo', ('loss',))):
"""Stores a per-batch-entry loss value."""
pass
@gin.configurable
class BehavioralCloningAgent(tf_agent.TFAgent):
"""An behavioral cloning Agent.
Implements behavioral cloning, wherein the network learns to clone
given experience. Users must provide their own loss functions. Note this
implementation will use a QPolicy. To use with other policies subclass this
agent and override the `_get_policies` method. Note the cloning_network must
match the requirements of the generated policies.
Behavioral cloning was proposed in the following articles:
Pomerleau, D.A., 1991. Efficient training of artificial neural networks for
autonomous navigation. Neural Computation, 3(1), pp.88-97.
Russell, S., 1998, July. Learning agents for uncertain environments.
In Proceedings of the eleventh annual conference on Computational learning
theory (pp. 101-103). ACM.
"""
# TODO(b/127327645): This causes a loop failure when RNNs are enabled.
_enable_functions = False
def __init__(
self,
time_step_spec,
action_spec,
cloning_network,
optimizer,
epsilon_greedy=0.1,
# Params for training.
loss_fn=None,
gradient_clipping=None,
# Params for debugging
debug_summaries=False,
summarize_grads_and_vars=False,
train_step_counter=None,
name=None):
"""Creates an behavioral cloning Agent.
Args:
time_step_spec: A `TimeStep` spec of the expected time_steps.
action_spec: A nest of BoundedTensorSpec representing the actions.
cloning_network: A tf_agents.network.Network to be used by the agent.
The network will be called as
```
network(observation, step_type, network_state=None)
```
(with `network_state` optional) and must return a 2-tuple with elements
`(output, next_network_state)` where `output` will be passed as the
first argument to `loss_fn`, and used by a `Policy`. Input tensors will
be shaped `[batch, time, ...]` when training, and they will be shaped
`[batch, ...]` when the network is called within a `Policy`. If
`cloning_network` has an empty network state, then for training
`time` will always be `1` (individual examples).
optimizer: The optimizer to use for training.
epsilon_greedy: probability of choosing a random action in the default
epsilon-greedy collect policy (used only if a wrapper is not provided to
the collect_policy method).
loss_fn: A function for computing the error between the output of the
cloning network and the action that was taken. If None, the loss
depends on the action dtype. If the dtype is integer, then `loss_fn`
is
```python
def loss_fn(logits, action):
return tf.nn.sparse_softmax_cross_entropy_with_logits(
labels=action - action_spec.minimum, logits=logits)
```
If the dtype is floating point, the loss is
`tf.math.squared_difference`.
`loss_fn` must return a loss value for each element of the batch.
gradient_clipping: Norm length to clip gradients.
debug_summaries: A bool to gather debug summaries.
summarize_grads_and_vars: If True, gradient and network variable summaries
will be written during training.
train_step_counter: An optional counter to increment every time the train
op is run. Defaults to the global_step.
name: The name of this agent. All variables in this module will fall
under that name. Defaults to the class name.
Raises:
NotImplementedError: If the action spec contains more than one action.
"""
tf.Module.__init__(self, name=name)
flat_action_spec = tf.nest.flatten(action_spec)
self._num_actions = [
spec.maximum - spec.minimum + 1 for spec in flat_action_spec
]
# TODO(oars): Get behavioral cloning working with more than one dim in
# the actions.
if len(flat_action_spec) > 1:
raise NotImplementedError(
'Multi-arity actions are not currently supported.')
if loss_fn is None:
loss_fn = self._get_default_loss_fn(flat_action_spec[0])
self._cloning_network = cloning_network
self._loss_fn = loss_fn
self._epsilon_greedy = epsilon_greedy
self._optimizer = optimizer
self._gradient_clipping = gradient_clipping
policy, collect_policy = self._get_policies(time_step_spec, action_spec,
cloning_network)
super(BehavioralCloningAgent, self).__init__(
time_step_spec,
action_spec,
policy,
collect_policy,
train_sequence_length=1 if not cloning_network.state_spec else None,
debug_summaries=debug_summaries,
summarize_grads_and_vars=summarize_grads_and_vars,
train_step_counter=train_step_counter)
def _get_default_loss_fn(self, spec):
if spec.dtype.is_floating:
return tf.math.squared_difference
if spec.shape.ndims > 1:
raise NotImplementedError(
'Only scalar and one dimensional integer actions are supported.')
# TODO(ebrevdo): Maybe move the subtraction of the minimum into a
# self._label_fn and rewrite this.
def xent_loss_fn(logits, actions):
# Subtract the minimum so that we get a proper cross entropy loss on
# [0, maximum - minimum).
return tf.nn.sparse_softmax_cross_entropy_with_logits(
logits=logits, labels=actions - spec.minimum)
return xent_loss_fn
def _get_policies(self, time_step_spec, action_spec, cloning_network):
policy = q_policy.QPolicy(
time_step_spec, action_spec, q_network=self._cloning_network)
collect_policy = epsilon_greedy_policy.EpsilonGreedyPolicy(
policy, epsilon=self._epsilon_greedy)
policy = greedy_policy.GreedyPolicy(policy)
return policy, collect_policy
def _initialize(self):
return tf.no_op()
def _train(self, experience, weights=None):
loss_info = self._loss(experience, weights=weights)
transform_grads_fn = None
if self._gradient_clipping is not None:
transform_grads_fn = eager_utils.clip_gradient_norms_fn(
self._gradient_clipping)
loss_info = eager_utils.create_train_step(
loss_info,
self._optimizer,
total_loss_fn=lambda loss_info: loss_info.loss,
global_step=self.train_step_counter,
transform_grads_fn=transform_grads_fn,
summarize_gradients=self._summarize_grads_and_vars,
variables_to_train=lambda: self._cloning_network.trainable_weights,
)
return loss_info
@eager_utils.future_in_eager_mode
# TODO(b/79688437): Figure out how to enable defun for Eager mode.
# @tfe.defun
def _loss(self, experience, weights=None):
"""Computes loss for behavioral cloning.
Args:
experience: A `Trajectory` containing experience.
weights: Optional scalar or element-wise (per-batch-entry) importance
weights.
Returns:
loss: A `LossInfo` struct.
Raises:
ValueError:
If the number of actions is greater than 1.
"""
with tf.name_scope('loss'):
actions = tf.nest.flatten(experience.action)[0]
logits, _ = self._cloning_network(
experience.observation,
experience.step_type)
boundary_weights = tf.cast(~experience.is_boundary(), logits.dtype)
error = boundary_weights * self._loss_fn(logits, actions)
if nest_utils.is_batched_nested_tensors(
experience.action, self.action_spec, num_outer_dims=2):
# Do a sum over the time dimension.
error = tf.reduce_sum(input_tensor=error, axis=1)
# Average across the elements of the batch.
# Note: We use an element wise loss above to ensure each element is always
# weighted by 1/N where N is the batch size, even when some of the
# weights are zero due to boundary transitions. Weighting by 1/K where K
# is the actual number of non-zero weight would artificially increase
# their contribution in the loss. Think about what would happen as
# the number of boundary samples increases.
if weights is not None:
error *= weights
loss = tf.reduce_mean(input_tensor=error)
with tf.name_scope('Losses/'):
tf.compat.v2.summary.scalar(
name='loss', data=loss, step=self.train_step_counter)
if self._summarize_grads_and_vars:
with tf.name_scope('Variables/'):
for var in self._cloning_network.trainable_weights:
tf.compat.v2.summary.histogram(
name=var.name.replace(':', '_'),
data=var,
step=self.train_step_counter)
if self._debug_summaries:
common.generate_tensor_summaries('errors', error,
self.train_step_counter)
return tf_agent.LossInfo(loss, BehavioralCloningLossInfo(loss=error))
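
# Construction sketch (hedged -- `time_step_spec`, `action_spec` and
# `cloning_net` are placeholders the caller must supply; any Q-network-style
# tf_agents network matching the specs works as the cloning_network):
#
#   agent = BehavioralCloningAgent(
#       time_step_spec, action_spec,
#       cloning_network=cloning_net,
#       optimizer=tf.compat.v1.train.AdamOptimizer(learning_rate=1e-3))
#   loss_info = agent.train(experience)  # `experience` is a batched Trajectory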
| [
"[email protected]"
] | |
8f4797c22b68ab37a8cf040014770158b6c472ef | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2339/60581/245838.py | f70511660b4a3a605ebb9a0482586e53a440cc8f | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,528 | py | import sys
lst = []
for line in sys.stdin:
if line.strip()=="":
break
lst.append(line)
parsed = []  # one token list per input line (uppercase words and integers)
# Tokenise each raw line: a run of uppercase letters is one word token and a
# run of digits is one integer token; every other character is skipped.
for i in range(0, len(lst)):
    theLine = []
    j = 0
    while j < len(lst[i]):
        token = ''
        if 'A' <= lst[i][j] <= 'Z':
            while j < len(lst[i]) and 'A' <= lst[i][j] <= 'Z':
                token += lst[i][j]
                j += 1
            theLine.append(token)
            continue
        if '0' <= lst[i][j] <= '9':
            while j < len(lst[i]) and '0' <= lst[i][j] <= '9':
                token += lst[i][j]
                j += 1
            theLine.append(int(token))
            continue
        j += 1
    parsed.append(theLine)
testNumber = parsed[0][0]
start = 1
count = 0
while count < testNumber:
    reverseNumber = 0
    numbers = parsed[start][0]
    numberList = parsed[start + 1].copy()
for i in range(0,numbers-1):
for j in range(i+1,numbers):
if numberList[i]> numberList[j]:
reverseNumber += 1
print(reverseNumber)
start += 2
count += 1 | [
"[email protected]"
] | |
92c1eb4f1f66c9d305670471cc22c3e4c381ebf3 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03372/s276854525.py | 57b897a767dabcd81e8e6b4752abbd0e7252b35d | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 571 | py | N,C = map(int,input().split())
A = [list(map(int,input().split())) for _ in range(N)]
A.insert(0,[0,0])
F = [0 for _ in range(N+1)]
F[N] = A[N][1]-(C-A[N][0])
for i in range(N-1,0,-1):
F[i] = F[i+1]+A[i][1]-(A[i+1][0]-A[i][0])
G = [0 for _ in range(N+1)]
for i in range(1,N+1):
G[i] = G[i-1]+A[i][1]-(A[i][0]-A[i-1][0])
cmax = max(max(F),max(G))
dmax = 0
for i in range(N-1,0,-1):
dmax = max(dmax,F[i+1])
cmax = max(cmax,dmax+G[i]-A[i][0])
emax = 0
for i in range(2,N+1):
emax = max(emax,G[i-1])
cmax = max(cmax,emax+F[i]-(C-A[i][0]))
print(cmax) | [
"[email protected]"
] | |
0176da0491e0b7b552f7fd64342ea7bc8e69a77b | 4d6975caece0acdc793a41e8bc6d700d8c2fec9a | /leetcode/881.loud-and-rich/881.loud-and-rich.py | 0eb22c8e64a73530da24801d689eb96bb5833dc1 | [] | no_license | guiconti/workout | 36a3923f2381d6e7023e127100409b3a2e7e4ccb | 5162d14cd64b720351eb30161283e8727cfcf376 | refs/heads/master | 2021-08-03T10:32:02.108714 | 2021-07-26T04:38:14 | 2021-07-26T04:38:14 | 221,025,113 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 107 | py | class Solution:
def loudAndRich(self, richer: List[List[int]], quiet: List[int]) -> List[int]:
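        # Standard memoised-DFS solution to LeetCode 851 (`List` is supplied
        # by the judge's typing import): for each person x, return the
        # quietest person among everyone with at least as much money as x.
        n = len(quiet)
        richer_than = [[] for _ in range(n)]
        for a, b in richer:
            richer_than[b].append(a)  # a has more money than b
        ans = [-1] * n  # -1 marks "not computed yet"

        def dfs(x):
            if ans[x] == -1:
                ans[x] = x
                for y in richer_than[x]:
                    cand = dfs(y)
                    if quiet[cand] < quiet[ans[x]]:
                        ans[x] = cand
            return ans[x]

        return [dfs(x) for x in range(n)]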
| [
"[email protected]"
] | |
e0d1c815ddb30cf7f7840777d2c216dcf534a24c | 467be8fc9c975638fcb7a64d098e1526fd1c96f0 | /dlint/linters/bad_defusedxml_use.py | 95bc81cf1c4d9b13fbf7c12dd478ebb65246b99c | [
"BSD-3-Clause"
] | permissive | dlint-py/dlint | ed8d2ca0446914fceded654a2b810b7f8ad0d9d3 | 307b301cd9e280dcd7a7f9d5edfda3d58e4855f5 | refs/heads/master | 2023-04-13T08:54:52.987469 | 2023-04-10T19:27:01 | 2023-04-10T19:27:15 | 232,599,661 | 154 | 16 | BSD-3-Clause | 2023-03-09T21:21:19 | 2020-01-08T15:53:36 | Python | UTF-8 | Python | false | false | 5,772 | py | #!/usr/bin/env python
from .helpers import bad_kwarg_use
from .. import tree
class BadDefusedxmlUseLinter(bad_kwarg_use.BadKwargUseLinter):
"""This linter looks for lack of "defusedxml" parsing defenses. The
"defusedxml" library offers "forbid_dtd", "forbid_entities", and
"forbid_external" keyword arguments to prevent various XML attack
vectors[1]. All defenses should be enabled.
[1] https://pypi.org/project/defusedxml/
"""
off_by_default = False
_code = 'DUO135'
_error_tmpl = 'DUO135 enable all "forbid_*" defenses when using "defusedxml" parsing'
@property
def kwargs(self):
return [
{
"module_path": "defusedxml.lxml.fromstring",
"kwarg_name": "forbid_dtd",
"predicate": tree.kwarg_not_present,
},
{
"module_path": "defusedxml.lxml.iterparse",
"kwarg_name": "forbid_dtd",
"predicate": tree.kwarg_not_present,
},
{
"module_path": "defusedxml.lxml.parse",
"kwarg_name": "forbid_dtd",
"predicate": tree.kwarg_not_present,
},
{
"module_path": "defusedxml.lxml.fromstring",
"kwarg_name": "forbid_entities",
"predicate": tree.kwarg_false,
},
{
"module_path": "defusedxml.lxml.iterparse",
"kwarg_name": "forbid_entities",
"predicate": tree.kwarg_false,
},
{
"module_path": "defusedxml.lxml.parse",
"kwarg_name": "forbid_entities",
"predicate": tree.kwarg_false,
},
{
"module_path": "defusedxml.lxml.fromstring",
"kwarg_name": "forbid_external",
"predicate": tree.kwarg_false,
},
{
"module_path": "defusedxml.lxml.iterparse",
"kwarg_name": "forbid_external",
"predicate": tree.kwarg_false,
},
{
"module_path": "defusedxml.lxml.parse",
"kwarg_name": "forbid_external",
"predicate": tree.kwarg_false,
},
{
"module_path": "defusedxml.cElementTree.fromstring",
"kwarg_name": "forbid_dtd",
"predicate": tree.kwarg_not_present,
},
{
"module_path": "defusedxml.cElementTree.iterparse",
"kwarg_name": "forbid_dtd",
"predicate": tree.kwarg_not_present,
},
{
"module_path": "defusedxml.cElementTree.parse",
"kwarg_name": "forbid_dtd",
"predicate": tree.kwarg_not_present,
},
{
"module_path": "defusedxml.cElementTree.fromstring",
"kwarg_name": "forbid_entities",
"predicate": tree.kwarg_false,
},
{
"module_path": "defusedxml.cElementTree.iterparse",
"kwarg_name": "forbid_entities",
"predicate": tree.kwarg_false,
},
{
"module_path": "defusedxml.cElementTree.parse",
"kwarg_name": "forbid_entities",
"predicate": tree.kwarg_false,
},
{
"module_path": "defusedxml.cElementTree.fromstring",
"kwarg_name": "forbid_external",
"predicate": tree.kwarg_false,
},
{
"module_path": "defusedxml.cElementTree.iterparse",
"kwarg_name": "forbid_external",
"predicate": tree.kwarg_false,
},
{
"module_path": "defusedxml.cElementTree.parse",
"kwarg_name": "forbid_external",
"predicate": tree.kwarg_false,
},
{
"module_path": "defusedxml.ElementTree.fromstring",
"kwarg_name": "forbid_dtd",
"predicate": tree.kwarg_not_present,
},
{
"module_path": "defusedxml.ElementTree.iterparse",
"kwarg_name": "forbid_dtd",
"predicate": tree.kwarg_not_present,
},
{
"module_path": "defusedxml.ElementTree.parse",
"kwarg_name": "forbid_dtd",
"predicate": tree.kwarg_not_present,
},
{
"module_path": "defusedxml.ElementTree.fromstring",
"kwarg_name": "forbid_entities",
"predicate": tree.kwarg_false,
},
{
"module_path": "defusedxml.ElementTree.iterparse",
"kwarg_name": "forbid_entities",
"predicate": tree.kwarg_false,
},
{
"module_path": "defusedxml.ElementTree.parse",
"kwarg_name": "forbid_entities",
"predicate": tree.kwarg_false,
},
{
"module_path": "defusedxml.ElementTree.fromstring",
"kwarg_name": "forbid_external",
"predicate": tree.kwarg_false,
},
{
"module_path": "defusedxml.ElementTree.iterparse",
"kwarg_name": "forbid_external",
"predicate": tree.kwarg_false,
},
{
"module_path": "defusedxml.ElementTree.parse",
"kwarg_name": "forbid_external",
"predicate": tree.kwarg_false,
},
]
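
# What this rule flags vs. accepts (sketch; `path` is a placeholder):
#
#   defusedxml.ElementTree.parse(path)             # DUO135 -- forbid_dtd missing
#   defusedxml.ElementTree.parse(path,
#       forbid_dtd=True, forbid_entities=True,
#       forbid_external=True)                      # ok -- all defenses enabled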
| [
"[email protected]"
] | |
32a6600da729c0f3fd1643970f821d26a6615da5 | 781e2692049e87a4256320c76e82a19be257a05d | /all_data/exercism_data/python/allergies/12407861133f488faa80356443c08313.py | 6ece998ee9ac220a1f267c249ba6d91f9e465777 | [] | no_license | itsolutionscorp/AutoStyle-Clustering | 54bde86fe6dbad35b568b38cfcb14c5ffaab51b0 | be0e2f635a7558f56c61bc0b36c6146b01d1e6e6 | refs/heads/master | 2020-12-11T07:27:19.291038 | 2016-03-16T03:18:00 | 2016-03-16T03:18:42 | 59,454,921 | 4 | 0 | null | 2016-05-23T05:40:56 | 2016-05-23T05:40:56 | null | UTF-8 | Python | false | false | 511 | py | class Allergies(list):
def __init__(self, score):
        # Allergen i corresponds to bit i of the score, lowest bit first.
        items = ['eggs', 'peanuts', 'shellfish', 'strawberries',
                 'tomatoes', 'chocolate', 'pollen', 'cats']
self.list = []
for i in range(8):
if (1 << i) & score:
self.list.append(items[i])
def is_allergic_to(self, item):
return item in self.list
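
# Example: a score of 3 sets bits 0 and 1, so
#   Allergies(3).list == ['eggs', 'peanuts']
#   Allergies(3).is_allergic_to('cats') is False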
| [
"[email protected]"
] | |
fba38459a2d51b42d006592e4fd6866557f118f7 | 4f04ce5667f895889cfe54ed5f0dec6f5e7d4e4e | /bert_brain/data_sets/word_in_context.py | a0e401eb6455ea5649b200a69f747179c6b36a65 | [] | no_license | danrsc/bert_brain | e172859b7ab93b0a05ed7c5b936778fae134eabb | eca204f163018270ac6b6687c2f3b6b5b158a89c | refs/heads/master | 2022-11-28T14:32:45.420452 | 2020-08-03T00:14:42 | 2020-08-03T00:14:42 | 167,277,920 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,590 | py | import os
import json
from dataclasses import dataclass
import numpy as np
from ..common import split_with_indices
from .input_features import RawData, KindData, ResponseKind
from .corpus_base import CorpusBase, CorpusExampleUnifier, path_attribute_field
__all__ = ['WordInContext']
@dataclass(frozen=True)
class WordInContext(CorpusBase):
path: str = path_attribute_field('word_in_context_path')
@staticmethod
def sentence_and_keyword_index(sentence, keyword, character_index):
keyword_index = None
words = list()
for w_index, (c_index, word) in enumerate(split_with_indices(sentence)):
if c_index + len(word) > character_index >= c_index:
keyword_index = w_index
words.append(word)
if keyword_index is None:
raise ValueError('Unable to match keyword index')
return words, keyword_index
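
    # Worked example (assumes common.split_with_indices yields
    # (char_index, word) pairs): sentence_and_keyword_index("the big cat",
    # "cat", 8) returns (["the", "big", "cat"], 2), since "cat" starts at
    # character index 8.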
@staticmethod
def _read_examples(path, example_manager: CorpusExampleUnifier, labels):
examples = list()
with open(path, 'rt') as f:
for line in f:
fields = json.loads(line.strip('\n'))
words_1, keyword_1 = WordInContext.sentence_and_keyword_index(
fields['sentence1'], fields['word'], fields['start1'])
words_2, keyword_2 = WordInContext.sentence_and_keyword_index(
fields['sentence2'], fields['word'], fields['start2'])
label = fields['label'] if 'label' in fields else 1
data_ids = -1 * np.ones(len(words_1) + len(words_2), dtype=np.int64)
data_ids[keyword_1] = len(labels)
data_ids[keyword_2] = len(labels)
examples.append(example_manager.add_example(
example_key=None,
words=words_1 + words_2,
sentence_ids=[0] * len(words_1) + [1] * len(words_2),
data_key='wic',
data_ids=data_ids,
start=0,
stop=len(words_1),
start_sequence_2=len(words_1),
stop_sequence_2=len(words_1) + len(words_2)))
labels.append(label)
return examples
@classmethod
def response_key(cls) -> str:
return 'wic'
@classmethod
def num_classes(cls) -> int:
return 2
def _load(self, example_manager: CorpusExampleUnifier, use_meta_train: bool):
labels = list()
train = WordInContext._read_examples(
os.path.join(self.path, 'train.jsonl'), example_manager, labels)
meta_train = None
if use_meta_train:
from sklearn.model_selection import train_test_split
idx_train, idx_meta_train = train_test_split(np.arange(len(train)), test_size=0.2)
meta_train = [train[i] for i in idx_meta_train]
train = [train[i] for i in idx_train]
validation = WordInContext._read_examples(
os.path.join(self.path, 'val.jsonl'), example_manager, labels)
test = WordInContext._read_examples(
os.path.join(self.path, 'test.jsonl'), example_manager, labels)
labels = np.array(labels, dtype=np.float64)
labels.setflags(write=False)
return RawData(
input_examples=train,
validation_input_examples=validation,
test_input_examples=test,
meta_train_input_examples=meta_train,
response_data={type(self).response_key(): KindData(ResponseKind.generic, labels)},
is_pre_split=True)
| [
"[email protected]"
] | |
2410138a68e12d0198596a040e18476ee91d7569 | dee143986a25fd602b67aadf82e15d2f7b18f85b | /perfect_stranger/game/pages.py | 7235807b1ada5f95df35ec3eb9200b9fd3776b30 | [
"MIT"
] | permissive | cesslab/otree-perfect-stranger-matching | ea3bace81d2cd810a3197c67648ed6584839bfd9 | 0f5a4fc2beac0176d86f622a23e07511026f77cc | refs/heads/master | 2022-12-12T17:55:44.895739 | 2020-02-08T14:50:46 | 2020-02-08T14:50:46 | 239,089,905 | 0 | 0 | NOASSERTION | 2022-12-08T03:35:36 | 2020-02-08T07:55:53 | Python | UTF-8 | Python | false | false | 657 | py | from otree.api import Currency as c, currency_range
from ._builtin import Page, WaitPage
from .models import Constants
class MyPage(Page):
def vars_for_template(self):
subsessions = self.session.get_subsessions()
matrices = [subsession.get_group_matrix() for subsession in subsessions]
return {
'other_player': self.player.get_others_in_group()[0],
'matrix': self.subsession.get_group_matrix(),
'group_matrices': matrices
}
# class ResultsWaitPage(WaitPage):
# def after_all_players_arrive(self):
# pass
# class Results(Page):
# pass
page_sequence = [MyPage]
| [
"[email protected]"
] | |
44bf5c472b6cccee9a48f3f665f2d484bf92a851 | acb8e84e3b9c987fcab341f799f41d5a5ec4d587 | /langs/9/xh5.py | 4dd30f7721e2f84d44b8edcf20cf6bbce2a180bb | [] | no_license | G4te-Keep3r/HowdyHackers | 46bfad63eafe5ac515da363e1c75fa6f4b9bca32 | fb6d391aaecb60ab5c4650d4ae2ddd599fd85db2 | refs/heads/master | 2020-08-01T12:08:10.782018 | 2016-11-13T20:45:50 | 2016-11-13T20:45:50 | 73,624,224 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 486 | py | import sys
def printFunction(lineRemaining):
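    # Tokens come from str.split(), so the quotes must stand alone:
    # the line `xh5 " hello world "` prints "hello world".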
if lineRemaining[0] == '"' and lineRemaining[-1] == '"':
if len(lineRemaining) > 2:
#data to print
lineRemaining = lineRemaining[1:-1]
print ' '.join(lineRemaining)
else:
print
def main(fileName):
with open(fileName) as f:
for line in f:
data = line.split()
if data[0] == 'xh5':
printFunction(data[1:])
else:
print 'ERROR'
return
if __name__ == '__main__':
main(sys.argv[1]) | [
"[email protected]"
] | |
7ed601dcf4757b3e86143ca0ec316307eb2303e2 | a50487ba59c7ce04854f9004b9752a32823b7b2a | /src/server.py | 347e07bc1763ec6c906308c9d56625708ab15516 | [] | no_license | shonenada-archives/sqlite-sync | 909062646b01f80cf621b5527f168049e9012e76 | f1379939893cebfffae701904ef12d6b4e4e18ea | refs/heads/master | 2021-01-01T05:13:56.854536 | 2016-06-04T17:53:22 | 2016-06-04T17:53:22 | 59,564,245 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,607 | py | # -*- coding: utf-8 -*-
import os
import sys
import socket
import base64
import sqlite3
HOST = '0.0.0.0'
PORT = 23333
MAX_CONNECTIONS = 1
SEGMENT_SIZE = 1024
DB_PATH = './dbs/sync.db'
db = sqlite3.connect(DB_PATH)
cursor = db.cursor()
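
# Wire protocol implemented below: each request is one space-separated
# message -- PING, LAST, SYNC <id>, SHUTDOWN or CLOSE -- and every non-empty
# response is terminated with '\r\n\r\n'.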
def invalid_command(params):
return 'Invalid command'
def ping_command(params):
return 'Pong'
def last_command(params):
cursor.execute('SELECT id FROM images ORDER BY ID DESC LIMIT 1')
rs = cursor.fetchone()
if rs:
return str(rs[0])
else:
return None
def sync_command(params):
id_ = params
cursor.execute('SELECT id, data FROM images WHERE id > ? ORDER BY ID LIMIT 1', (id_,))
    data = cursor.fetchone()
    # Guard before indexing: fetchone() returns None when no newer row exists.
    if data is None:
        return None
    img = base64.b64encode(data[1])
    return '{} {}'.format(data[0], img)
def shutdown(params):
raise IOError()
class Server(object):
commands = {
'PING': ping_command,
'LAST': last_command,
'SYNC': sync_command,
'SHUTDOWN': shutdown,
}
def __init__(self, host, port):
self.host = host
self.port = port
self.server = None
def run(self):
self.server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.server.bind((self.host, self.port))
self.server.listen(MAX_CONNECTIONS)
print 'listen %s:%s' % (self.host, self.port)
while True:
connection, address = self.server.accept()
print 'Connected from %s' % str(address)
while True:
                msg = connection.recv(SEGMENT_SIZE)
                # recv() returns an empty string (not None) once the peer
                # disconnects; treat that like an explicit CLOSE.
                if not msg:
                    break
                split_msg = msg.split(' ', 1)
                if len(split_msg) > 1:
                    command, params = split_msg
                else:
                    command = split_msg[0]
                    params = None
                if command == 'CLOSE':
                    break
                command_handler = self.commands.get(command, invalid_command)
                result = command_handler(params)
                if result is not None:
                    connection.send(result + '\r\n\r\n')
connection.close()
def main():
if len(sys.argv) == 1:
host, port = HOST, PORT
elif len(sys.argv) == 2:
host = sys.argv[1]
port = PORT
elif len(sys.argv) == 3:
host = sys.argv[1]
        port = int(sys.argv[2])  # bind() expects an int, argv gives a string
server = Server(host, port)
server.run()
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
d472390654a9ff3499afe1db639ef2ad8891fa67 | e811a08b8b653da94e516ca147ec49b534f74a62 | /test/Test_unittest.py | 770f9e14cf79477dded396b3d430e617b5f53bf2 | [] | no_license | HoYaStudy/Python_Study | 0feb4a9ba7e68ebea6b2db15b20a3680f979a4de | 59c2cc093ae8ae87c8e07365cc432d87ded29ccc | refs/heads/master | 2023-02-07T23:40:16.135565 | 2023-01-24T06:17:58 | 2023-01-24T06:17:58 | 200,445,372 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,493 | py | # Import Module --------------------------------------------------------------#
import unittest
# Class Definition to Test ---------------------------------------------------#
class TestClass1:
pass
class TestClass2:
pass
# Test Suite Class Definition ------------------------------------------------#
class TestSuite(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def testEqual(self):
"""
actual == expected
"""
actual = 1
expected = 1
self.assertEqual(actual, expected)
def testNotEqual(self):
"""
actual != expected
"""
actual = 1
expected = 2
self.assertNotEqual(actual, expected)
def testTrue(self):
"""
bool(value) is True
"""
value = True
self.assertTrue(value)
def testFalse(self):
"""
bool(value) is False
"""
value = False
self.assertFalse(value)
def testIs(self):
"""
value1 is value2
"""
value1 = TestClass1()
value2 = value1
self.assertIs(value1, value2)
def testIsNot(self):
"""
value1 is not value2
"""
value1 = TestClass1()
value2 = TestClass2()
self.assertIsNot(value1, value2)
def testIsNone(self):
"""
value is None
"""
value = None
self.assertIsNone(value)
def testIsNotNone(self):
"""
value is not None
"""
value = "test"
self.assertIsNotNone(value)
def testIn(self):
"""
value1 in value2
"""
value1 = 1
value2 = range(6)
self.assertIn(value1, value2)
def testNotIn(self):
"""
value1 not in value2
"""
value1 = 7
value2 = range(6)
self.assertNotIn(value1, value2)
def testIsInstance(self):
"""
isinstance(value1, value2)
"""
value1 = TestClass1()
value2 = TestClass1
self.assertIsInstance(value1, value2)
def testNotIsInstance(self):
"""
not isinstance(value1, value2)
"""
value1 = TestClass1()
value2 = TestClass2
self.assertNotIsInstance(value1, value2)
def testAlmostEqual(self):
"""
round(value1 - value2, 7) == 0
"""
value1 = 1.23456789
value2 = 1.23456788
self.assertAlmostEqual(value1, value2)
def testNotAlmostEqual(self):
"""
round(value1 - value2, 7) != 0
"""
value1 = 3.14
value2 = 3.15
self.assertNotAlmostEqual(value1, value2)
def testGreater(self):
"""
value1 > value2
"""
value1 = 5
value2 = 3
self.assertGreater(value1, value2)
def testGreaterEqual(self):
"""
value1 >= value2
"""
value1 = 5
value2 = 3
self.assertGreaterEqual(value1, value2)
def testLess(self):
"""
value1 < value2
"""
value1 = 2
value2 = 4
self.assertLess(value1, value2)
def testLessEqual(self):
"""
value1 <= value2
"""
value1 = 2
value2 = 4
self.assertLessEqual(value1, value2)
def testRegex(self):
"""
value2.search(value1)
"""
value1 = "test"
value2 = "e"
self.assertRegex(value1, value2)
def testNotRegex(self):
"""
not value2.search(value1)
"""
value1 = "test"
value2 = "a"
self.assertNotRegex(value1, value2)
def testCountEqual(self):
"""
value1 and value2 have the same elements in the same number,
regardless of their order.
"""
value1 = "abcde"
value2 = "ecbda"
self.assertCountEqual(value1, value2)
def testMultiLineEqual(self):
str1 = "T\
E\
S\
T"
str2 = "T\
E\
S\
T"
self.assertMultiLineEqual(str1, str2)
    def testSequenceEqual(self):
seq1 = range(6)
seq2 = range(6)
self.assertSequenceEqual(seq1, seq2)
def testListEqual(self):
list1 = [1, 2, 3]
list2 = [1, 2, 3]
self.assertListEqual(list1, list2)
def testTupleEqual(self):
tuple1 = (1, 2, 3)
tuple2 = (1, 2, 3)
self.assertTupleEqual(tuple1, tuple2)
def testSetEqual(self):
set1 = set([1, 2, 3])
set2 = set([3, 2, 1])
self.assertSetEqual(set1, set2)
def testDictEqual(self):
dict1 = {"key1": "value1", "key2": "value2"}
dict2 = {"key2": "value2", "key1": "value1"}
self.assertDictEqual(dict1, dict2)
def testAdd(self):
params = ((3, {"a": 1, "b": 2}), (5, {"a": 2, "b": 3}), (7, {"a": 3, "b": 4}))
for expected, param in params:
with self.subTest(**param):
actual = param["a"] + param["b"]
self.assertEqual(actual, expected)
@unittest.skip("This test will be skipped")
def testSkip(self):
pass
@unittest.skipIf(2 > 1, "This test will be skipped")
def testSkipIf(self):
pass
# Main -----------------------------------------------------------------------#
if __name__ == "__main__":
unittest.main()
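    # Alternatively, run from this file's directory with per-test output:
    #   python -m unittest -v Test_unittest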
| [
"[email protected]"
] | |
69e315508feadd87fb9985595579dfdc419928cc | cbaa8c30fb4352c241dc81c1c6cc5079aad2526b | /primitive_types/swap_bits.py | 46baf9f2be943e1dc7e0ec147f3544b02c89f550 | [] | no_license | Fatou1993/interviews-preparation | 68b7f870a4a0e93717f97ec1a4e1438bbb2a3410 | 1b733a79007362a4816a896ebd6c199b4098af36 | refs/heads/master | 2021-05-04T00:47:16.363097 | 2018-02-05T19:29:53 | 2018-02-05T19:29:53 | 120,351,309 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 117 | py | def swap_bits(x, i, j):
if (x >> i)&1 != (x >> j)&1 :
mask = (1<<i)|(1<<j)
x ^= mask
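    # Worked example: bits 0 and 3 of 0b1001 are equal, so
    # swap_bits(0b1001, 0, 3) == 0b1001, while swap_bits(0b0001, 0, 3) == 0b1000.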
return x | [
"[email protected]"
] | |
b9c7959458aceb44a3dafe8e626e9ad91f88343a | 02800e659f2088550a402d7d7d8e3902560893e3 | /merf/internal/configs.py | 783fbcf0eb66cf46b5722afa9c8dbd741cc86fd4 | [
"Apache-2.0",
"CC-BY-4.0"
] | permissive | antonpolishko/google-research | ab8a445d5401185eadfe757e73dc8bcf34da8dea | 1b4e7db5f90bcb4f80803383a81d8613ebfdfeec | refs/heads/master | 2023-08-31T06:38:33.963505 | 2023-08-26T16:33:48 | 2023-08-26T16:37:57 | 422,090,201 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,732 | py | # coding=utf-8
# Copyright 2023 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility functions for handling configurations."""
# pylint: disable=g-importing-member
import dataclasses
from dataclasses import field
from os import path
from typing import Any, Optional, Tuple
from absl import flags
from flax.core import FrozenDict
import gin
from internal import schedule
from internal import utils
import jax
import jax.numpy as jnp
configurables = {
'jnp': [
jnp.reciprocal,
jnp.log,
jnp.log1p,
jnp.exp,
jnp.sqrt,
jnp.square,
jnp.sum,
jnp.mean,
],
'jax.nn': [jax.nn.relu, jax.nn.softplus, jax.nn.silu],
'jax.nn.initializers.he_normal': [jax.nn.initializers.he_normal()],
'jax.nn.initializers.he_uniform': [jax.nn.initializers.he_uniform()],
'jax.nn.initializers.glorot_normal': [jax.nn.initializers.glorot_normal()],
'jax.nn.initializers.glorot_uniform': [
jax.nn.initializers.glorot_uniform()
],
}
for module, fns in configurables.items():  # avoid shadowing the dict itself
  for fn in fns:
    gin.config.external_configurable(fn, module=module)
@gin.configurable()
@dataclasses.dataclass
class Config:
"""Configuration flags for everything."""
# Paths.
checkpoint_dir: Optional[str] = None # Where to log checkpoints.
data_dir: Optional[str] = None # Input data directory.
# Representation.
triplane_resolution: int = 2048 # Planes will have dimensions (T, T) where
# T = triplane_resolution.
sparse_grid_resolution: int = 512 # Voxel grid will have dimensions (S, S, S)
# where S = sparse_grid_resolution.
num_samples_per_voxel: int = 1 # Only affects rendering from the baked
# representation.
data_block_size: int = 8 # Block size for the block-sparse 3D grid
# (see SNeRG).
range_features: Tuple[float, float] = field(
default_factory=lambda: (-7.0, 7.0)
) # Value range for appearance features.
range_density: Tuple[float, float] = field(
default_factory=lambda: (-14.0, 14.0)
) # Value range for density features.
# Control flow.
max_steps: int = 25000 # Number of optimization steps.
batch_size: int = 65536 # The number of rays/pixels in each batch.
render_chunk_size: int = 65536 # Chunk size for whole-image renderings.
checkpoint_every: int = 5000 # Steps to save a checkpoint.
print_every: int = 100 # Steps between printing losses.
train_render_every: int = 500 # Steps between validation renders
cast_rays_in_train_step: bool = True # If True, compute rays in train step.
gradient_accumulation_steps: int = 8 # Increase this value when running OOM.
stop_after_training: bool = False
stop_after_testing: bool = False
stop_after_compute_alive_voxels: bool = False
render_train_set: bool = False
model_seed: int = 6550634 # This seed is used to initalize model parameters.
# Loss weights.
data_loss_mult: float = 1.0 # Mult for the finest data term in the loss.
charb_padding: float = 0.001 # The padding used for Charbonnier loss.
interlevel_loss_mult: float = 1.0 # Mult. for the loss on the proposal MLP.
distortion_loss_mult: float = 0.01 # Multiplier on the distortion loss.
yu_sparsity_loss_mult: Optional[schedule.Schedule] = schedule.ConstSchedule(
0.01
) # Multiplier for sparsity loss.
num_random_samples: int = 2**17 # For sparsity loss
alpha_threshold: Optional[schedule.Schedule] = schedule.LogLerpSchedule(
start=10000, end=20000, v0=0.0005, v1=0.005, zero_before_start=True
) # Multiplier for alpha-culling-simulation loss.
param_regularizers: FrozenDict[str, Any] = FrozenDict({
'DensityAndFeaturesMLP_0/HashEncoding_0': (0.03, jnp.mean, 2, 1),
'PropMLP_0/PropHashEncoding_0': (0.03, jnp.mean, 2, 1),
}) # Fine-grained parameter regularization strength.
# Optimization.
lr_init: float = 1e-2 # The initial learning rate.
lr_final: float = 1e-3 # The final learning rate.
lr_delay_steps: int = 100 # The number of "warmup" learning steps.
lr_delay_mult: float = 0.01 # How much sever the "warmup" should be.
adam_beta1: float = 0.9 # Adam's beta2 hyperparameter.
adam_beta2: float = 0.99 # Adam's beta2 hyperparameter.
adam_eps: float = 1e-15 # Adam's epsilon hyperparameter.
grad_max_norm: float = 0.001 # Gradient clipping magnitude, disabled if == 0.
grad_max_val: float = 0.0 # Gradient clipping value, disabled if == 0.
# Data loading.
dataset_loader: str = 'llff' # The type of dataset loader to use.
batching: str = 'all_images' # Batch composition, [single_image, all_images].
patch_size: int = 1 # Resolution of patches sampled for training batches.
factor: int = 4 # The downsample factor of images, 0 for no downsampling.
# Load images in COLMAP vs alphabetical ordering (affects heldout test set).
load_alphabetical: bool = True
forward_facing: bool = False # Set to True for forward-facing LLFF captures.
llffhold: int = 8 # Use every Nth image for the test set. Used only by LLFF.
# If true, use all input images for training.
llff_load_from_poses_bounds: bool = False # If True, load camera poses of
# LLFF data from poses_bounds.npy.
llff_use_all_images_for_training: bool = False
use_tiffs: bool = False # If True, use 32-bit TIFFs. Used only by Blender.
randomized: bool = True # Use randomized stratified sampling.
near: float = 0.2 # Near plane distance.
far: float = 1e6 # Far plane distance.
vocab_tree_path: Optional[str] = None # Path to vocab tree for COLMAP.
def define_common_flags():
flags.DEFINE_multi_string('gin_bindings', None, 'Gin parameter bindings.')
flags.DEFINE_multi_string('gin_configs', None, 'Gin config files.')
def load_config(save_config=True):
"""Load the config, and optionally checkpoint it."""
gin.parse_config_files_and_bindings(
flags.FLAGS.gin_configs, flags.FLAGS.gin_bindings, skip_unknown=True
)
config = Config()
if save_config and jax.host_id() == 0:
utils.makedirs(config.checkpoint_dir)
with utils.open_file(
path.join(config.checkpoint_dir, 'config.gin'), 'w'
) as f:
f.write(gin.config_str())
return config
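
# Example override via Gin bindings (illustrative values): passing
#   --gin_bindings="Config.batch_size = 16384" \
#   --gin_bindings="Config.max_steps = 50000"
# on the command line rebinds those dataclass fields before Config() is built
# inside load_config().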
| [
"[email protected]"
] | |
75c37893e9690925f4f47ba04135171f0874ad45 | 5cda8f2070c83341fc7218946213e11788ec7543 | /reg_task/red_winedata.py | e387c7214cae7f6d523b5ff85e0d4aa5f47d7b54 | [] | no_license | masknugget/somethings | 9b833abb2ee4df27118177f3d8c1523916fe957a | a5c0ceaf5e98c68715ce2b2bad89256b76c2118d | refs/heads/master | 2020-09-14T17:18:56.128901 | 2020-03-21T08:29:30 | 2020-03-21T08:29:30 | 223,197,909 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,692 | py | import numpy as np, pandas as pd
from sklearn.ensemble import RandomForestRegressor
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression, \
Ridge, Lasso, ElasticNet, SGDRegressor
from sklearn.metrics import mean_squared_error
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import PolynomialFeatures
from sklearn.pipeline import Pipeline
import matplotlib.pyplot as plt, seaborn as sns
def get_scores(model, Xtest, ytest):
y_pred = model.predict(Xtest)
return np.sqrt(mean_squared_error(ytest, y_pred)), \
model.__class__.__name__
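
# get_scores reports root-mean-squared error, sqrt(mean((y_true - y_pred)**2));
# lower values indicate a better fit on the held-out test split.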
if __name__ == "__main__":
br = '\n'
d = dict()
X = np.load('data/X_red.npy')
y = np.load('data/y_red.npy')
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.2, random_state=0)
print('rmse (unscaled):')
rfr = RandomForestRegressor(random_state=0, n_estimators=100)
rfr.fit(X_train, y_train)
rmse, rfr_name = get_scores(rfr, X_test, y_test)
d['rfr'] = [rmse]
print(rmse, '(' + rfr_name + ')')
lr = LinearRegression().fit(X_train, y_train)
rmse, lr_name = get_scores(lr, X_test, y_test)
d['lr'] = [rmse]
print(rmse, '(' + lr_name + ')')
ridge = Ridge(random_state=0).fit(X_train, y_train)
rmse, ridge_name = get_scores(ridge, X_test, y_test)
d['ridge'] = [rmse]
print(rmse, '(' + ridge_name + ')')
lasso = Lasso(random_state=0).fit(X_train, y_train)
rmse, lasso_name = get_scores(lasso, X_test, y_test)
d['lasso'] = [rmse]
print(rmse, '(' + lasso_name + ')')
en = ElasticNet(random_state=0).fit(X_train, y_train)
rmse, en_name = get_scores(en, X_test, y_test)
d['en'] = [rmse]
print(rmse, '(' + en_name + ')')
sgdr = SGDRegressor(random_state=0, max_iter=1000, tol=0.001)
sgdr.fit(X_train, y_train)
rmse, sgdr_name = get_scores(sgdr, X_test, y_test)
print(rmse, '(' + sgdr_name + ')', br)
scaler = StandardScaler()
X_train_std = scaler.fit_transform(X_train)
    X_test_std = scaler.transform(X_test)  # reuse the training fit; refitting on test data leaks
print('rmse scaled:')
lr_std = LinearRegression().fit(X_train_std, y_train)
rmse, lr_std_name = get_scores(lr_std, X_test_std, y_test)
print(rmse, '(' + lr_std_name + ')')
rr_std = Ridge(random_state=0).fit(X_train_std, y_train)
rmse, rr_std_name = get_scores(rr_std, X_test_std, y_test)
print(rmse, '(' + rr_std_name + ')')
lasso_std = Lasso(random_state=0).fit(X_train_std, y_train)
rmse, lasso_std_name = get_scores(lasso_std, X_test_std, y_test)
print(rmse, '(' + lasso_std_name + ')')
en_std = ElasticNet(random_state=0).fit(X_train_std, y_train)
rmse, en_std_name = get_scores(en_std, X_test_std, y_test)
print(rmse, '(' + en_std_name + ')')
sgdr_std = SGDRegressor(random_state=0, max_iter=1000, tol=0.001)
sgdr_std.fit(X_train_std, y_train)
rmse, sgdr_std_name = get_scores(sgdr_std, X_test_std, y_test)
d['sgdr_std'] = [rmse]
print(rmse, '(' + sgdr_std_name + ')', br)
pipe = Pipeline([('poly', PolynomialFeatures(degree=2)),
('linear', LinearRegression())])
model = pipe.fit(X_train, y_train)
rmse, poly_name = get_scores(model, X_test, y_test)
d['poly'] = [rmse]
print(PolynomialFeatures().__class__.__name__, '(rmse):')
print(rmse, '(' + poly_name + ')')
algo, rmse = [], []
for key, value in d.items():
algo.append(key)
rmse.append(value[0])
plt.figure('RMSE')
sns.set(style="whitegrid")
ax = sns.barplot(algo, rmse)
plt.title('Red Wine Algorithm Comparison')
plt.xlabel('regressor')
plt.ylabel('RMSE')
plt.show()
| [
"[email protected]"
] | |
a48484aee293a6b54e8aa54af7b99820b5111cd8 | 9cf6a19289e9335f32f1081832dff33e9f1fdc86 | /examples/flask_ext/flask_ext.py | 5f8b0aa4267df72ca84477842773615dfba3c469 | [
"MIT"
] | permissive | SmartManoj/quart | 3f25e7c27d29d930139bea1d34d375f476c897ac | 317562ea660edb7159efc20fa57b95223d408ea0 | refs/heads/master | 2020-06-06T10:25:45.512773 | 2019-06-09T20:11:02 | 2019-06-09T20:11:02 | 192,714,053 | 1 | 0 | MIT | 2019-06-19T10:51:28 | 2019-06-19T10:51:27 | null | UTF-8 | Python | false | false | 2,110 | py | import quart.flask_patch
from secrets import compare_digest
import flask_login
from quart import Quart, redirect, request, url_for
app = Quart(__name__)
app.secret_key = 'secret' # Create an actual secret key for production
login_manager = flask_login.LoginManager()
login_manager.init_app(app)
# Rather than storing passwords in plaintext, use something like
# bcrypt or similar to store the password hash.
users = {'quart': {'password': 'secret'}}
class User(flask_login.UserMixin):
pass
@login_manager.user_loader
def user_loader(username):
if username not in users:
return
user = User()
user.id = username
return user
@login_manager.request_loader
def request_loader(request):
username = request.form.get('username')
password = request.form.get('password', '')
if username not in users:
return
user = User()
user.id = username
user.is_authenticated = compare_digest(password, users[username]['password'])
return user
@app.route('/', methods=['GET', 'POST'])
async def login():
if request.method == 'GET':
return '''
<form method='POST'>
<input type='text' name='username' id='username' placeholder='username'></input>
<input type='password' name='password' id='password' placeholder='password'></input>
<input type='submit' name='submit'></input>
</form>
'''
username = (await request.form)['username']
password = (await request.form)['password']
if username in users and compare_digest(password, users[username]['password']):
user = User()
user.id = username
flask_login.login_user(user)
return redirect(url_for('protected'))
return 'Bad login'
@app.route('/protected')
@flask_login.login_required
async def protected():
return 'Logged in as: ' + flask_login.current_user.id
@app.route('/logout')
async def logout():
flask_login.logout_user()
return 'Logged out'
@login_manager.unauthorized_handler
def unauthorized_handler():
return 'Unauthorized'
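

if __name__ == '__main__':
    # Hedged addition: start Quart's development server with its defaults;
    # use a proper ASGI server (e.g. hypercorn) in production.
    app.run()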
| [
"[email protected]"
] | |
52cae2c239cddc1e62d8442bee69791c62f20002 | 23e55ab51e322a3c0f967976a84f42f70f8ab701 | /tensorflow/python/ops/nn_loss_scaling_utilities_test.py | 427972f5ce13f1401a30ba675a043d9c63486d56 | [
"Apache-2.0"
] | permissive | thangnvit/tensorflow | f58e7c2f95690f337361aa2973f2b84ac7e7f947 | c83887196eb717af66a7b3f008e970b4a226ff8f | refs/heads/master | 2021-02-21T17:51:56.030461 | 2020-03-06T07:55:33 | 2020-03-06T07:58:38 | 245,362,540 | 3 | 0 | Apache-2.0 | 2020-03-06T08:05:41 | 2020-03-06T08:05:40 | null | UTF-8 | Python | false | false | 7,872 | py | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for loss scaling utilities in tensorflow.ops.nn."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
from tensorflow.python.distribute import combinations
from tensorflow.python.distribute import strategy_combinations
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import errors_impl
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import nn_impl
from tensorflow.python.platform import test as test_lib
class LossUtilitiesTest(test_lib.TestCase, parameterized.TestCase):
def setUp(self):
strategy_combinations.set_virtual_cpus_to_at_least(3)
super(LossUtilitiesTest, self).setUp()
def testComputeAverageLossGlobalBatchSize(self):
per_example_loss = [1, 2, 3, 4, 5]
loss = nn_impl.compute_average_loss(per_example_loss, global_batch_size=10)
self.assertEqual(self.evaluate(loss), 1.5)
@combinations.generate(
combinations.combine(
distribution=[
strategy_combinations.mirrored_strategy_with_cpu_1_and_2
],
mode=["graph", "eager"]))
def testComputeAverageLossDefaultGlobalBatchSize(self, distribution):
# Without strategy - num replicas = 1
per_example_loss = constant_op.constant([2.5, 6.2, 5.])
loss = nn_impl.compute_average_loss(per_example_loss)
self.assertAllClose(self.evaluate(loss), (2.5 + 6.2 + 5.) / 3)
# With strategy - num replicas = 2
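    # compute_average_loss divides each replica's shard by the *global* batch
    # size, so summing the per-replica results recovers the overall mean.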
with distribution.scope():
per_replica_losses = distribution.experimental_run_v2(
nn_impl.compute_average_loss, args=(per_example_loss,))
loss = distribution.reduce("SUM", per_replica_losses, axis=None)
self.assertAllClose(self.evaluate(loss), (2.5 + 6.2 + 5.) / 3)
@combinations.generate(
combinations.combine(
distribution=[
strategy_combinations.mirrored_strategy_with_cpu_1_and_2
],
mode=["graph", "eager"]))
def testComputeAverageLossSampleWeights(self, distribution):
with distribution.scope():
# Scalar sample weight
per_replica_losses = distribution.experimental_run_v2(
nn_impl.compute_average_loss,
args=([2., 4., 6.],),
kwargs={"sample_weight": 2})
loss = distribution.reduce("SUM", per_replica_losses, axis=None)
self.assertAllClose(self.evaluate(loss), (2. + 4. + 6.) * 2. / 3)
# Per example sample weight
per_replica_losses = distribution.experimental_run_v2(
nn_impl.compute_average_loss,
args=([2., 4., 6.],),
kwargs={"sample_weight": [0.3, 0.5, 0.2]})
loss = distribution.reduce("SUM", per_replica_losses, axis=None)
self.assertAllClose(
self.evaluate(loss), (2. * 0.3 + 4. * 0.5 + 6. * 0.2) / 3)
# Time-step sample weight
per_replica_losses = distribution.experimental_run_v2(
nn_impl.compute_average_loss,
args=([[2., 0.5], [4., 1.]],),
kwargs={"sample_weight": [[0.3, 0.7], [0.2, 0.8]]})
loss = distribution.reduce("SUM", per_replica_losses, axis=None)
self.assertAllClose(
self.evaluate(loss), (2. * 0.3 + 0.5 * 0.7 + 4. * 0.2 + 1. * 0.8) / 2)
def testComputeAverageLossInvalidSampleWeights(self):
    with self.assertRaisesRegex((ValueError, errors_impl.InvalidArgumentError),
                                (r"Incompatible shapes: \[3\] vs. \[2\]|"
                                 "Dimensions must be equal")):
nn_impl.compute_average_loss([2.5, 6.2, 5.],
sample_weight=[0.2, 0.8],
global_batch_size=10)
@combinations.generate(
combinations.combine(
distribution=[
strategy_combinations.mirrored_strategy_with_cpu_1_and_2
],
mode=["graph", "eager"]))
def testComputeAverageLossDtype(self, distribution):
with distribution.scope():
per_example_loss = constant_op.constant([2., 4., 6.],
dtype=dtypes.float64)
per_replica_losses = distribution.experimental_run_v2(
nn_impl.compute_average_loss,
args=(per_example_loss,),
kwargs={"sample_weight": 2})
loss = distribution.reduce("SUM", per_replica_losses, axis=None)
self.assertEqual(loss.dtype, dtypes.float64)
def testComputeAverageLossInvalidRank(self):
per_example_loss = constant_op.constant(2)
# Static rank
with self.assertRaisesRegex(
ValueError, "Invalid value passed for `per_example_loss`. "
"Expected a tensor with at least rank 1,"):
nn_impl.compute_average_loss(per_example_loss)
with context.graph_mode():
# Dynamic rank
per_example_loss = array_ops.placeholder(dtype=dtypes.float32)
loss = nn_impl.compute_average_loss(per_example_loss)
with self.cached_session() as sess:
with self.assertRaisesRegex(
errors.InvalidArgumentError,
"Invalid value passed for `per_example_loss`. "
"Expected a tensor with at least rank 1."):
sess.run(loss, {per_example_loss: 2})
@combinations.generate(
combinations.combine(
distribution=[
strategy_combinations.mirrored_strategy_with_cpu_1_and_2
],
mode=["graph", "eager"]))
def testComputeAverageLossInCrossReplicaContext(self, distribution):
with distribution.scope():
with self.assertRaisesRegex(
RuntimeError,
"You are calling `compute_average_loss` in cross replica context"):
nn_impl.compute_average_loss([2, 3])
@combinations.generate(
combinations.combine(
distribution=[
strategy_combinations.mirrored_strategy_with_cpu_1_and_2
],
mode=["graph", "eager"]))
def testScaleRegularizationLoss(self, distribution):
# Without strategy - num replicas = 1
reg_losses = constant_op.constant([2.5, 6.2, 5.])
loss = nn_impl.scale_regularization_loss(reg_losses)
self.assertAllClose(self.evaluate(loss), (2.5 + 6.2 + 5.))
# With strategy - num replicas = 2
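    # scale_regularization_loss divides by the number of replicas in sync, so
    # the cross-replica SUM counts the regularization penalty exactly once.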
with distribution.scope():
per_replica_losses = distribution.experimental_run_v2(
nn_impl.scale_regularization_loss, args=(reg_losses,))
loss = distribution.reduce("SUM", per_replica_losses, axis=None)
self.assertAllClose(self.evaluate(loss), (2.5 + 6.2 + 5.))
@combinations.generate(
combinations.combine(
distribution=[
strategy_combinations.mirrored_strategy_with_cpu_1_and_2
],
mode=["graph", "eager"]))
def testScaleRegularizationLossInCrossReplicaContext(self, distribution):
with distribution.scope():
with self.assertRaisesRegex(
RuntimeError, "You are calling `scale_regularization_loss` in "
"cross replica context"):
nn_impl.scale_regularization_loss([2, 3])
if __name__ == "__main__":
test_lib.main()
| [
"[email protected]"
] | |
bd5e34f3398b5facd631a6575e61d6dee48981a9 | b873ea1def0810f67834bf4926901b9a8fead362 | /exam_preparation_10_21/problem_1 - taxi_express.py | 214d77abc7ad1bb307fe421628f0f89d08382383 | [] | no_license | NikiDimov/SoftUni-Python-Advanced | 20f822614fa0fa7de6ded3956fa8d40d589a4a86 | d6c1fe886a3c27c82f03e5e4a6c670f0905d54e6 | refs/heads/main | 2023-08-23T17:42:32.063057 | 2021-10-25T10:32:03 | 2021-10-25T10:32:03 | 328,750,268 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 564 | py | from collections import deque
customers = deque(int(el) for el in input().split(', '))
taxis = [int(el) for el in input().split(', ')]
total_time = 0
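# Greedy one-pass matching: the front customer takes the taxi at the back of
# the list if that taxi's value covers the trip time (adding the minutes to
# the total); otherwise that taxi is discarded and the next one is tried.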
while customers and taxis:
if customers[0] <= taxis[-1]:
total_time += customers.popleft()
taxis.pop()
else:
taxis.pop()
if not customers:
print(f"All customers were driven to their destinations\nTotal time: {total_time} minutes")
if not taxis and customers:
    print(f"Not all customers were driven to their destinations\nCustomers left: {', '.join(map(str, customers))}")
| [
"[email protected]"
] | |
b6df72d27ae81c311084682cded4083282df6a16 | 11c39c48a02d25f2dffab7db76649949a0cca5e5 | /venv/bin/gunicorn_django | 3793a5d4203c3914062d0b6f343e9a2816ec6fc6 | [] | no_license | lssdeveloper/djangoecommerce | 3a1fb8e9208264e143142b112f7ed93fe3654dfe | f93b23dad7c4753cad23cb87f329226aacf1a2f6 | refs/heads/main | 2023-01-03T02:48:52.010251 | 2020-11-05T01:17:00 | 2020-11-05T01:17:00 | 310,119,826 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 248 | #!/home/leandro/djangoecommerce/venv/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from gunicorn.app.djangoapp import run
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(run())
| [
"[email protected]"
] | ||
8a509f4b1924614dc3b87e2b87e2cb1716aa792c | 711756b796d68035dc6a39060515200d1d37a274 | /output_cog_tags/initial_6208.py | 93485a216bd02c33878868b948176cbbe6a5050b | [] | no_license | batxes/exocyst_scripts | 8b109c279c93dd68c1d55ed64ad3cca93e3c95ca | a6c487d5053b9b67db22c59865e4ef2417e53030 | refs/heads/master | 2020-06-16T20:16:24.840725 | 2016-11-30T16:23:16 | 2016-11-30T16:23:16 | 75,075,164 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,333 | py | import _surface
import chimera
try:
    import chimera.runCommand
except ImportError:
    pass
from VolumePath import markerset as ms
try:
    from VolumePath import Marker_Set, Link
    new_marker_set = Marker_Set
except ImportError:
    from VolumePath import volume_path_dialog
    d = volume_path_dialog(True)
    new_marker_set = d.new_marker_set
marker_sets={}
surf_sets={}
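# Each block below creates (or reuses) a named marker set for one COG-complex
# tag and places a single sphere marker at the given (x, y, z) position with
# an RGB color and a fixed 21.9005 radius.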
if "Cog1_Anch" not in marker_sets:
s=new_marker_set('Cog1_Anch')
marker_sets["Cog1_Anch"]=s
s= marker_sets["Cog1_Anch"]
mark=s.place_marker((556, 471, 306), (0, 0, 1), 21.9005)
if "Cog2_GFPN" not in marker_sets:
s=new_marker_set('Cog2_GFPN')
marker_sets["Cog2_GFPN"]=s
s= marker_sets["Cog2_GFPN"]
mark=s.place_marker((897, 721, 126), (1, 0.5, 0), 21.9005)
if "Cog2_GFPC" not in marker_sets:
s=new_marker_set('Cog2_GFPC')
marker_sets["Cog2_GFPC"]=s
s= marker_sets["Cog2_GFPC"]
mark=s.place_marker((695, 488, 509), (1, 0.5, 0), 21.9005)
if "Cog2_Anch" not in marker_sets:
s=new_marker_set('Cog2_Anch')
marker_sets["Cog2_Anch"]=s
s= marker_sets["Cog2_Anch"]
mark=s.place_marker((760, 54, 917), (1, 0.5, 0), 21.9005)
if "Cog3_GFPN" not in marker_sets:
s=new_marker_set('Cog3_GFPN')
marker_sets["Cog3_GFPN"]=s
s= marker_sets["Cog3_GFPN"]
mark=s.place_marker((737, 131, 568), (1, 0.87, 0), 21.9005)
if "Cog3_GFPC" not in marker_sets:
s=new_marker_set('Cog3_GFPC')
marker_sets["Cog3_GFPC"]=s
s= marker_sets["Cog3_GFPC"]
mark=s.place_marker((10, 544, 10), (1, 0.87, 0), 21.9005)
if "Cog3_Anch" not in marker_sets:
s=new_marker_set('Cog3_Anch')
marker_sets["Cog3_Anch"]=s
s= marker_sets["Cog3_Anch"]
mark=s.place_marker((919, 877, 152), (1, 0.87, 0), 21.9005)
if "Cog4_GFPN" not in marker_sets:
s=new_marker_set('Cog4_GFPN')
marker_sets["Cog4_GFPN"]=s
s= marker_sets["Cog4_GFPN"]
mark=s.place_marker((547, 784, 262), (0.97, 0.51, 0.75), 21.9005)
if "Cog4_GFPC" not in marker_sets:
s=new_marker_set('Cog4_GFPC')
marker_sets["Cog4_GFPC"]=s
s= marker_sets["Cog4_GFPC"]
mark=s.place_marker((851, 120, 466), (0.97, 0.51, 0.75), 21.9005)
if "Cog4_Anch" not in marker_sets:
s=new_marker_set('Cog4_Anch')
marker_sets["Cog4_Anch"]=s
s= marker_sets["Cog4_Anch"]
mark=s.place_marker((949, 188, 84), (0.97, 0.51, 0.75), 21.9005)
if "Cog5_GFPN" not in marker_sets:
s=new_marker_set('Cog5_GFPN')
marker_sets["Cog5_GFPN"]=s
s= marker_sets["Cog5_GFPN"]
mark=s.place_marker((153, 179, 743), (0.39, 0.31, 0.14), 21.9005)
if "Cog5_GFPC" not in marker_sets:
s=new_marker_set('Cog5_GFPC')
marker_sets["Cog5_GFPC"]=s
s= marker_sets["Cog5_GFPC"]
mark=s.place_marker((742, 895, 140), (0.39, 0.31, 0.14), 21.9005)
if "Cog5_Anch" not in marker_sets:
s=new_marker_set('Cog5_Anch')
marker_sets["Cog5_Anch"]=s
s= marker_sets["Cog5_Anch"]
mark=s.place_marker((664, 562, 878), (0.39, 0.31, 0.14), 21.9005)
if "Cog6_GFPN" not in marker_sets:
s=new_marker_set('Cog6_GFPN')
marker_sets["Cog6_GFPN"]=s
s= marker_sets["Cog6_GFPN"]
mark=s.place_marker((727, 688, 584), (0.6, 0.31, 0.64), 21.9005)
if "Cog6_GFPC" not in marker_sets:
s=new_marker_set('Cog6_GFPC')
marker_sets["Cog6_GFPC"]=s
s= marker_sets["Cog6_GFPC"]
mark=s.place_marker((179, 781, 550), (0.6, 0.31, 0.64), 21.9005)
if "Cog6_Anch" not in marker_sets:
s=new_marker_set('Cog6_Anch')
marker_sets["Cog6_Anch"]=s
s= marker_sets["Cog6_Anch"]
mark=s.place_marker((564, 808, 971), (0.6, 0.31, 0.64), 21.9005)
if "Cog7_GFPN" not in marker_sets:
s=new_marker_set('Cog7_GFPN')
marker_sets["Cog7_GFPN"]=s
s= marker_sets["Cog7_GFPN"]
mark=s.place_marker((141, 248, 291), (0.89, 0.1, 0.1), 21.9005)
if "Cog7_GFPC" not in marker_sets:
s=new_marker_set('Cog7_GFPC')
marker_sets["Cog7_GFPC"]=s
s= marker_sets["Cog7_GFPC"]
mark=s.place_marker((625, 371, 591), (0.89, 0.1, 0.1), 21.9005)
if "Cog7_Anch" not in marker_sets:
s=new_marker_set('Cog7_Anch')
marker_sets["Cog7_Anch"]=s
s= marker_sets["Cog7_Anch"]
mark=s.place_marker((411, 435, 483), (0.89, 0.1, 0.1), 21.9005)
if "Cog8_GFPC" not in marker_sets:
s=new_marker_set('Cog8_GFPC')
marker_sets["Cog8_GFPC"]=s
s= marker_sets["Cog8_GFPC"]
mark=s.place_marker((544, 429, 684), (0.3, 0.69, 0.29), 21.9005)
if "Cog8_Anch" not in marker_sets:
s=new_marker_set('Cog8_Anch')
marker_sets["Cog8_Anch"]=s
s= marker_sets["Cog8_Anch"]
mark=s.place_marker((318, 291, 443), (0.3, 0.69, 0.29), 21.9005)
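# surf_sets is never populated in this generated script, so this registration
# loop is effectively a no-op kept from the script template.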
for k in surf_sets.keys():
chimera.openModels.add([surf_sets[k]])
| [
"[email protected]"
] | |
40736e3e38b08dc2b3ebfc6d484dbfb39a27d10d | c463e77c3d76e6b4810e202541d3f3f7f91bcf60 | /build/moveit_robots/r2_moveit_generated/catkin_generated/pkg.develspace.context.pc.py | 0a5ebbbca222f990db00a7a2183202ae22158a7e | [] | no_license | MGRNascimento/Tese | 18087ee59dfee96ee000c9f16c646d1750174285 | bf78d417849a74d9c5a520d40dcbebeadf084706 | refs/heads/master | 2020-06-23T13:57:01.699657 | 2019-10-23T21:47:19 | 2019-10-23T21:47:19 | 198,638,709 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 381 | py | # generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "".split(';') if "" != "" else []
PROJECT_CATKIN_DEPENDS = "".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "r2_moveit_generated"
PROJECT_SPACE_DIR = "/home/miguel/catkin_ws/devel"
PROJECT_VERSION = "1.1.1"
| [
"[email protected]"
] | |