ext | sha | content
---|---|---|
py
|
1a5bda72faf2157deb02de5225bc5aa41a6140a1
|
import numpy as np
import scipy as sp
from scipy.linalg import block_diag
from qpsolvers import solve_qp
import sympy as sy
from sympy.physics import mechanics
from scipy.signal import cont2discrete
class SimModel(object):
def __init__(self, param=None, NX=None, NU=None):
assert param is not None
assert NX is not None
assert NU is not None
self.param = param
self.NX = NX
self.NU = NU
self.jacA, self.jacB = self.genLinModel()
self.force = self.genDynamicEquation()
def SYMPY_rh_eq(self):
raise NotImplementedError('SYMPY_rh_eq is not implemented')
def genJacobian(self):
MAT = self.SYMPY_rh_eq()
q = sy.symbols('q:{0}'.format(self.NX))
u = sy.symbols('u:{0}'.format(self.NU))
return MAT.jacobian(q), MAT.jacobian(u)
def genDynamicEquation(self):
q = sy.symbols("q:{0}".format(self.NX))
u = sy.symbols("u:{0}".format(self.NU))
return sy.lambdify([q,u], self.SYMPY_rh_eq(), [{'atan':np.arctan, 'atan2':np.arctan2}, "numpy"])
def genLinModel(self):
q = sy.symbols("q:{0}".format(self.NX))
u = sy.symbols("u:{0}".format(self.NU))
A, B = self.genJacobian()
return (sy.lambdify([q,u], np.squeeze(A), [{'atan':np.arctan, 'atan2':np.arctan2}, "numpy"]),
sy.lambdify([q,u], np.squeeze(B), [{'atan':np.arctan, 'atan2':np.arctan2}, "numpy"]))
def genDModel(self, x, dq, u, dT=0.1):
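# Descriptive note: the dynamics are linearized around (x, dq, u) as
# dx/dt ≈ A_c x + B_c u + g_c; the affine term g_c is stacked onto B_c as an
# extra input column so cont2discrete discretizes A_c, B_c and g_c in one call,
# after which g_d is split back out of the discretized input matrix.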
vector = np.hstack((x, dq))
f = self.force(vector, u).T.flatten()
A_c = np.array(self.jacA(vector, u))
B_c = np.array(self.jacB(vector, u))
g_c = f - A_c@vector - B_c@u
B = np.hstack((B_c, g_c.reshape((-1,1))))
A_d, B_d, _, _, _ = cont2discrete((A_c, B, 0, 0), dT)
g_d = B_d[:,self.NU]
B_d = B_d[:,0:self.NU]
return A_d, B_d, g_d
def PredictForwardEuler(self, x, dq, u, dt):
vector = np.hstack((x, dq))
d_vector = self.force(vector, u).T.flatten()
vector = vector + dt * d_vector
return vector[0:3], vector[3:6]
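# A minimal illustrative subclass (hypothetical; not part of this module) showing
# the contract SimModel expects: SYMPY_rh_eq must return the right-hand side as a
# sympy Matrix written in the symbols q0..q{NX-1} and u0..u{NU-1}, which
# genJacobian/genDynamicEquation differentiate and lambdify. Note that
# PredictForwardEuler above assumes a 6-state (pose + velocity) layout.
#
#   class PointMass1D(SimModel):
#       def __init__(self):
#           super().__init__(param={'m': 1.0}, NX=2, NU=1)
#       def SYMPY_rh_eq(self):
#           q = sy.symbols('q:2')   # (position, velocity)
#           u = sy.symbols('u:1')   # (force,)
#           return sy.Matrix([q[1], u[0] / self.param['m']])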
class NMPC():
def __init__(self, dT=0.02, time_horizon = 20,
H = None, J = None, q = None,
RH = None, RJ = None, r = None,
H_N = None, J_N = None, q_N = None,
dmodel = None,
G = None, h = None,
normalization_x = None,
normalization_u = None,
x_ubounds=[], x_lbounds=[],
u_ubounds=[], u_lbounds=[]):
assert H is not None
assert J is not None
assert H_N is not None
assert J_N is not None
assert dmodel is not None
self.dT = dT
self.time_horizon = time_horizon
self.model = dmodel
self.x_l = np.asarray(x_lbounds,dtype=np.float64)
self.x_u = np.asarray(x_ubounds,dtype=np.float64)
self.u_l = np.asarray(u_lbounds,dtype=np.float64)
self.u_u = np.asarray(u_ubounds,dtype=np.float64)
self.NX = self.x_u.shape[0]
self.NU = self.u_u.shape[0]
class StaticStageCost():
def __init__(self, weight):
self.weight = np.asarray(weight)
def __call__(self, x_guess, u_guess, x_ref):
return self.weight
class StaticValueFunction():
def __init__(self, weight):
self.weight = np.asarray(weight)
def __call__(self, x_guess, x_ref):
return self.weight
self.H = H if callable(H) else StaticStageCost(H)
self.J = J if callable(J) else StaticStageCost(J)
self.H_N = H_N if callable(H_N) else StaticValueFunction(H_N)
self.J_N = J_N if callable(J_N) else StaticValueFunction(J_N)
self.R_H = RH if callable(RH) else StaticStageCost(RH)
self.R_J = RJ if callable(RJ) else StaticStageCost(RJ)
if q is None:
q = np.zeros(self.NX)
self.q = q
if r is None:
r = np.zeros(self.NU)
self.r = r
if G is None:
self.G = None
self.h = None
else:
assert h is not None
self.G = G if callable(G) else StaticStageCost(G)
self.h = h if callable(h) else StaticStageCost(h)
if normalization_x is not None:
self.Norm = np.diag(normalization_x*(time_horizon+1) + normalization_u*time_horizon)
self.Norm_inv = np.linalg.inv(self.Norm)
else:
self.Norm = None
self.Norm_inv = None
def iterate_NMPC(self, x_guess, u_guess, x_ref, verbose=False, warmstart=False):
T = self.time_horizon
X_DIM = self.NX*(T+1)
U_DIM = self.NU*(T)
P_Q_blocks = []
q_q_blocks = []
P_R_blocks = []
q_r_blocks = []
for k in range(T+1):
if k==T:
P_Q_blocks.append(self.H_N(x_guess[:,k], x_ref[:,k]))
q_q_blocks.append(self.J_N(x_guess[:,k], x_ref[:,k])+self.q)
else:
P_Q_blocks.append(self.H(x_guess[:,k], u_guess[:,k], x_ref[:,k]))
q_q_blocks.append(self.J(x_guess[:,k], u_guess[:,k], x_ref[:,k])+self.q)
P_R_blocks.append(self.R_H(x_guess[:,k], u_guess[:,k], x_ref[:,k]))
q_r_blocks.append(self.R_J(x_guess[:,k], u_guess[:,k], x_ref[:,k])+self.r)
P = block_diag(*P_Q_blocks,*P_R_blocks)
q = np.hstack(q_q_blocks+q_r_blocks)
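# Symmetrize the stacked cost Hessian so the QP solver receives P with P == P.T.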
P = 0.5*(P.T+P)
Ad, Bd, gd = zip(*[self.model(q[:3], q[3:], u, self.dT)
for q, u in zip(x_guess.T, u_guess.T)])
A = block_diag(*Ad)
B = block_diag(*Bd)
b = np.hstack((
x_guess[:,0],
- np.hstack(gd)
))
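# The equality constraints A z = b below encode the initial condition
# x_0 = x_guess[:,0] plus the linearized dynamics
# x_{k+1} = A_d x_k + B_d u_k + g_d at every stage (hence the -g_d terms in b).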
A = np.block([
[np.eye(self.NX), np.zeros((self.NX, X_DIM + U_DIM - self.NX))],
[A, np.zeros((X_DIM - self.NX, self.NX)), B]
])
A -= np.block([
[np.zeros((self.NX, X_DIM+U_DIM))],
[
np.zeros((X_DIM-self.NX, self.NX)),
np.eye(X_DIM - self.NX),
np.zeros_like(B)
]
])
### Track Constraint
G = [
self.G(x_g, x_r)
for x_g,x_r
in zip(x_guess.T, x_ref.T)
]
h_block = [
np.asarray(self.G(x_g, x_r))@x_g - np.asarray(self.h(x_g, x_r) )
for x_g, x_r
in zip(x_guess.T, x_ref.T)
]
G = np.hstack([
block_diag(*G),
np.zeros((T+1, U_DIM))
])
h = np.hstack(h_block)
x_l = np.tile(self.x_l, T+1)
# Set trust region
x_l[6::self.NX] = -0.2 + x_guess[6]
u_l = np.tile(self.u_l, T)
x_l = np.hstack((x_l, u_l))
x_u = np.tile(self.x_u, T+1)
# Set trust region
x_u[6::self.NX] = 0.2 + x_guess[6]
u_u = np.tile(self.u_u, T)
x_u = np.hstack((x_u, u_u))
#print([x.shape for x in [P, q, G, h, A, b, x_l, x_u]])
try:
if self.Norm is None:
ret = solve_qp(P, q, G, h, A, b, x_l, x_u, solver='osqp')
else:
init_val = [email protected]((x_guess.T.ravel(), u_guess.T.ravel()))
#print("Equation Const", np.all(A@init_val==b))
#print("InEquation Const", np.all(G@init_val<=h))
#print("Lower bound Const", np.all(init_val>=x_l))
#print("Upper bound Const", np.all(init_val<=x_u))
#print("")
ret = solve_qp(self.Norm@[email protected],
[email protected],
[email protected], h,
self.Norm_inv[:X_DIM,:X_DIM]@[email protected], self.Norm_inv[:X_DIM,:X_DIM]@b,
self.Norm_inv@x_l, self.Norm_inv@x_u,
initvals=init_val, solver='osqp')
if ret is not None:
ret = self.Norm@ret
except Exception as e:
print(e)
return np.zeros_like(x_guess), np.zeros_like(u_guess), None
#if ret.dtype != np.object:
if ret is not None:
ret_x = ret[:X_DIM].reshape((-1, self.NX)).T
ret_u = ret[X_DIM:].reshape((-1, self.NU)).T
return ret_x, ret_u, 0.
else:
return x_guess, u_guess, None
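# A hedged usage sketch (illustrative; nmpc, initial_rollout, max_sqp_iters and tol
# are hypothetical names not defined in this file): iterate_NMPC solves a single QP
# around the current guess, so a typical outer loop re-linearizes until the
# trajectory stops changing.
#
#   x_guess, u_guess = initial_rollout()
#   for _ in range(max_sqp_iters):
#       x_new, u_new, status = nmpc.iterate_NMPC(x_guess, u_guess, x_ref)
#       if status is None or np.max(np.abs(x_new - x_guess)) < tol:
#           break
#       x_guess, u_guess = x_new, u_new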
|
py
|
1a5bdaf789f67889b55f2d0c1fc0fe3a5122b896
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from typing import Optional
from flask_babel import lazy_gettext as _
from marshmallow.validate import ValidationError
from superset.commands.exceptions import (
CommandInvalidError,
CreateFailedError,
DeleteFailedError,
ForbiddenError,
ImportFailedError,
ObjectNotFoundError,
UpdateFailedError,
)
class DashboardSlugExistsValidationError(ValidationError):
"""
Marshmallow validation error for dashboard slug already exists
"""
def __init__(self) -> None:
super().__init__([_("Must be unique")], field_name="slug")
class DashboardInvalidError(CommandInvalidError):
message = _("Dashboard parameters are invalid.")
class DashboardNotFoundError(ObjectNotFoundError):
def __init__(
self, dashboard_id: Optional[str] = None, exception: Optional[Exception] = None
) -> None:
super().__init__("Dashboard", dashboard_id, exception)
class DashboardCreateFailedError(CreateFailedError):
message = _("Dashboard could not be created.")
class DashboardBulkDeleteFailedError(DeleteFailedError):
message = _("Dashboards could not be deleted.")
class DashboardBulkDeleteFailedReportsExistError(DashboardBulkDeleteFailedError):
message = _("There are associated alerts or reports")
class DashboardUpdateFailedError(UpdateFailedError):
message = _("Dashboard could not be updated.")
class DashboardDeleteFailedError(DeleteFailedError):
message = _("Dashboard could not be deleted.")
class DashboardDeleteFailedReportsExistError(DashboardDeleteFailedError):
message = _("There are associated alerts or reports")
class DashboardForbiddenError(ForbiddenError):
message = _("Changing this Dashboard is forbidden")
class DashboardImportError(ImportFailedError):
message = _("Import dashboard failed for an unknown reason")
class DashboardAccessDeniedError(ForbiddenError):
message = _("You don't have access to this dashboard.")
|
py
|
1a5bdb003d9e8a60d94377577223df5c9d0e7a72
|
import pytest
from tornado import gen
from distributed import Executor, Scheduler
from distributed.diagnostics.progressbar import TextProgressBar, progress
from distributed.utils_test import (cluster, _test_cluster, loop, inc,
div, dec, cluster_center)
from time import time, sleep
def test_text_progressbar(capsys, loop):
with cluster(nanny=True) as (s, [a, b]):
with Executor(('127.0.0.1', s['port']), loop=loop) as e:
futures = e.map(inc, range(10))
p = TextProgressBar(futures, interval=0.01, complete=True)
e.gather(futures)
start = time()
while p.status != 'finished':
sleep(0.01)
assert time() - start < 5
check_bar_completed(capsys)
assert p._last_response == {'all': 10,
'remaining': 0,
'status': 'finished'}
assert p.stream.closed()
def test_TextProgressBar_error(loop, capsys):
@gen.coroutine
def f(c, a, b):
s = Scheduler((c.ip, c.port), loop=loop)
yield s.sync_center()
done = s.start(0)
s.update_graph(tasks={'x': (div, 1, 0)},
keys=['x'],
dependencies={})
progress = TextProgressBar(['x'], scheduler=(s.ip, s.port),
start=False, interval=0.01)
yield progress.listen()
assert progress.status == 'error'
assert progress.stream.closed()
progress = TextProgressBar(['x'], scheduler=(s.ip, s.port),
start=False, interval=0.01)
yield progress.listen()
assert progress.status == 'error'
assert progress.stream.closed()
s.close()
yield done
_test_cluster(f, loop)
def test_TextProgressBar_empty(loop, capsys):
@gen.coroutine
def f(c, a, b):
s = Scheduler((c.ip, c.port), loop=loop)
yield s.sync_center()
done = s.start(0)
progress = TextProgressBar([], scheduler=(s.ip, s.port), start=False,
interval=0.01)
yield progress.listen()
assert progress.status == 'finished'
check_bar_completed(capsys)
s.close()
yield done
_test_cluster(f, loop)
def check_bar_completed(capsys, width=40):
out, err = capsys.readouterr()
bar, percent, time = [i.strip() for i in out.split('\r')[-1].split('|')]
assert bar == '[' + '#'*width + ']'
assert percent == '100% Completed'
def test_progress_function(loop, capsys):
with cluster() as (s, [a, b]):
with Executor(('127.0.0.1', s['port']), loop=loop) as e:
f = e.submit(lambda: 1)
g = e.submit(lambda: 2)
progress([[f], [[g]]], notebook=False)
check_bar_completed(capsys)
|
py
|
1a5bdb3d3898e98ea248c79bff48c2cf2f4354b4
|
import sys
import os.path
from setuptools import setup, find_packages
PACKAGE_NAME = 'arcana'
# Get version from module inside package
sys.path.insert(0, os.path.join(os.path.dirname(__file__),
PACKAGE_NAME))
from __about__ import __version__, install_requires, tests_require # noqa pylint: disable=no-name-in-module
sys.path.pop(0)
setup(
name=PACKAGE_NAME,
version=__version__,
author='Tom G. Close',
author_email='[email protected]',
packages=find_packages(),
url='https://github.com/monashbiomedicalimaging/arcana',
license='The Apache Software Licence 2.0',
description=(
'Abstracted repository-centric analysis framework'),
long_description=open('README.rst').read(),
install_requires=install_requires,
tests_require=tests_require,
extras_require={
'test': tests_require},
classifiers=[
"Development Status :: 4 - Beta",
"Intended Audience :: Healthcare Industry",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: Apache Software License",
"Natural Language :: English",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Topic :: Scientific/Engineering :: Bio-Informatics",
"Topic :: Scientific/Engineering :: Medical Science Apps."],
keywords='repository analysis')
|
py
|
1a5bdbef17583f343dbd9edc09515d7729bcca78
|
"""
All joins in a single app, inspired by
https://stackoverflow.com/questions/45990633/what-are-the-various-join-types-in-spark.
Used in Spark in Action 2e, http://jgp.net/sia
@author rambabu.posa
"""
import logging
from pyspark.sql import SparkSession
from pyspark.sql.types import (StructType, StructField,
IntegerType, StringType)
def create_left_df(spark):
schema = StructType([
StructField('id', IntegerType(), True),
StructField('value', StringType(), True)
])
rows = [
(1, "Value 1"),
(2, "Value 2"),
(3, "Value 3"),
(4, "Value 4")
]
return spark.createDataFrame(rows, schema)
def create_right_df(spark):
schema = StructType([
StructField('id', IntegerType(), True),
StructField('value', StringType(), True)
])
rows = [
(3, "Value 3"),
(4, "Value 4"),
(4, "Value 4_1"),
(5, "Value 5"),
(6, "Value 6")
]
return spark.createDataFrame(rows, schema)
def main(spark):
left_df = create_left_df(spark)
left_df.show()
right_df = create_right_df(spark)
right_df.show()
join_types = [
"inner",
"outer",
"full",
"full_outer",
"left",
"left_outer",
"right",
"right_outer",
"left_semi",
"left_anti",
"cross"
]
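# For the sample frames above, e.g. "inner" keeps ids 3 and 4 (the duplicated
# right-hand id 4 produces two rows) while "left_anti" keeps only the unmatched
# left rows, ids 1 and 2.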
for join_type in join_types:
logging.warning(join_type.upper() + " JOIN")
df = left_df.join(right_df, left_df["id"] == right_df["id"], join_type)
df.orderBy(left_df["id"]).show()
logging.warning("CROSS JOIN (without a column")
df = left_df.crossJoin(right_df)
df.orderBy(left_df["id"]).show()
if __name__ == "__main__":
# Creates a session on a local master
spark = SparkSession.builder.appName("All joins!") \
.master("local[*]").getOrCreate()
# setting log level, update this as per your requirement
spark.sparkContext.setLogLevel("warn")
main(spark)
spark.stop()
|
py
|
1a5bdc62a556a8c3efc6ac442c7464bebfcac2e0
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.1 on 2017-05-10 11:19
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('kisa', '0001_initial'),
]
operations = [
migrations.RemoveField(
model_name='laji',
name='kilpailu',
),
migrations.AddField(
model_name='kilpailu',
name='kisaaja',
field=models.ManyToManyField(to='kisa.Kisaaja'),
),
migrations.AddField(
model_name='kilpailu',
name='laji',
field=models.ManyToManyField(to='kisa.Laji'),
),
]
|
py
|
1a5bdcdb3bed0a6b19f70222d53ffcd9defd786c
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'distinfo.ui'
#
# Created by: PyQt5 UI code generator 5.6
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_fgdc_distinfo(object):
def setupUi(self, fgdc_distinfo):
fgdc_distinfo.setObjectName("fgdc_distinfo")
fgdc_distinfo.resize(1103, 862)
self.verticalLayout_10 = QtWidgets.QVBoxLayout(fgdc_distinfo)
self.verticalLayout_10.setObjectName("verticalLayout_10")
self.widget = QtWidgets.QWidget(fgdc_distinfo)
sizePolicy = QtWidgets.QSizePolicy(
QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Fixed
)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.widget.sizePolicy().hasHeightForWidth())
self.widget.setSizePolicy(sizePolicy)
self.widget.setMinimumSize(QtCore.QSize(0, 25))
self.widget.setObjectName("widget")
self.horizontalLayout = QtWidgets.QHBoxLayout(self.widget)
self.horizontalLayout.setContentsMargins(10, 2, 2, 2)
self.horizontalLayout.setObjectName("horizontalLayout")
self.label_5 = QtWidgets.QLabel(self.widget)
sizePolicy = QtWidgets.QSizePolicy(
QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Minimum
)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label_5.sizePolicy().hasHeightForWidth())
self.label_5.setSizePolicy(sizePolicy)
self.label_5.setMinimumSize(QtCore.QSize(15, 0))
self.label_5.setMaximumSize(QtCore.QSize(16777215, 20))
font = QtGui.QFont()
font.setFamily("Arial")
font.setPointSize(9)
font.setBold(False)
font.setItalic(False)
font.setWeight(50)
self.label_5.setFont(font)
self.label_5.setTextFormat(QtCore.Qt.RichText)
self.label_5.setScaledContents(False)
self.label_5.setAlignment(QtCore.Qt.AlignHCenter | QtCore.Qt.AlignTop)
self.label_5.setIndent(0)
self.label_5.setObjectName("label_5")
self.horizontalLayout.addWidget(self.label_5)
spacerItem = QtWidgets.QSpacerItem(
0, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum
)
self.horizontalLayout.addItem(spacerItem)
self.widget_2 = QtWidgets.QWidget(self.widget)
sizePolicy = QtWidgets.QSizePolicy(
QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Preferred
)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.widget_2.sizePolicy().hasHeightForWidth())
self.widget_2.setSizePolicy(sizePolicy)
self.widget_2.setMinimumSize(QtCore.QSize(100, 0))
self.widget_2.setObjectName("widget_2")
self.verticalLayout_7 = QtWidgets.QVBoxLayout(self.widget_2)
self.verticalLayout_7.setContentsMargins(0, 0, 0, 0)
self.verticalLayout_7.setObjectName("verticalLayout_7")
self.horizontalLayout_3 = QtWidgets.QHBoxLayout()
self.horizontalLayout_3.setContentsMargins(5, -1, 5, -1)
self.horizontalLayout_3.setSpacing(0)
self.horizontalLayout_3.setObjectName("horizontalLayout_3")
self.label_4 = QtWidgets.QLabel(self.widget_2)
sizePolicy = QtWidgets.QSizePolicy(
QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Minimum
)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label_4.sizePolicy().hasHeightForWidth())
self.label_4.setSizePolicy(sizePolicy)
self.label_4.setMinimumSize(QtCore.QSize(15, 0))
self.label_4.setMaximumSize(QtCore.QSize(16777215, 20))
font = QtGui.QFont()
font.setFamily("Arial")
font.setPointSize(9)
font.setBold(False)
font.setItalic(False)
font.setWeight(50)
self.label_4.setFont(font)
self.label_4.setTextFormat(QtCore.Qt.RichText)
self.label_4.setScaledContents(True)
self.label_4.setAlignment(
QtCore.Qt.AlignBottom | QtCore.Qt.AlignLeading | QtCore.Qt.AlignLeft
)
self.label_4.setIndent(0)
self.label_4.setObjectName("label_4")
self.horizontalLayout_3.addWidget(self.label_4)
self.label_3 = QtWidgets.QLabel(self.widget_2)
sizePolicy = QtWidgets.QSizePolicy(
QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Minimum
)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label_3.sizePolicy().hasHeightForWidth())
self.label_3.setSizePolicy(sizePolicy)
self.label_3.setMinimumSize(QtCore.QSize(79, 0))
self.label_3.setMaximumSize(QtCore.QSize(16777215, 20))
font = QtGui.QFont()
font.setFamily("Arial")
font.setPointSize(9)
font.setBold(False)
font.setItalic(False)
font.setWeight(50)
self.label_3.setFont(font)
self.label_3.setTextFormat(QtCore.Qt.RichText)
self.label_3.setScaledContents(False)
self.label_3.setAlignment(
QtCore.Qt.AlignLeading | QtCore.Qt.AlignLeft | QtCore.Qt.AlignVCenter
)
self.label_3.setIndent(0)
self.label_3.setObjectName("label_3")
self.horizontalLayout_3.addWidget(self.label_3)
self.verticalLayout_7.addLayout(self.horizontalLayout_3)
self.horizontalLayout.addWidget(self.widget_2)
self.verticalLayout_10.addWidget(self.widget)
self.verticalLayout_2 = QtWidgets.QVBoxLayout()
self.verticalLayout_2.setObjectName("verticalLayout_2")
self.radio_distno = QtWidgets.QRadioButton(fgdc_distinfo)
self.radio_distno.setChecked(True)
self.radio_distno.setObjectName("radio_distno")
self.verticalLayout_2.addWidget(self.radio_distno)
self.horizontalLayout_9 = QtWidgets.QHBoxLayout()
self.horizontalLayout_9.setSizeConstraint(QtWidgets.QLayout.SetMaximumSize)
self.horizontalLayout_9.setObjectName("horizontalLayout_9")
self.line = QtWidgets.QFrame(fgdc_distinfo)
sizePolicy = QtWidgets.QSizePolicy(
QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Fixed
)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.line.sizePolicy().hasHeightForWidth())
self.line.setSizePolicy(sizePolicy)
self.line.setMaximumSize(QtCore.QSize(16777215, 100))
self.line.setFrameShadow(QtWidgets.QFrame.Raised)
self.line.setLineWidth(3)
self.line.setFrameShape(QtWidgets.QFrame.HLine)
self.line.setObjectName("line")
self.horizontalLayout_9.addWidget(self.line)
self.horizontalLayout_10 = QtWidgets.QHBoxLayout()
self.horizontalLayout_10.setObjectName("horizontalLayout_10")
spacerItem1 = QtWidgets.QSpacerItem(
20, 20, QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Minimum
)
self.horizontalLayout_10.addItem(spacerItem1)
self.label = QtWidgets.QLabel(fgdc_distinfo)
self.label.setMaximumSize(QtCore.QSize(16777215, 100))
self.label.setObjectName("label")
self.horizontalLayout_10.addWidget(self.label)
spacerItem2 = QtWidgets.QSpacerItem(
20, 20, QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Minimum
)
self.horizontalLayout_10.addItem(spacerItem2)
self.horizontalLayout_9.addLayout(self.horizontalLayout_10)
self.line_2 = QtWidgets.QFrame(fgdc_distinfo)
sizePolicy = QtWidgets.QSizePolicy(
QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Fixed
)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.line_2.sizePolicy().hasHeightForWidth())
self.line_2.setSizePolicy(sizePolicy)
self.line_2.setMaximumSize(QtCore.QSize(16777215, 100))
self.line_2.setFrameShadow(QtWidgets.QFrame.Raised)
self.line_2.setLineWidth(3)
self.line_2.setFrameShape(QtWidgets.QFrame.HLine)
self.line_2.setObjectName("line_2")
self.horizontalLayout_9.addWidget(self.line_2)
self.verticalLayout_2.addLayout(self.horizontalLayout_9)
self.radio_distyes = QtWidgets.QRadioButton(fgdc_distinfo)
self.radio_distyes.setObjectName("radio_distyes")
self.verticalLayout_2.addWidget(self.radio_distyes)
self.verticalLayout_10.addLayout(self.verticalLayout_2)
self.scrollArea = QtWidgets.QScrollArea(fgdc_distinfo)
font = QtGui.QFont()
font.setKerning(False)
self.scrollArea.setFont(font)
self.scrollArea.setFrameShape(QtWidgets.QFrame.NoFrame)
self.scrollArea.setWidgetResizable(True)
self.scrollArea.setObjectName("scrollArea")
self.scrollAreaWidgetContents = QtWidgets.QWidget()
self.scrollAreaWidgetContents.setGeometry(QtCore.QRect(0, 0, 1068, 717))
self.scrollAreaWidgetContents.setObjectName("scrollAreaWidgetContents")
self.verticalLayout_3 = QtWidgets.QVBoxLayout(self.scrollAreaWidgetContents)
self.verticalLayout_3.setContentsMargins(0, 0, 0, 0)
self.verticalLayout_3.setSpacing(0)
self.verticalLayout_3.setObjectName("verticalLayout_3")
self.widget_distinfo = QtWidgets.QWidget(self.scrollAreaWidgetContents)
self.widget_distinfo.setMinimumSize(QtCore.QSize(0, 0))
self.widget_distinfo.setObjectName("widget_distinfo")
self.horizontalLayout_2 = QtWidgets.QHBoxLayout(self.widget_distinfo)
self.horizontalLayout_2.setContentsMargins(0, 0, 0, 0)
self.horizontalLayout_2.setObjectName("horizontalLayout_2")
self.two_column = QtWidgets.QWidget(self.widget_distinfo)
self.two_column.setObjectName("two_column")
self.horizontalLayout_4 = QtWidgets.QHBoxLayout(self.two_column)
self.horizontalLayout_4.setContentsMargins(0, 0, 0, 0)
self.horizontalLayout_4.setSpacing(3)
self.horizontalLayout_4.setObjectName("horizontalLayout_4")
self.two_column_left = QtWidgets.QWidget(self.two_column)
self.two_column_left.setObjectName("two_column_left")
self.verticalLayout_5 = QtWidgets.QVBoxLayout(self.two_column_left)
self.verticalLayout_5.setContentsMargins(1, 1, 1, 1)
self.verticalLayout_5.setSpacing(1)
self.verticalLayout_5.setObjectName("verticalLayout_5")
self.fgdc_distrib = QtWidgets.QGroupBox(self.two_column_left)
sizePolicy = QtWidgets.QSizePolicy(
QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Expanding
)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.fgdc_distrib.sizePolicy().hasHeightForWidth())
self.fgdc_distrib.setSizePolicy(sizePolicy)
self.fgdc_distrib.setObjectName("fgdc_distrib")
self.verticalLayout_4 = QtWidgets.QVBoxLayout(self.fgdc_distrib)
self.verticalLayout_4.setObjectName("verticalLayout_4")
self.label_2 = QtWidgets.QLabel(self.fgdc_distrib)
self.label_2.setMinimumSize(QtCore.QSize(0, 20))
self.label_2.setStyleSheet("font: italic;")
self.label_2.setWordWrap(True)
self.label_2.setObjectName("label_2")
self.verticalLayout_4.addWidget(self.label_2)
self.frame = QtWidgets.QFrame(self.fgdc_distrib)
sizePolicy = QtWidgets.QSizePolicy(
QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Fixed
)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.frame.sizePolicy().hasHeightForWidth())
self.frame.setSizePolicy(sizePolicy)
self.frame.setStyleSheet(
"QFrame#frame{ \n"
'font: 75 10pt "Arial";\n'
"border: 1px solid black;\n"
"border-radius: 3px;\n"
"background: QLinearGradient(x1: 0, y1: 0, x2: 0, y2: 1, stop: 0 #eef, stop: 1 #ccf);\n"
"}\n"
""
)
self.frame.setFrameShape(QtWidgets.QFrame.StyledPanel)
self.frame.setFrameShadow(QtWidgets.QFrame.Raised)
self.frame.setObjectName("frame")
self.horizontalLayout_8 = QtWidgets.QHBoxLayout(self.frame)
self.horizontalLayout_8.setContentsMargins(0, 0, 0, 0)
self.horizontalLayout_8.setSpacing(0)
self.horizontalLayout_8.setObjectName("horizontalLayout_8")
self.verticalLayout_11 = QtWidgets.QVBoxLayout()
self.verticalLayout_11.setContentsMargins(9, 9, 9, 9)
self.verticalLayout_11.setObjectName("verticalLayout_11")
self.horizontalLayout_5 = QtWidgets.QHBoxLayout()
self.horizontalLayout_5.setContentsMargins(6, -1, -1, -1)
self.horizontalLayout_5.setSpacing(0)
self.horizontalLayout_5.setObjectName("horizontalLayout_5")
self.label_6 = QtWidgets.QLabel(self.frame)
sizePolicy = QtWidgets.QSizePolicy(
QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Preferred
)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label_6.sizePolicy().hasHeightForWidth())
self.label_6.setSizePolicy(sizePolicy)
self.label_6.setMinimumSize(QtCore.QSize(0, 20))
self.label_6.setStyleSheet("font: italic;")
self.label_6.setWordWrap(False)
self.label_6.setObjectName("label_6")
self.horizontalLayout_5.addWidget(self.label_6)
spacerItem3 = QtWidgets.QSpacerItem(
0, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum
)
self.horizontalLayout_5.addItem(spacerItem3)
self.button_use_sb = QtWidgets.QPushButton(self.frame)
self.button_use_sb.setMinimumSize(QtCore.QSize(150, 0))
self.button_use_sb.setObjectName("button_use_sb")
self.horizontalLayout_5.addWidget(self.button_use_sb)
self.verticalLayout_11.addLayout(self.horizontalLayout_5)
self.horizontalLayout_8.addLayout(self.verticalLayout_11)
self.verticalLayout_4.addWidget(self.frame)
self.verticalLayout_5.addWidget(self.fgdc_distrib)
spacerItem4 = QtWidgets.QSpacerItem(
20, 0, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding
)
self.verticalLayout_5.addItem(spacerItem4)
self.horizontalLayout_4.addWidget(self.two_column_left)
self.two_column_right = QtWidgets.QWidget(self.two_column)
sizePolicy = QtWidgets.QSizePolicy(
QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Minimum
)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(
self.two_column_right.sizePolicy().hasHeightForWidth()
)
self.two_column_right.setSizePolicy(sizePolicy)
self.two_column_right.setObjectName("two_column_right")
self.verticalLayout_6 = QtWidgets.QVBoxLayout(self.two_column_right)
self.verticalLayout_6.setContentsMargins(1, 1, 1, 1)
self.verticalLayout_6.setSpacing(1)
self.verticalLayout_6.setObjectName("verticalLayout_6")
self.groupBox_3 = QtWidgets.QGroupBox(self.two_column_right)
sizePolicy = QtWidgets.QSizePolicy(
QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Fixed
)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.groupBox_3.sizePolicy().hasHeightForWidth())
self.groupBox_3.setSizePolicy(sizePolicy)
self.groupBox_3.setObjectName("groupBox_3")
self.verticalLayout = QtWidgets.QVBoxLayout(self.groupBox_3)
self.verticalLayout.setObjectName("verticalLayout")
self.radio_online = QtWidgets.QRadioButton(self.groupBox_3)
self.radio_online.setObjectName("radio_online")
self.verticalLayout.addWidget(self.radio_online)
self.horizontalLayout_7 = QtWidgets.QHBoxLayout()
self.horizontalLayout_7.setObjectName("horizontalLayout_7")
spacerItem5 = QtWidgets.QSpacerItem(
20, 20, QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Minimum
)
self.horizontalLayout_7.addItem(spacerItem5)
self.horizontalLayout_6 = QtWidgets.QHBoxLayout()
self.horizontalLayout_6.setObjectName("horizontalLayout_6")
self.label_7 = QtWidgets.QLabel(self.groupBox_3)
self.label_7.setObjectName("label_7")
self.horizontalLayout_6.addWidget(self.label_7)
self.fgdc_networkr = QtWidgets.QLineEdit(self.groupBox_3)
self.fgdc_networkr.setEnabled(False)
self.fgdc_networkr.setClearButtonEnabled(False)
self.fgdc_networkr.setObjectName("fgdc_networkr")
self.horizontalLayout_6.addWidget(self.fgdc_networkr)
self.horizontalLayout_7.addLayout(self.horizontalLayout_6)
self.verticalLayout.addLayout(self.horizontalLayout_7)
self.radio_dist = QtWidgets.QRadioButton(self.groupBox_3)
self.radio_dist.setObjectName("radio_dist")
self.verticalLayout.addWidget(self.radio_dist)
self.radio_otherdist = QtWidgets.QRadioButton(self.groupBox_3)
self.radio_otherdist.setObjectName("radio_otherdist")
self.verticalLayout.addWidget(self.radio_otherdist)
self.fgdc_custom = QtWidgets.QPlainTextEdit(self.groupBox_3)
self.fgdc_custom.setEnabled(False)
self.fgdc_custom.setMaximumSize(QtCore.QSize(16777215, 16777215))
self.fgdc_custom.setObjectName("fgdc_custom")
self.verticalLayout.addWidget(self.fgdc_custom)
self.verticalLayout_6.addWidget(self.groupBox_3)
self.groupBox = QtWidgets.QGroupBox(self.two_column_right)
sizePolicy = QtWidgets.QSizePolicy(
QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Minimum
)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.groupBox.sizePolicy().hasHeightForWidth())
self.groupBox.setSizePolicy(sizePolicy)
self.groupBox.setMaximumSize(QtCore.QSize(16777215, 16777215))
self.groupBox.setObjectName("groupBox")
self.verticalLayout_8 = QtWidgets.QVBoxLayout(self.groupBox)
self.verticalLayout_8.setObjectName("verticalLayout_8")
self.label_10 = QtWidgets.QLabel(self.groupBox)
sizePolicy = QtWidgets.QSizePolicy(
QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Fixed
)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label_10.sizePolicy().hasHeightForWidth())
self.label_10.setSizePolicy(sizePolicy)
self.label_10.setMinimumSize(QtCore.QSize(0, 20))
self.label_10.setStyleSheet("font: italic;")
self.label_10.setWordWrap(True)
self.label_10.setObjectName("label_10")
self.verticalLayout_8.addWidget(self.label_10)
self.fgdc_distliab = GrowingTextEdit(self.groupBox)
sizePolicy = QtWidgets.QSizePolicy(
QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum
)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(
self.fgdc_distliab.sizePolicy().hasHeightForWidth()
)
self.fgdc_distliab.setSizePolicy(sizePolicy)
self.fgdc_distliab.setObjectName("fgdc_distliab")
self.verticalLayout_8.addWidget(self.fgdc_distliab)
self.verticalLayout_6.addWidget(self.groupBox)
self.group_datafees = QtWidgets.QGroupBox(self.two_column_right)
self.group_datafees.setMaximumSize(QtCore.QSize(16777215, 100))
self.group_datafees.setObjectName("group_datafees")
self.verticalLayout_9 = QtWidgets.QVBoxLayout(self.group_datafees)
self.verticalLayout_9.setObjectName("verticalLayout_9")
self.label_12 = QtWidgets.QLabel(self.group_datafees)
self.label_12.setMinimumSize(QtCore.QSize(0, 20))
self.label_12.setStyleSheet("font: italic;")
self.label_12.setWordWrap(True)
self.label_12.setObjectName("label_12")
self.verticalLayout_9.addWidget(self.label_12)
self.fgdc_fees = QtWidgets.QPlainTextEdit(self.group_datafees)
self.fgdc_fees.setEnabled(False)
self.fgdc_fees.setMaximumSize(QtCore.QSize(16777215, 50))
self.fgdc_fees.setObjectName("fgdc_fees")
self.verticalLayout_9.addWidget(self.fgdc_fees)
self.verticalLayout_6.addWidget(self.group_datafees)
spacerItem6 = QtWidgets.QSpacerItem(
20, 0, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding
)
self.verticalLayout_6.addItem(spacerItem6)
self.groupBox_3.raise_()
self.groupBox.raise_()
self.group_datafees.raise_()
self.horizontalLayout_4.addWidget(self.two_column_right)
self.horizontalLayout_2.addWidget(self.two_column)
self.verticalLayout_3.addWidget(self.widget_distinfo)
self.scrollArea.setWidget(self.scrollAreaWidgetContents)
self.verticalLayout_10.addWidget(self.scrollArea)
self.retranslateUi(fgdc_distinfo)
QtCore.QMetaObject.connectSlotsByName(fgdc_distinfo)
def retranslateUi(self, fgdc_distinfo):
_translate = QtCore.QCoreApplication.translate
fgdc_distinfo.setWindowTitle(_translate("fgdc_distinfo", "Form"))
self.label_5.setToolTip(_translate("fgdc_distinfo", "Required"))
self.label_5.setText(
_translate(
"fgdc_distinfo",
'<html><head/><body><p><span style=" font-style:italic; color:#55aaff;">Provide information about access to the data, the data distribution format, and the data distributor.</span></p></body></html>',
)
)
self.label_4.setToolTip(_translate("fgdc_distinfo", "Required"))
self.label_4.setText(
_translate(
"fgdc_distinfo",
'<html><head/><body><p><span style=" font-size:15pt; color:#55aaff;">*</span></p></body></html>',
)
)
self.label_3.setToolTip(_translate("fgdc_distinfo", "Required"))
self.label_3.setText(
_translate(
"fgdc_distinfo",
'<html><head/><body><p><span style=" font-size:9pt; font-style:italic; color:#55aaff;">= Required</span></p></body></html>',
)
)
self.radio_distno.setText(
_translate(
"fgdc_distinfo",
"Dataset is for internal use only and will NOT be shared or distributed. The metadata record does not need distribution information.",
)
)
self.label.setText(_translate("fgdc_distinfo", "OR"))
self.radio_distyes.setText(
_translate(
"fgdc_distinfo",
"Details on how to acquire/access the data are described below.",
)
)
self.fgdc_distrib.setTitle(_translate("fgdc_distinfo", "Distribution Contact"))
self.label_2.setText(
_translate(
"fgdc_distinfo",
"Contact information for the person or organization responsible for the distribution of the data.",
)
)
self.label_6.setText(
_translate(
"fgdc_distinfo",
"Will this dataset be distributed on the USGS ScienceBase System?",
)
)
self.button_use_sb.setText(_translate("fgdc_distinfo", "Add ScienceBase Info"))
self.groupBox_3.setTitle(
_translate("fgdc_distinfo", "How Can Others Access the Data?")
)
self.radio_online.setText(
_translate("fgdc_distinfo", "The dataset is available online.")
)
self.label_7.setText(
_translate("fgdc_distinfo", "URL of website or GIS service:")
)
self.radio_dist.setText(
_translate(
"fgdc_distinfo",
"The dataset is not available online. Interested parties should contact the distributor for details on \n"
"acquiring the data. (Provide 'Distributor Contact' information.",
)
)
self.radio_otherdist.setText(
_translate("fgdc_distinfo", "Other Distribution method. (Describe below)")
)
self.groupBox.setTitle(_translate("fgdc_distinfo", "Distribution Liability"))
self.label_10.setText(
_translate(
"fgdc_distinfo",
"List any distribution disclaimers or limitations of liability.",
)
)
self.group_datafees.setTitle(_translate("fgdc_distinfo", "Data Fees"))
self.label_12.setText(
_translate("fgdc_distinfo", "Describe any fees associated with this data.")
)
from growingtextedit import GrowingTextEdit
|
py
|
1a5bdce41aab759a901932b48d525c88360dc457
|
"""
Copyright (C) 2020-2021 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import numpy as np
from mo.front.common.partial_infer.utils import int64_array
from mo.front.extractor import bool_to_str
from mo.graph.graph import Node, Graph
from mo.ops.op import Op
class CTCLoss(Op):
op = 'CTCLoss'
def __init__(self, graph: Graph, attrs: dict):
mandatory_props = {
'type': self.op,
'op': self.op,
'version': 'opset4',
'type_infer': self.type_infer,
'infer': self.infer,
'in_ports_count': 5,
'out_ports_count': 1,
'preprocess_collapse_repeated': False,
'ctc_merge_repeated': True,
'unique': False
}
super().__init__(graph, mandatory_props, attrs)
def backend_attrs(self):
return [('preprocess_collapse_repeated', lambda node: bool_to_str(node, 'preprocess_collapse_repeated')),
('ctc_merge_repeated', lambda node: bool_to_str(node, 'ctc_merge_repeated')),
('unique', lambda node: bool_to_str(node, 'unique'))]
@staticmethod
def type_infer(node):
logits_type = node.in_port(0).get_data_type()
logit_length_type = node.in_port(1).get_data_type()
labels_type = node.in_port(2).get_data_type()
label_length_type = node.in_port(3).get_data_type()
blank_index_type = labels_type
if not node.in_port(4).disconnected():
blank_index_type = node.in_port(4).get_data_type()
assert logit_length_type == label_length_type and logit_length_type in [np.int64, np.int32], \
'Inputs with logits and labels lengths for node {} must be the same and int32 or int64, {} and {} found'.format(
node.soft_get('name'), logit_length_type, label_length_type)
assert labels_type == blank_index_type and labels_type in [np.int64, np.int32], \
'Inputs with labels and blank index for node {} must be the same and int32 or int64, {} and {} found'.format(
node.soft_get('name'), labels_type, blank_index_type)
node.out_port(0).set_data_type(logits_type)
@staticmethod
def infer(node: Node):
node_name = node.soft_get('name', node.id)
connected_in_ports = [port for port in node.in_ports().values() if not port.disconnected()]
assert len(connected_in_ports) in [4, 5], \
"Incorrect number of inputs for {} node".format(node_name)
logits_shape = node.in_port(0).data.get_shape()
logit_length_shape = node.in_port(1).data.get_shape()
labels_shape = node.in_port(2).data.get_shape()
label_length_shape = node.in_port(3).data.get_shape()
blank_index_shape = int64_array([])
if len(node.in_nodes()) == 5:
blank_index_shape = node.in_port(4).data.get_shape()
# check shapes of input tensors
assert len(logits_shape) == 3 and len(logit_length_shape) == 1 and len(labels_shape) == 2\
and len(label_length_shape) == 1 and len(blank_index_shape) == 0, \
'Incorrect rank of some input tensor for {} node'.format(node_name)
assert logits_shape[0] == logit_length_shape[0] and logits_shape[0] == labels_shape[0]\
and logits_shape[0] == label_length_shape[0], \
'Batch dimensions of input tensors must be the same for {} node'.format(node_name)
assert logits_shape[1] == labels_shape[1], \
'Time dimensions of input tensors must be the same for {} node'.format(node_name)
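# Shapes implied by the checks above: logits [N, T, C], logit/label lengths [N],
# labels [N, T]; the op outputs one CTC loss value per batch element, shape [N].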
batch_size = logits_shape[0]
node.out_port(0).data.set_shape(int64_array([batch_size]))
|
py
|
1a5bdd7d3c0ef8d63e084e4b32c0c397f5e5470c
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Field'
db.create_table(u'fields_field', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name', self.gf('django.db.models.fields.CharField')(max_length=100)),
('slug', self.gf('django.db.models.fields.SlugField')(max_length=255)),
('application', self.gf('django.db.models.fields.CharField')(max_length=255, db_index=True)),
('type', self.gf('django.db.models.fields.CharField')(max_length=15, db_index=True)),
))
db.send_create_signal(u'fields', ['Field'])
# Adding model 'Option'
db.create_table(u'fields_option', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('field', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['fields.Field'])),
('name', self.gf('django.db.models.fields.CharField')(max_length=100)),
('slug', self.gf('django.db.models.fields.SlugField')(max_length=140)),
('value', self.gf('django.db.models.fields.CharField')(max_length=255)),
))
db.send_create_signal(u'fields', ['Option'])
# Adding model 'FieldOption'
db.create_table(u'fields_fieldoption', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('field', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['fields.Field'])),
('option', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['fields.Option'])),
('order', self.gf('django.db.models.fields.PositiveIntegerField')(default=0)),
))
db.send_create_signal(u'fields', ['FieldOption'])
def backwards(self, orm):
# Deleting model 'Field'
db.delete_table(u'fields_field')
# Deleting model 'Option'
db.delete_table(u'fields_option')
# Deleting model 'FieldOption'
db.delete_table(u'fields_fieldoption')
models = {
u'fields.field': {
'Meta': {'object_name': 'Field'},
'application': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '255'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '15', 'db_index': 'True'})
},
u'fields.fieldoption': {
'Meta': {'ordering': "['-order']", 'object_name': 'FieldOption'},
'field': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['fields.Field']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'option': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['fields.Option']"}),
'order': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
u'fields.option': {
'Meta': {'object_name': 'Option'},
'field': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['fields.Field']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '140'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '255'})
}
}
complete_apps = ['fields']
|
py
|
1a5bddd00aa0c4143b319ef4a8f847a7b203bd0d
|
__author__ = 'the-kid89'
"""
A sample program that uses multiple intents and disambiguates by
intent confidence
try with the following:
PYTHONPATH=. python examples/multi_intent_parser.py "what's the weather like in tokyo"
PYTHONPATH=. python examples/multi_intent_parser.py "play some music by the clash"
"""
import json
import sys
from adapt.entity_tagger import EntityTagger
from adapt.tools.text.tokenizer import EnglishTokenizer
from adapt.tools.text.trie import Trie
from adapt.intent import IntentBuilder
from adapt.parser import Parser
from adapt.engine import DomainIntentDeterminationEngine
tokenizer = EnglishTokenizer()
trie = Trie()
tagger = EntityTagger(trie, tokenizer)
parser = Parser(tokenizer, tagger)
engine = DomainIntentDeterminationEngine()
engine.register_domain('Domain1')
engine.register_domain('Domain2')
# define vocabulary
weather_keyword = [
"weather"
]
for wk in weather_keyword:
engine.register_entity(wk, "WeatherKeyword", domain='Domain1')
weather_types = [
"snow",
"rain",
"wind",
"sleet",
"sun"
]
for wt in weather_types:
engine.register_entity(wt, "WeatherType", domain='Domain1')
locations = [
"Seattle",
"San Francisco",
"Tokyo"
]
for l in locations:
engine.register_entity(l, "Location", domain='Domain1')
# structure intent
weather_intent = IntentBuilder("WeatherIntent")\
.require("WeatherKeyword")\
.optionally("WeatherType")\
.require("Location")\
.build()
# define music vocabulary
artists = [
"third eye blind",
"the who",
"the clash",
"john mayer",
"kings of leon",
"adelle"
]
for a in artists:
engine.register_entity(a, "Artist", domain='Domain2')
music_verbs = [
"listen",
"hear",
"play"
]
for mv in music_verbs:
engine.register_entity(mv, "MusicVerb", domain='Domain2')
music_keywords = [
"songs",
"music"
]
for mk in music_keywords:
engine.register_entity(mk, "MusicKeyword", domain='Domain2')
music_intent = IntentBuilder("MusicIntent")\
.require("MusicVerb")\
.optionally("MusicKeyword")\
.optionally("Artist")\
.build()
engine.register_intent_parser(weather_intent, domain='Domain1')
engine.register_intent_parser(music_intent, domain='Domain2')
if __name__ == "__main__":
for intents in engine.determine_intent(' '.join(sys.argv[1:])):
print(intents)
|
py
|
1a5bde00dd8bb51e439591c86ef0d4d1410e18f2
|
import discord
from discord.ext import commands
import re
from discord import app_commands
color = 0xc48aff
class HelpDropdown(discord.ui.Select):
def __init__(self):
options = [
discord.SelectOption(label='Economy', description='add, profile, shop, blackjack, slots, coinflip, leaderboard', emoji="💰"),
discord.SelectOption(label='Moderation', description='mute, tempmute, unmute, kick, ban, softban, purge', emoji="<:moderation:893273273385754686>"),
discord.SelectOption(label='Info', description='prices, crypto, covid, invite, track, userinfo, botinfo, vote, bug, feedback', emoji="ℹ️"),
discord.SelectOption(label='Music (BETA)', description='play, skip, queue, remove, stop, clear, repeat, shuffle, nowplaying, pause, remove', emoji='🎵'),
discord.SelectOption(label='Admin', description='setprefix, setlevel, levelreset, dellevel, levelchannel, setmute, muterole, delmute, setjoin', emoji="⚙️"),
discord.SelectOption(label='Fun', description='level, levelboard, ping, new', emoji='🎉'),
]
super().__init__(placeholder='Choose a category...', min_values=1, max_values=1, options=options)
async def callback(self, interaction: discord.Interaction):
if self.values[0] == 'Economy':
embed = discord.Embed(
title = "💰 - Economy Help",
description = "**Options in `<>` are mandatory**",
colour = discord.Colour.random()
)
embed.add_field(name = "**Add**", value = f"**Usage: `/add`**\nGives you $2,500. Can be run every 2 hours", inline=False)
embed.add_field(name = "**Shop**", value = f"**Usage: `/shop`**\nGives you the shop menus so that you can buy items", inline=False)
embed.add_field(name = "**🃏 - Blackjack**", value = f"**Usage: `/blackjack <bet>`**\nIf no bet is given, the deafult bet of $125 will be placed", inline=False)
embed.add_field(name = "**🎰 - Slots**", value = f"**Usage: `/slots <bet>`**\nIf no bet is given, the default bet of $125 will be placed.", inline=False)
embed.add_field(name = "**🪙 - Coinflip**", value = f"**Usage: `/coinflip <bet>`**\nHeads means you win, tails means you lose. If no bet is given, the default bet of $125 will be placed.", inline=False)
embed.add_field(name = "**💵 - Profile**", value = f"**Usage: `/profile <member>`**\nShows the amount of money and ranks that a user has", inline=False),
embed.add_field(name = "**🏅 - Leaderboard**", value = f"**Usage: `/leaderboard` **\nShows the top 5 players with the most money. This is a global leaderboard and not per server.", inline=False)
await interaction.response.edit_message(embed=embed)
if self.values[0] == 'Moderation':
embed = discord.Embed(
title = "<:moderation:893273273385754686> - Moderation Help",
description = "**Options in `<>` are mandatory**",
colour = discord.Colour.random()
)
embed.add_field(name = "**Warn**", value = f"**Usage: `/warn <member> <reason>`** \nWarn a member for doing something against the rules.", inline=True)
embed.add_field(name = "**Delwarn**", value = f"**Usage: `/delwarn <warn ID>`** \nDelete a warning from a member so that it is no longer on their record.", inline=True)
embed.add_field(name = "**Warnings**", value = f"**Usage: `/warnings <member>`** \nSee all of the warnings for a member. Also includes when they were warned, and who warned them.", inline=True)
embed.add_field(name = "**Mute**", value = f"**Usage: `/mute <member> <time>`** \nMute a member so they can't send anymore messages.", inline=True)
embed.add_field(name = "**Tempmute**", value = f"**Usage: `/tempmute <member> <time - in hours>` \nExample: `/tempmute @bob 2`** \nMutes the member temporarily, they will be unmuted once the specified time has passed.", inline=True)
embed.add_field(name = "**Unmute**", value = f"**Usage: `/unmute <member>`** \nUnmute a member so they are able to send messages again.", inline=True)
embed.add_field(name = "**Purge**", value = f"**Usage: `/purge <amount>`** \nDelete messages from your server. Max amount that can be deleted at one time is `100` messages.")
embed.add_field(name = "**Kick**", value = f"**Usage: `/kick <member> <reason>`** \nKick a member from your server. They will be able to join back with a new invite.", inline=True)
embed.add_field(name = "**Ban**", value = f"**Usage: `/ban <member> <reason>`** \nBan a member from your server. They will not be able to join back until they are unbanned.", inline=True)
embed.add_field(name = "**Softban**", value = f"**Usage: `/softban <member> <reason>`** \nThis command will ban and then immediately unban the member in order to get rid of their message history.", inline=True)
await interaction.response.edit_message(embed=embed)
if self.values[0] == "Info":
embed = discord.Embed(
title = "ℹ️ - Info Help",
description = "**Options in `<>` are mandatory**",
colour = discord.Colour.random()
)
embed.add_field(name = "**Prices**", value = f"**Usage: `/prices`** \nShows the prices for the 20 cryptocurrencies that we currently list", inline=True)
embed.add_field(name = "**Crypto**", value = f"**Usage: `/crypto <ticker>`** \nShows expanded information on the specific currency given its ticker.", inline=True)
embed.add_field(name = "**Covid**", value = f"**Usage: `/covid` **\nSends the current global COVID-19 data.", inline=True)
embed.add_field(name = "**Invite**", value = f"**Usage: `/invite` **\nSends the invite for the bot and the official support server.", inline=True)
embed.add_field(name = "**Track**", value = f"**Usage: `/track`** \nSends the amount of servers that the bot is in, as well as the cumulative amount of members.", inline=True)
embed.add_field(name = "**User Info**", value = f"**Usage: `/userinfo <member>`** \nGives information on a member in your server. Information includes account creation date, when they joined your server, and some more.", inline=True)
embed.add_field(name = "**Bot Info**", value = f"**Usage: `/botinfo`** \nGives information on the bot.", inline=True)
embed.add_field(name = "**Vote**", value = f"**Usage: `/vote`** \nSends the link for you to vote for our bot on top.gg", inline=True)
embed.add_field(name = "**Bug**", value = f"**Usage: `/bug`** \nShows a form to be filled out to notify the developer of a bug", inline=True)
embed.add_field(name = "**Feedback**", value = f"**Usage: `/feedback`** \nShows a form to be filled out to show the developer feedback on the both", inline=True)
await interaction.response.edit_message(embed=embed)
if self.values[0] == "Music (BETA)":
embed = discord.Embed(
title = f"🎵 - Music Help \n*NOTE - These commands are still in beta. Please report bugs using `/contact`",
description = "**Options in `<>` are mandatory**",
colour = discord.Colour.random()
)
embed.add_field(name = "**Play**", value = f"**Usage: `/play <name of song / URL>` **\nSearches YouTube, and then plays the top song.", inline=True)
embed.add_field(name = "**Skip**", value = f"**Usage: `/skip` **\nSkips the song that is currently playing.", inline=True)
embed.add_field(name = "**Queue**", value = f"**Usage: `/queue`** \nSends all of the songs that are in the queue.", inline=True)
embed.add_field(name = "**Remove**", value = f"**Usage: `/remove <song #>` **\nRemoves the specified song from the queue.", inline=True)
embed.add_field(name = "**Stop**", value = f"**Usage: `/stop`** \nStops music, clears queue, and leaves VC.", inline=True),
embed.add_field(name = "**Clear**", value = f"**Usage: `/clear` **\nRemoves ALL songs in the queue.", inline=True)
embed.add_field(name = "**Repeat**", value = f"**Usage: `/remove`** \nRepeats the song that is playing. Run the command again to stop repeating.", inline=True)
embed.add_field(name = "**Shuffle**", value = f"**Usage: `/shuffle`** \nWill play a random song in the queue. Run the command again to stop shuffling.", inline=True)
embed.add_field(name = "**Np**", value = f"**Usage: `/np` **\nSends the song that is currently playing.", inline=True)
embed.add_field(name = "**Pause**", value = f"**Usage: `/pause`** \nPauses the currently playing song.", inline=True)
embed.add_field(name = "**Resume**", value = f"**Usage: `/resume` **\nResumes the paused song.", inline=True)
await interaction.response.edit_message(embed=embed)
if self.values[0] == "Admin":
embed = discord.Embed(
title = "⚙️ - Admin Help",
description = "**Options in `<>` are mandatory**",
colour = discord.Colour.random()
)
embed.add_field(name = "**Setprefix**", value = f"**Usage: `/setprefix <new prefix>` **\nSets the prefix for the bot in your specific server.", inline=True)
embed.add_field(name = "**Setlevel**", value = f"**Usage: `/setlevel <name of channel>` **\nSets the channel for level up messages to be sent to.", inline=True)
embed.add_field(name = "**Levelreset**", value = f"**Usage: `/levelreset` **\nResets all of the levels for everyone in the server.", inline=True)
embed.add_field(name = "**Dellevel**", value = f"**Usage: `/dellevel` **\nDeletes the channel from our database, and stops sending new level up messages.", inline=True)
embed.add_field(name = "**Levelchannel**", value = f"**Usage: `/levelchannel` ** \nShows the current channel for leveling messages.", inline=True)
embed.add_field(name = "**Setmute**", value = f"**Usage: `/setmute <name of role>` **\nSets the role that will be given to users whenever you use the `/mute` command.", inline=True)
embed.add_field(name = "**Delmute**", value = f"**Usage: `/delmute` **\nDeletes the muted role from our database.", inline=True)
embed.add_field(name = "**Muterole**", value = f"**Usage: `/muterole`** \nSends the current role that is assigned as the muted role for your server.", inline=True)
embed.add_field(name = "**Setjoin**", value = f"**Usage: `/setjoin <name of channel>` **\nSets the channel for messages to be sent whenever a new user joins your server.", inline=True)
embed.add_field(name = "**Deljoin**", value = f"**Usage: `/deljoin`** \nDeletes the channel from our database, and stops sending new user messages.", inline=True),
embed.add_field(name = "**Joinchannel**", value = f"**Usage: `/joinchannel`** \nSends the current channel that is assigned as the new user messages channel.", inline=True)
await interaction.response.edit_message(embed=embed)
if self.values[0] == 'Fun':
embed = discord.Embed(
title = "🎉 - Fun Help",
description = "**Options in `<>` are mandatory**",
colour = discord.Colour.random()
)
embed.add_field(name = "**Level**", value = f"**Usage: `/level`** \nSends your current level in the server.", inline=False)
embed.add_field(name = "**Levelboard**", value = f"**Usage: `/levelboard`** \nSends the current leaderboard for your servers leveling system.", inline=False)
embed.add_field(name = "**Ping**", value = f"**Usage: `/ping` **\nGives the current ping of the bot.", inline=True)
embed.add_field(name = "**New**", value = f"**Usage: `/new`** \nSends all of the changes to the bot.", inline=False)
await interaction.response.edit_message(embed=embed)
else:
return
class HelpView(discord.ui.View):
def __init__(self, timeout = 180.0):
super().__init__(timeout=timeout)
self.value = None
self.add_item(HelpDropdown())
url = "https://discord.com/api/oauth2/authorize?client_id=889027125275922462&permissions=8&scope=bot%20applications.commands"
self.add_item(discord.ui.Button(label="Invite Me", url=url, row=2))
@discord.ui.button(label='Main Page', style=discord.ButtonStyle.blurple, row=2)
async def main_page(self, interaction: discord.Interaction, button: discord.ui.Button):
embed = discord.Embed(
title = "Help",
description = f"**IMPORTANT - A lot of stuff changed, please use the `new` command to see all of the changes** \n\nFor extended information on commands and categories, please choose an option from the dropdown menu below.",
colour = discord.Colour.random()
)
await interaction.response.edit_message(embed=embed)
class slash_help(commands.Cog):
def __init__(self, bot):
self.bot = bot
@app_commands.command()
async def help(
self,
interaction: discord.Interaction
):
"Sends the bots commands and features"
embed = discord.Embed(
title = "Help",
description = f"**IMPORTANT - All commands are now slash commands, and a few changes have been made. Please use `/new` to see any alterations.",
colour = discord.Colour.random()
)
view = HelpView()
await interaction.response.send_message(embed=embed, view=view, ephemeral=True)
@commands.Cog.listener()
async def on_message(self, message: discord.Message) -> None:
if re.fullmatch(rf"<@!?{self.bot.user.id}>", message.content):
embed = discord.Embed(
title = f"All commands are now slash commands!",
description = f"**Use `/help` in order to get help on what commands are available.**",
colour = discord.Colour.blurple()
)
await message.reply(embed=embed)
async def setup(bot):
await bot.add_cog(slash_help(bot))
|
py
|
1a5bde0e66a5a6962dff6ba6500828d61cade08e
|
from gym.envs.registration import registry, register, make, spec
# Algorithmic
# ----------------------------------------
register(
id='Copy-v0',
entry_point='gym.envs.algorithmic:CopyEnv',
max_episode_steps=200,
reward_threshold=25.0,
)
register(
id='RepeatCopy-v0',
entry_point='gym.envs.algorithmic:RepeatCopyEnv',
max_episode_steps=200,
reward_threshold=75.0,
)
register(
id='ReversedAddition-v0',
entry_point='gym.envs.algorithmic:ReversedAdditionEnv',
kwargs={'rows' : 2},
max_episode_steps=200,
reward_threshold=25.0,
)
register(
id='ReversedAddition3-v0',
entry_point='gym.envs.algorithmic:ReversedAdditionEnv',
kwargs={'rows' : 3},
max_episode_steps=200,
reward_threshold=25.0,
)
register(
id='DuplicatedInput-v0',
entry_point='gym.envs.algorithmic:DuplicatedInputEnv',
max_episode_steps=200,
reward_threshold=9.0,
)
register(
id='Reverse-v0',
entry_point='gym.envs.algorithmic:ReverseEnv',
max_episode_steps=200,
reward_threshold=25.0,
)
# Classic
# ----------------------------------------
register(
id='CartPole-v0',
entry_point='gym.envs.classic_control:CartPoleEnv',
max_episode_steps=200,
reward_threshold=195.0,
)
register(
id='CartPole-v1',
entry_point='gym.envs.classic_control:CartPoleEnv',
max_episode_steps=500,
reward_threshold=475.0,
)
register(
id='MountainCar-v0',
entry_point='gym.envs.classic_control:MountainCarEnv',
max_episode_steps=200,
reward_threshold=-110.0,
)
register(
id='MountainCarContinuous-v0',
entry_point='gym.envs.classic_control:Continuous_MountainCarEnv',
max_episode_steps=999,
reward_threshold=90.0,
)
register(
id='Pendulum-v0',
entry_point='gym.envs.classic_control:PendulumEnv',
max_episode_steps=200,
)
register(
id='Acrobot-v1',
entry_point='gym.envs.classic_control:AcrobotEnv',
reward_threshold=-100.0,
max_episode_steps=500,
)
# Box2d
# ----------------------------------------
register(
id='LunarLander-v2',
entry_point='gym.envs.box2d:LunarLander',
max_episode_steps=1000,
reward_threshold=200,
)
register(
id='LunarLanderContinuous-v2',
entry_point='gym.envs.box2d:LunarLanderContinuous',
max_episode_steps=1000,
reward_threshold=200,
)
register(
id='BipedalWalker-v3',
entry_point='gym.envs.box2d:BipedalWalker',
max_episode_steps=1600,
reward_threshold=300,
)
register(
id='BipedalWalkerHardcore-v3',
entry_point='gym.envs.box2d:BipedalWalkerHardcore',
max_episode_steps=2000,
reward_threshold=300,
)
register(
id='CarRacing-v0',
entry_point='gym.envs.box2d:CarRacing',
max_episode_steps=1000,
reward_threshold=900,
)
# Toy Text
# ----------------------------------------
register(
id='Blackjack-v0',
entry_point='gym.envs.toy_text:BlackjackEnv',
)
register(
id='KellyCoinflip-v0',
entry_point='gym.envs.toy_text:KellyCoinflipEnv',
reward_threshold=246.61,
)
register(
id='KellyCoinflipGeneralized-v0',
entry_point='gym.envs.toy_text:KellyCoinflipGeneralizedEnv',
)
register(
id='FrozenLake-v0',
entry_point='gym.envs.toy_text:FrozenLakeEnv',
kwargs={'map_name' : '4x4'},
max_episode_steps=100,
reward_threshold=0.78, # optimum = .8196
)
register(
id='FrozenLake8x8-v0',
entry_point='gym.envs.toy_text:FrozenLakeEnv',
kwargs={'map_name' : '8x8'},
max_episode_steps=200,
reward_threshold=0.99, # optimum = 1
)
register(
id='CliffWalking-v0',
entry_point='gym.envs.toy_text:CliffWalkingEnv',
)
register(
id='NChain-v0',
entry_point='gym.envs.toy_text:NChainEnv',
max_episode_steps=1000,
)
register(
id='Roulette-v0',
entry_point='gym.envs.toy_text:RouletteEnv',
max_episode_steps=100,
)
register(
id='Taxi-v3',
entry_point='gym.envs.toy_text:TaxiEnv',
reward_threshold=8, # optimum = 8.46
max_episode_steps=200,
)
register(
id='GuessingGame-v0',
entry_point='gym.envs.toy_text:GuessingGame',
max_episode_steps=200,
)
register(
id='HotterColder-v0',
entry_point='gym.envs.toy_text:HotterColder',
max_episode_steps=200,
)
# Mujoco
# ----------------------------------------
# 2D
register(id='PointMazeRight-v0',
entry_point='gym.envs.mujoco:PointMazeEnv',
kwargs={'sparse_reward': False, 'direction': 1})
register(id='PointMazeLeft-v0',
entry_point='gym.envs.mujoco:PointMazeEnv',
kwargs={'sparse_reward': False, 'direction': 0})
register(
id='Reacher-v2',
entry_point='gym.envs.mujoco:ReacherEnv',
max_episode_steps=50,
reward_threshold=-3.75,
)
register(
id='Pusher-v2',
entry_point='gym.envs.mujoco:PusherEnv',
max_episode_steps=100,
reward_threshold=0.0,
)
register(
id='Thrower-v2',
entry_point='gym.envs.mujoco:ThrowerEnv',
max_episode_steps=100,
reward_threshold=0.0,
)
register(
id='Striker-v2',
entry_point='gym.envs.mujoco:StrikerEnv',
max_episode_steps=100,
reward_threshold=0.0,
)
register(
id='InvertedPendulum-v2',
entry_point='gym.envs.mujoco:InvertedPendulumEnv',
max_episode_steps=1000,
reward_threshold=950.0,
)
register(
id='InvertedDoublePendulum-v2',
entry_point='gym.envs.mujoco:InvertedDoublePendulumEnv',
max_episode_steps=1000,
reward_threshold=9100.0,
)
register(
id='HalfCheetah-v2',
entry_point='gym.envs.mujoco:HalfCheetahEnv',
max_episode_steps=1000,
reward_threshold=4800.0,
)
register(
id='HalfCheetah-v3',
entry_point='gym.envs.mujoco.half_cheetah_v3:HalfCheetahEnv',
max_episode_steps=1000,
reward_threshold=4800.0,
)
register(
id='Hopper-v2',
entry_point='gym.envs.mujoco:HopperEnv',
max_episode_steps=1000,
reward_threshold=3800.0,
)
register(
id='Hopper-v3',
entry_point='gym.envs.mujoco.hopper_v3:HopperEnv',
max_episode_steps=1000,
reward_threshold=3800.0,
)
register(
id='Swimmer-v2',
entry_point='gym.envs.mujoco:SwimmerEnv',
max_episode_steps=1000,
reward_threshold=360.0,
)
register(
id='Swimmer-v3',
entry_point='gym.envs.mujoco.swimmer_v3:SwimmerEnv',
max_episode_steps=1000,
reward_threshold=360.0,
)
register(
id='Walker2d-v2',
max_episode_steps=1000,
entry_point='gym.envs.mujoco:Walker2dEnv',
)
register(
id='Walker2d-v3',
max_episode_steps=1000,
entry_point='gym.envs.mujoco.walker2d_v3:Walker2dEnv',
)
register(
id='Ant-v2',
entry_point='gym.envs.mujoco:AntEnv',
max_episode_steps=1000,
reward_threshold=6000.0,
)
register(
id='Ant-v3',
entry_point='gym.envs.mujoco.ant_v3:AntEnv',
max_episode_steps=1000,
reward_threshold=6000.0,
)
register(
id='Humanoid-v2',
entry_point='gym.envs.mujoco:HumanoidEnv',
max_episode_steps=1000,
)
register(
id='Humanoid-v3',
entry_point='gym.envs.mujoco.humanoid_v3:HumanoidEnv',
max_episode_steps=1000,
)
register(
id='HumanoidStandup-v2',
entry_point='gym.envs.mujoco:HumanoidStandupEnv',
max_episode_steps=1000,
)
# Robotics
# ----------------------------------------
def _merge(a, b):
a.update(b)
return a
for reward_type in ['sparse', 'dense']:
suffix = 'Dense' if reward_type == 'dense' else ''
kwargs = {
'reward_type': reward_type,
}
# Fetch
register(
id='FetchSlide{}-v1'.format(suffix),
entry_point='gym.envs.robotics:FetchSlideEnv',
kwargs=kwargs,
max_episode_steps=50,
)
register(
id='FetchPickAndPlace{}-v1'.format(suffix),
entry_point='gym.envs.robotics:FetchPickAndPlaceEnv',
kwargs=kwargs,
max_episode_steps=50,
)
register(
id='FetchReach{}-v1'.format(suffix),
entry_point='gym.envs.robotics:FetchReachEnv',
kwargs=kwargs,
max_episode_steps=50,
)
register(
id='FetchPush{}-v1'.format(suffix),
entry_point='gym.envs.robotics:FetchPushEnv',
kwargs=kwargs,
max_episode_steps=50,
)
# Hand
register(
id='HandReach{}-v0'.format(suffix),
entry_point='gym.envs.robotics:HandReachEnv',
kwargs=kwargs,
max_episode_steps=50,
)
register(
id='HandManipulateBlockRotateZ{}-v0'.format(suffix),
entry_point='gym.envs.robotics:HandBlockEnv',
kwargs=_merge({'target_position': 'ignore', 'target_rotation': 'z'}, kwargs),
max_episode_steps=100,
)
register(
id='HandManipulateBlockRotateZTouchSensors{}-v0'.format(suffix),
entry_point='gym.envs.robotics:HandBlockTouchSensorsEnv',
kwargs=_merge({'target_position': 'ignore', 'target_rotation': 'z', 'touch_get_obs': 'boolean'}, kwargs),
max_episode_steps=100,
)
register(
id='HandManipulateBlockRotateZTouchSensors{}-v1'.format(suffix),
entry_point='gym.envs.robotics:HandBlockTouchSensorsEnv',
kwargs=_merge({'target_position': 'ignore', 'target_rotation': 'z', 'touch_get_obs': 'sensordata'}, kwargs),
max_episode_steps=100,
)
register(
id='HandManipulateBlockRotateParallel{}-v0'.format(suffix),
entry_point='gym.envs.robotics:HandBlockEnv',
kwargs=_merge({'target_position': 'ignore', 'target_rotation': 'parallel'}, kwargs),
max_episode_steps=100,
)
register(
id='HandManipulateBlockRotateParallelTouchSensors{}-v0'.format(suffix),
entry_point='gym.envs.robotics:HandBlockTouchSensorsEnv',
kwargs=_merge({'target_position': 'ignore', 'target_rotation': 'parallel', 'touch_get_obs': 'boolean'}, kwargs),
max_episode_steps=100,
)
register(
id='HandManipulateBlockRotateParallelTouchSensors{}-v1'.format(suffix),
entry_point='gym.envs.robotics:HandBlockTouchSensorsEnv',
kwargs=_merge({'target_position': 'ignore', 'target_rotation': 'parallel', 'touch_get_obs': 'sensordata'}, kwargs),
max_episode_steps=100,
)
register(
id='HandManipulateBlockRotateXYZ{}-v0'.format(suffix),
entry_point='gym.envs.robotics:HandBlockEnv',
kwargs=_merge({'target_position': 'ignore', 'target_rotation': 'xyz'}, kwargs),
max_episode_steps=100,
)
register(
id='HandManipulateBlockRotateXYZTouchSensors{}-v0'.format(suffix),
entry_point='gym.envs.robotics:HandBlockTouchSensorsEnv',
kwargs=_merge({'target_position': 'ignore', 'target_rotation': 'xyz', 'touch_get_obs': 'boolean'}, kwargs),
max_episode_steps=100,
)
register(
id='HandManipulateBlockRotateXYZTouchSensors{}-v1'.format(suffix),
entry_point='gym.envs.robotics:HandBlockTouchSensorsEnv',
kwargs=_merge({'target_position': 'ignore', 'target_rotation': 'xyz', 'touch_get_obs': 'sensordata'}, kwargs),
max_episode_steps=100,
)
register(
id='HandManipulateBlockFull{}-v0'.format(suffix),
entry_point='gym.envs.robotics:HandBlockEnv',
kwargs=_merge({'target_position': 'random', 'target_rotation': 'xyz'}, kwargs),
max_episode_steps=100,
)
# Alias for "Full"
register(
id='HandManipulateBlock{}-v0'.format(suffix),
entry_point='gym.envs.robotics:HandBlockEnv',
kwargs=_merge({'target_position': 'random', 'target_rotation': 'xyz'}, kwargs),
max_episode_steps=100,
)
register(
id='HandManipulateBlockTouchSensors{}-v0'.format(suffix),
entry_point='gym.envs.robotics:HandBlockTouchSensorsEnv',
kwargs=_merge({'target_position': 'random', 'target_rotation': 'xyz', 'touch_get_obs': 'boolean'}, kwargs),
max_episode_steps=100,
)
register(
id='HandManipulateBlockTouchSensors{}-v1'.format(suffix),
entry_point='gym.envs.robotics:HandBlockTouchSensorsEnv',
kwargs=_merge({'target_position': 'random', 'target_rotation': 'xyz', 'touch_get_obs': 'sensordata'}, kwargs),
max_episode_steps=100,
)
register(
id='HandManipulateEggRotate{}-v0'.format(suffix),
entry_point='gym.envs.robotics:HandEggEnv',
kwargs=_merge({'target_position': 'ignore', 'target_rotation': 'xyz'}, kwargs),
max_episode_steps=100,
)
register(
id='HandManipulateEggRotateTouchSensors{}-v0'.format(suffix),
entry_point='gym.envs.robotics:HandEggTouchSensorsEnv',
kwargs=_merge({'target_position': 'ignore', 'target_rotation': 'xyz', 'touch_get_obs': 'boolean'}, kwargs),
max_episode_steps=100,
)
register(
id='HandManipulateEggRotateTouchSensors{}-v1'.format(suffix),
entry_point='gym.envs.robotics:HandEggTouchSensorsEnv',
kwargs=_merge({'target_position': 'ignore', 'target_rotation': 'xyz', 'touch_get_obs': 'sensordata'}, kwargs),
max_episode_steps=100,
)
register(
id='HandManipulateEggFull{}-v0'.format(suffix),
entry_point='gym.envs.robotics:HandEggEnv',
kwargs=_merge({'target_position': 'random', 'target_rotation': 'xyz'}, kwargs),
max_episode_steps=100,
)
# Alias for "Full"
register(
id='HandManipulateEgg{}-v0'.format(suffix),
entry_point='gym.envs.robotics:HandEggEnv',
kwargs=_merge({'target_position': 'random', 'target_rotation': 'xyz'}, kwargs),
max_episode_steps=100,
)
register(
id='HandManipulateEggTouchSensors{}-v0'.format(suffix),
entry_point='gym.envs.robotics:HandEggTouchSensorsEnv',
kwargs=_merge({'target_position': 'random', 'target_rotation': 'xyz', 'touch_get_obs': 'boolean'}, kwargs),
max_episode_steps=100,
)
register(
id='HandManipulateEggTouchSensors{}-v1'.format(suffix),
entry_point='gym.envs.robotics:HandEggTouchSensorsEnv',
kwargs=_merge({'target_position': 'random', 'target_rotation': 'xyz', 'touch_get_obs': 'sensordata'}, kwargs),
max_episode_steps=100,
)
register(
id='HandManipulatePenRotate{}-v0'.format(suffix),
entry_point='gym.envs.robotics:HandPenEnv',
kwargs=_merge({'target_position': 'ignore', 'target_rotation': 'xyz'}, kwargs),
max_episode_steps=100,
)
register(
id='HandManipulatePenRotateTouchSensors{}-v0'.format(suffix),
entry_point='gym.envs.robotics:HandPenTouchSensorsEnv',
kwargs=_merge({'target_position': 'ignore', 'target_rotation': 'xyz', 'touch_get_obs': 'boolean'}, kwargs),
max_episode_steps=100,
)
register(
id='HandManipulatePenRotateTouchSensors{}-v1'.format(suffix),
entry_point='gym.envs.robotics:HandPenTouchSensorsEnv',
kwargs=_merge({'target_position': 'ignore', 'target_rotation': 'xyz', 'touch_get_obs': 'sensordata'}, kwargs),
max_episode_steps=100,
)
register(
id='HandManipulatePenFull{}-v0'.format(suffix),
entry_point='gym.envs.robotics:HandPenEnv',
kwargs=_merge({'target_position': 'random', 'target_rotation': 'xyz'}, kwargs),
max_episode_steps=100,
)
# Alias for "Full"
register(
id='HandManipulatePen{}-v0'.format(suffix),
entry_point='gym.envs.robotics:HandPenEnv',
kwargs=_merge({'target_position': 'random', 'target_rotation': 'xyz'}, kwargs),
max_episode_steps=100,
)
register(
id='HandManipulatePenTouchSensors{}-v0'.format(suffix),
entry_point='gym.envs.robotics:HandPenTouchSensorsEnv',
kwargs=_merge({'target_position': 'random', 'target_rotation': 'xyz', 'touch_get_obs': 'boolean'}, kwargs),
max_episode_steps=100,
)
register(
id='HandManipulatePenTouchSensors{}-v1'.format(suffix),
entry_point='gym.envs.robotics:HandPenTouchSensorsEnv',
kwargs=_merge({'target_position': 'random', 'target_rotation': 'xyz', 'touch_get_obs': 'sensordata'}, kwargs),
max_episode_steps=100,
)
# Atari
# ----------------------------------------
# # print ', '.join(["'{}'".format(name.split('.')[0]) for name in atari_py.list_games()])
for game in ['adventure', 'air_raid', 'alien', 'amidar', 'assault', 'asterix', 'asteroids', 'atlantis',
'bank_heist', 'battle_zone', 'beam_rider', 'berzerk', 'bowling', 'boxing', 'breakout', 'carnival',
'centipede', 'chopper_command', 'crazy_climber', 'defender', 'demon_attack', 'double_dunk',
'elevator_action', 'enduro', 'fishing_derby', 'freeway', 'frostbite', 'gopher', 'gravitar',
'hero', 'ice_hockey', 'jamesbond', 'journey_escape', 'kangaroo', 'krull', 'kung_fu_master',
'montezuma_revenge', 'ms_pacman', 'name_this_game', 'phoenix', 'pitfall', 'pong', 'pooyan',
'private_eye', 'qbert', 'riverraid', 'road_runner', 'robotank', 'seaquest', 'skiing',
'solaris', 'space_invaders', 'star_gunner', 'tennis', 'time_pilot', 'tutankham', 'up_n_down',
'venture', 'video_pinball', 'wizard_of_wor', 'yars_revenge', 'zaxxon']:
for obs_type in ['image', 'ram']:
# space_invaders should yield SpaceInvaders-v0 and SpaceInvaders-ram-v0
name = ''.join([g.capitalize() for g in game.split('_')])
if obs_type == 'ram':
name = '{}-ram'.format(name)
nondeterministic = False
if game == 'elevator_action' and obs_type == 'ram':
# ElevatorAction-ram-v0 seems to yield slightly
# non-deterministic observations about 10% of the time. We
# should track this down eventually, but for now we just
# mark it as nondeterministic.
nondeterministic = True
register(
id='{}-v0'.format(name),
entry_point='gym.envs.atari:AtariEnv',
kwargs={'game': game, 'obs_type': obs_type, 'repeat_action_probability': 0.25},
max_episode_steps=10000,
nondeterministic=nondeterministic,
)
register(
id='{}-v4'.format(name),
entry_point='gym.envs.atari:AtariEnv',
kwargs={'game': game, 'obs_type': obs_type},
max_episode_steps=100000,
nondeterministic=nondeterministic,
)
# Standard Deterministic (as in the original DeepMind paper)
if game == 'space_invaders':
frameskip = 3
else:
frameskip = 4
# Use a deterministic frame skip.
register(
id='{}Deterministic-v0'.format(name),
entry_point='gym.envs.atari:AtariEnv',
kwargs={'game': game, 'obs_type': obs_type, 'frameskip': frameskip, 'repeat_action_probability': 0.25},
max_episode_steps=100000,
nondeterministic=nondeterministic,
)
register(
id='{}Deterministic-v4'.format(name),
entry_point='gym.envs.atari:AtariEnv',
kwargs={'game': game, 'obs_type': obs_type, 'frameskip': frameskip},
max_episode_steps=100000,
nondeterministic=nondeterministic,
)
register(
id='{}NoFrameskip-v0'.format(name),
entry_point='gym.envs.atari:AtariEnv',
kwargs={'game': game, 'obs_type': obs_type, 'frameskip': 1, 'repeat_action_probability': 0.25}, # A frameskip of 1 means we get every frame
max_episode_steps=frameskip * 100000,
nondeterministic=nondeterministic,
)
# No frameskip. (Atari has no entropy source, so these are
# deterministic environments.)
register(
id='{}NoFrameskip-v4'.format(name),
entry_point='gym.envs.atari:AtariEnv',
kwargs={'game': game, 'obs_type': obs_type, 'frameskip': 1}, # A frameskip of 1 means we get every frame
max_episode_steps=frameskip * 100000,
nondeterministic=nondeterministic,
)
# Unit test
# ---------
register(
id='CubeCrash-v0',
entry_point='gym.envs.unittest:CubeCrash',
reward_threshold=0.9,
)
register(
id='CubeCrashSparse-v0',
entry_point='gym.envs.unittest:CubeCrashSparse',
reward_threshold=0.9,
)
register(
id='CubeCrashScreenBecomesBlack-v0',
entry_point='gym.envs.unittest:CubeCrashScreenBecomesBlack',
reward_threshold=0.9,
)
register(
id='MemorizeDigits-v0',
entry_point='gym.envs.unittest:MemorizeDigits',
reward_threshold=20,
)
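# Illustrative usage (a sketch, not part of this registry module): any id
# registered above can be instantiated through the standard gym factory, e.g.
#   import gym
#   env = gym.make('CartPole-v1')
#   observation = env.reset()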
|
py
|
1a5bdeaba35c5d173ee9e348ddf93ce1048fec63
|
import ast
import datetime
import json
import random
import time
import uuid
# noinspection PyUnresolvedReferences
from uuid import UUID # for UUID as object parsing
from collections.abc import Iterable
from types import ModuleType
from typing import Union
from jinja2 import Template, UndefinedError
from catcher.utils import module_utils
from catcher.utils.logger import debug
from catcher.core.filters_factory import FiltersFactory
def merge_two_dicts(x, y):
if not x:
return y
if not y:
return x
return {**x, **y}
def report_override(variables: dict, override: dict):
existing = set(variables)
replace = set(override)
return list(existing.intersection(replace))
def try_get_objects(source: str or dict or list):
got = try_get_object(source) # "'[1,2,3]'" -> '[1,2,3]' -> [1,2,3]
got = try_get_object(got) # '[1,2,3]' -> [1,2,3]
if isinstance(got, dict):
return dict([(k, try_get_objects(v)) for k, v in got.items()])
if isinstance(got, list):
return [try_get_objects(v) for v in got]
return got
def try_get_object(source: str or dict or list):
if isinstance(source, str):
try: # try python term '{key: "value"}'
evaled = eval_datetime(source)
if isinstance(evaled, ModuleType) or callable(evaled): # for standalone 'string' var or 'id' bif
return source
source = evaled
except Exception:
try: # try json object '{"key" : "value"}'
source = json.loads(source)
except ValueError:
return source
return source
def fill_template_recursive(source: Union[dict, list, str], variables: dict, glob=None, globs_added=None) \
-> Union[dict, list, str]:
if isinstance(source, dict):
return dict([(fill_template_recursive(k, variables, glob, globs_added),
fill_template_recursive(v, variables, glob, globs_added)) for k, v in source.items()])
if isinstance(source, list):
return [fill_template_recursive(v, variables, glob, globs_added) for v in source]
return fill_template(source, variables, glob, globs_added)
def fill_template(source: str, variables: dict, isjson=False, glob=None, globs_added=None) -> str:
if not globs_added:
globs_added = set()
if isinstance(source, str):
source = render(source, inject_builtins(variables))
if isjson: # do not parse json string back to objects
return source
try:
evaled = format_datetime(eval_datetime(source, glob))
if not isinstance(evaled, ModuleType) and not callable(evaled): # for standalone 'string' var or 'id' bif
source = evaled
except NameError as e: # try to import missing package and rerun this code
if 'is not defined' in str(e):
name = str(e).split("'")[1]
if name not in globs_added:
# f.e. tzinfo=psycopg2.tz.FixedOffsetTimezone for datetime
glob = module_utils.add_package_to_globals(name, glob, warn_missing_package=False)
globs_added.add(name)
filled = fill_template(source, variables, isjson, glob=glob, globs_added=globs_added)
if not isinstance(filled, ModuleType) and not callable(filled):
return filled # for standalone 'string' var or 'id' bif
except Exception:
pass
return source
def fill_template_str(source: any, variables: dict) -> str:
rendered = render(str(source), inject_builtins(variables))
if rendered != source:
return fill_template_str(rendered, variables)
return rendered
def eval_datetime(astr, glob=None):
if glob is None:
glob = globals()
try:
tree = ast.parse(astr)
except SyntaxError:
raise ValueError(astr)
for node in ast.walk(tree):
if isinstance(node, (ast.Module, ast.Expr, ast.Dict, ast.Str,
ast.Attribute, ast.Num, ast.Name, ast.Load, ast.Tuple)): continue
if (isinstance(node, ast.Call)
and isinstance(node.func, ast.Attribute)
and node.func.attr == 'datetime'): continue
pass
return eval(astr, glob)
def format_datetime(iterable):
if not isinstance(iterable, Iterable) or isinstance(iterable, str):
if isinstance(iterable, datetime.datetime):
return iterable.strftime('%Y-%m-%d %H:%M:%S.%f')
return iterable
else:
if isinstance(iterable, dict):
return dict([(format_datetime(k), format_datetime(v)) for k, v in iterable.items()])
elif isinstance(iterable, tuple):
return tuple([format_datetime(i) for i in iterable])
return [format_datetime(i) for i in iterable]
def inject_builtins(variables: dict) -> dict:
variables_copy = dict(variables)
variables_copy['RANDOM_STR'] = str(uuid.uuid4())
variables_copy['RANDOM_INT'] = random.randint(-2147483648, 2147483648)
ts = round(time.time(), 6) # from timestamp uses rounding, so we should also use it here, to make them compatible
variables_copy['NOW_TS'] = ts
variables_copy['NOW_DT'] = datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%dT%H:%M:%S+0000')
return variables_copy
def render(source: str, variables: dict) -> str:
template = Template(source)
holder = FiltersFactory()
for filter_mod, value in holder.filters.items():
template.environment.filters[filter_mod] = value
for fun_mod, value in holder.functions.items():
template.globals[fun_mod] = value
try:
return template.render(variables)
except UndefinedError as e:
debug(e.message)
return source
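# Illustrative examples (a sketch, not part of the module), assuming only the
# helpers defined above and plain Jinja2 rendering:
#   merge_two_dicts({'a': 1}, {'b': 2})          # -> {'a': 1, 'b': 2}
#   report_override({'a': 1, 'b': 2}, {'b': 3})  # -> ['b']
#   render('{{ user }} is {{ age }}', {'user': 'alice', 'age': 3})  # -> 'alice is 3'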
|
py
|
1a5bdf88ad74e007b90f8b8c82d1a06505ea51b2
|
import pytest
from ..py_graph_t.SimpleGraph import SimpleGraph
from ..py_graph_t.exceptions.SimpleGraphException import (
VertexNotExistsException,
EdgeDuplicatedException,
EdgeNotFoundException,
VertexDuplicatedException,
CycleDetectedException
)
from ..py_graph_t.Graph import Graph
from ..py_graph_t.util.ValueBinding import ValueBinding
class TestGraph:
g = Graph()
def teste(self):
g = Graph()
g.vertices = dict()
g.edges = []
g.add_vertex("a")
g.add_vertex("b")
g.add_vertex("c")
g.add_edge("a", "b", name="s")
g.add_edge("b", "c", name="t")
g.add_edge("a", "a", name="d")
list_ = g.incidence_list()
test = ValueBinding("a", "s", 1)
assert list_[0].__eq__(test)
|
py
|
1a5bdf901913c64b7f418c28731bf445ba706aa1
|
import numpy as np
def calculateEarthRadius(lat_deg):
"""
IN RADIANS!!!
"""
lat_rad = np.deg2rad(lat_deg)
major = 6378137.0 # semi-major axis of the earth
minor = 6356752.3142 # semi-minor axis of the earth
radius = np.sqrt((((major**2)*np.cos(lat_rad))**2 + ((minor**2)*np.sin(lat_rad))**2)/
((major*np.cos(lat_rad))**2 + (minor*np.sin(lat_rad))**2)) # defines the radius of the earth at a specific point
return radius
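# Illustrative usage (a sketch, not part of the module): the radius shrinks
# from the equator towards the poles, e.g.
#   calculateEarthRadius(0.0)   # ~6378137 m (equator, semi-major axis)
#   calculateEarthRadius(45.0)  # ~6367490 m
#   calculateEarthRadius(90.0)  # ~6356752 m (pole, semi-minor axis)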
|
py
|
1a5be03fa4a09031e7beabb443398b1166816fdf
|
from django.apps import AppConfig
class StoreConfig(AppConfig):
default_auto_field = "django.db.models.BigAutoField"
name = "store"
|
py
|
1a5be0473a876d6e6c8989e22b44c7fa5f654ce1
|
import os
import unittest
from unittest import mock
from collections import defaultdict
import sys
import pandas as pd
import numpy as np
from dataprofiler.profilers import NumericStatsMixin
from dataprofiler.profilers.profiler_options import NumericalOptions
test_root_path = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
class TestColumn(NumericStatsMixin):
def __init__(self):
NumericStatsMixin.__init__(self)
self.match_count = 0
self.times = defaultdict(float)
def update(self, df_series):
pass
def _filter_properties_w_options(self, calculations, options):
pass
class TestNumericStatsMixin(unittest.TestCase):
@mock.patch.multiple(NumericStatsMixin, __abstractmethods__=set(),
_filter_properties_w_options=mock.MagicMock(
return_value=None),
create=True)
def test_base(self):
# validate requires NumericalOptions
with self.assertRaisesRegex(ValueError,
"NumericalStatsMixin parameter 'options' "
"must be of type NumericalOptions."):
profile = NumericStatsMixin(options='bad options')
try:
# validate doesn't fail
profile = NumericStatsMixin()
profile = NumericStatsMixin(NumericalOptions())
except Exception as e:
self.fail(e)
def test_check_float(self):
"""
Checks if number is float.
:return:
"""
true_asserts = [1.3, 1.345, -1.3, 0.03, 0.0, -0.0, 1, # numeric values
float("nan"), np.nan, # nan values
"1.3", "nan" # strings
]
for assert_val in true_asserts:
self.assertTrue(NumericStatsMixin.is_float(assert_val))
false_asserts = ["1.3a", "abc", "", "1.23.45"]
for assert_val in false_asserts:
self.assertFalse(NumericStatsMixin.is_float(assert_val))
def test_check_int(self):
"""
Checks if number is integer.
:return:
"""
true_asserts = [1, 1345, -13, 0, -0, # numeric values
"1" # strings
]
for assert_val in true_asserts:
self.assertTrue(NumericStatsMixin.is_int(assert_val))
false_asserts = [1.3, # float
float("nan"), np.nan, # nan value
"nan", "1a", "abc", "", "1.3" # strings
]
for assert_val in false_asserts:
self.assertFalse(NumericStatsMixin.is_int(assert_val))
def test_update_variance(self):
"""
Checks update variance
:return:
"""
num_profiler = TestColumn()
# test update variance
data1 = [-3.0, 2.0, 11.0]
mean1 = (-3.0 + 2.0 + 11.0) / 3
var1 = ((-3.0 - mean1) ** 2 + (2.0 - mean1)
** 2 + (11.0 - mean1) ** 2) / 2
count1 = len(data1)
num_profiler._biased_variance = num_profiler._update_variance(
mean1, var1 * 2 / 3, count1)
num_profiler.match_count = count1
num_profiler.sum = sum(data1)
self.assertAlmostEqual(var1, num_profiler.variance)
# test streaming update variance with new data
data2 = [-5.0, 5.0, 11.0]
mean2 = (-5.0 + 5.0 + 11.0) / 3
var2 = ((-5.0 - mean2) ** 2 + (5.0 - mean2)
** 2 + (11.0 - mean2) ** 2) / 2
count2 = len(data2)
num_profiler._biased_variance = num_profiler._update_variance(
mean2, var2 * 2 / 3, count2)
num_profiler.match_count += count2
num_profiler.sum += sum(data2)
var_from_profile_updated = num_profiler.variance
data_all = [-5.0, 5.0, 11.0, -3.0, 2.0, 11.0]
mean_all = (-5.0 + 5.0 + 11.0 - 3.0 + 2.0 + 11.0) / 6
var_all = ((-5.0 - mean_all) ** 2 + (5.0 - mean_all) ** 2 + \
(11.0 - mean_all) ** 2 + (-3.0 - mean_all) ** 2 + \
(2.0 - mean_all) ** 2 + (11.0 - mean_all) ** 2) / 5
self.assertAlmostEqual(var_all, var_from_profile_updated)
def test_update_variance_with_varying_data_length(self):
"""
Checks update variance
:return:
"""
# empty data
data1 = []
mean1, var1, count1 = 0, np.nan, 0
num_profiler = TestColumn()
num_profiler._biased_variance = num_profiler._update_variance(
mean1, var1, count1)
num_profiler.match_count = count1
num_profiler.sum = 0
self.assertTrue(num_profiler.variance is np.nan)
# data with 1 element
data2 = [5.0]
mean2, var2, count2 = 5.0, 0, 1
num_profiler = TestColumn()
num_profiler._biased_variance = num_profiler._update_variance(
mean2, var2, count2)
num_profiler.match_count += count2
num_profiler.sum += 5.0
self.assertTrue(num_profiler.variance is np.nan)
# data with multiple elements
data3 = [-5.0, 5.0, 11.0, -11.0]
mean3, count3 = 0, 4
var3 = ((-5.0 - mean3) ** 2 + (5.0 - mean3) ** 2 +
(11.0 - mean3) ** 2 + (-11.0 - mean3) ** 2) / 3
num_profiler = TestColumn()
num_profiler._biased_variance = num_profiler._update_variance(
mean3, var3 * 3 / 4, count3)
num_profiler.match_count += count3
num_profiler.sum += sum(data3)
self.assertEqual(var3, num_profiler.variance)
def test_update_variance_with_empty_data(self):
"""
Checks update variance
:return:
"""
num_profiler = TestColumn()
data1 = [-3.0, 2.0, 11.0]
mean1 = (-3.0 + 2.0 + 11.0) / 3
var1 = ((-3.0 - mean1) ** 2 + (2.0 - mean1)
** 2 + (11.0 - mean1) ** 2) / 2
count1 = len(data1)
num_profiler._biased_variance = num_profiler._update_variance(
mean1, var1 * 2 / 3, count1)
num_profiler.match_count = count1
num_profiler.sum = sum(data1)
self.assertEqual(var1, num_profiler.variance)
# test adding data which would not have anything
# data + empty
mean2, var2, count2 = 0, 0, 0
num_profiler._biased_variance = num_profiler._update_variance(
mean2, var2, count2)
num_profiler.match_count = count1
num_profiler.sum = sum(data1)
var_from_profile_updated = num_profiler.variance
# simulate not having data
mean_all, var_all = mean1, var1
self.assertEqual(var_all, var_from_profile_updated)
def test_timeit_merge(self):
"""
Checks profiles have been merged and timed
:return:
"""
num_profiler, other1, other2 = TestColumn(), TestColumn(), TestColumn()
mock_histogram = {
'bin_counts': np.array([1, 1, 1, 1]),
'bin_edges': np.array([2., 5.25, 8.5, 11.75, 15.])
}
other1.min, other1.max, other1._biased_variance, other1.sum, \
other1.num_zeros, other1.num_negatives = 0, 0, 0, 0, 0, 0
other2.min, other2.max, other2._biased_variance, other2.sum, \
other2.num_zeros, other2.num_negatives = 1, 1, 1, 1, 1, 1
# set auto as only histogram to merge
other1.histogram_selection = "auto"
other2.histogram_selection = "auto"
other1.histogram_bin_method_names = ['auto']
other2.histogram_bin_method_names = ['auto']
other1._stored_histogram['histogram'] = mock_histogram
other2._stored_histogram['histogram'] = mock_histogram
other1.histogram_selection = 'auto'
time_array = [float(i) for i in range(2, 0, -1)]
with mock.patch('time.time', side_effect=lambda: time_array.pop()):
# Validate that the times dictionary is empty
self.assertEqual(defaultdict(float), num_profiler.times)
# Validate profiles are merged and timed.
expected = defaultdict(float, {'histogram_and_quantiles': 1.0})
num_profiler._add_helper(other1, other2)
self.assertEqual(expected, num_profiler.times)
def test_timeit(self):
"""
Checks stat properties have been timed
:return:
"""
num_profiler = TestColumn()
# Dummy data to make min call
prev_dependent_properties = {"mean": 0,
"biased_variance": 0,
"biased_skewness": 0}
data = np.array([0, 0, 0, 0, 0])
df_series = pd.Series(data)
subset_properties = {"min": 0, "match_count": 0}
time_array = [float(i) for i in range(24, 0, -1)]
with mock.patch('time.time', side_effect=lambda: time_array.pop()):
# Validate that the times dictionary is empty
self.assertEqual(defaultdict(float), num_profiler.times)
# Validate _get_min is timed.
expected = defaultdict(float, {'min': 1.0})
num_profiler._get_min(
df_series,
prev_dependent_properties,
subset_properties)
self.assertEqual(expected, num_profiler.times)
# Validate _get_max is timed.
expected['max'] = 1.0
num_profiler._get_max(
df_series,
prev_dependent_properties,
subset_properties)
self.assertEqual(expected, num_profiler.times)
# Validate _get_sum is timed.
expected['sum'] = 1.0
num_profiler._get_sum(
df_series,
prev_dependent_properties,
subset_properties)
self.assertEqual(expected, num_profiler.times)
# Validate _get_variance is timed.
expected['variance'] = 1.0
num_profiler._get_variance(
df_series,
prev_dependent_properties,
subset_properties)
self.assertEqual(expected, num_profiler.times)
# Validate _get_skewness is timed
expected['skewness'] = 1.0
num_profiler._get_skewness(
df_series,
prev_dependent_properties,
subset_properties)
self.assertEqual(expected, num_profiler.times)
# Validate _get_kurtosis is timed
expected['kurtosis'] = 1.0
num_profiler._get_kurtosis(
df_series,
prev_dependent_properties,
subset_properties)
self.assertEqual(expected, num_profiler.times)
# Validate _get_histogram_and_quantiles is timed.
expected['histogram_and_quantiles'] = 1.0
num_profiler._get_histogram_and_quantiles(
df_series, prev_dependent_properties, subset_properties)
self.assertEqual(expected, num_profiler.times)
def test_histogram_bin_error(self):
num_profiler = TestColumn()
# Dummy data for calculating bin error
num_profiler._stored_histogram = {
"histogram": {
"bin_edges": np.array([0.0, 4.0, 8.0, 12.0, 16.0])
}
}
input_array = [0, 3, 5, 9, 11, 17]
sum_error = num_profiler._histogram_bin_error(input_array)
# Sum of errors should be difference of each input value to midpoint of bin squared
# bin_midpoints = [2, 6, 10, 14] ids = [1, 1, 2, 3, 3, 4]
assert sum_error == (2-0)**2 + (2-3)**2 + (6-5)**2 + \
(10-9)**2 + (10-11)**2 + (17-14)**2
# Max value test
input_array = [sys.float_info.max, 1.2e308, 1.3e308, 1.5e308]
num_profiler._stored_histogram = {
"histogram": {
"bin_edges": np.array([1e308, 1.2e308, 1.4e308, 1.6e308])
}
}
sum_error = num_profiler._histogram_bin_error(input_array)
assert sum_error == np.inf
# Min value test
input_array = [sys.float_info.min, -1.2e308, -1.3e308, -1.5e308]
num_profiler._stored_histogram = {
"histogram": {
"bin_edges": np.array([-1.6e308, -1.4e308, -1.2e308, -1e308])
}
}
sum_error = num_profiler._histogram_bin_error(input_array)
assert sum_error == np.inf
def test_get_best_histogram_profile(self):
num_profiler = TestColumn()
num_profiler._histogram_for_profile = mock.MagicMock(side_effect=[
("hist_1", 3),
("hist_2", 2),
("hist_3", 1)
])
num_profiler.histogram_selection = None
num_profiler.histogram_methods = {
'method_1': {
'total_loss': 0,
'current_loss': 0,
'histogram': None,
'suggested_bin_count': 3
},
'method_2': {
'total_loss': 0,
'current_loss': 0,
'histogram': None,
'suggested_bin_count': 3
},
'method_3': {
'total_loss': 0,
'current_loss': 0,
'histogram': None,
'suggested_bin_count': 3
}
}
best_histogram = num_profiler._get_best_histogram_for_profile()
assert best_histogram == "hist_3"
def test_get_best_histogram_profile_infinite_loss(self):
num_profiler = TestColumn()
num_profiler._histogram_for_profile = mock.MagicMock(return_value=("hist_1", 3))
num_profiler.histogram_selection = None
num_profiler.histogram_methods = {
'method_1': {
'total_loss': np.inf,
'current_loss': np.inf,
'histogram': None,
'suggested_bin_count': 3
},
}
best_histogram = num_profiler._get_best_histogram_for_profile()
assert best_histogram == "hist_1"
def test_num_zeros(self):
num_profiler = TestColumn()
# Dummy data to make num_zeros call
prev_dependent_properties = {"mean": 0}
subset_properties = {"num_zeros": 0}
df_series = pd.Series([])
num_profiler._get_num_zeros(df_series, prev_dependent_properties,
subset_properties)
self.assertEqual(subset_properties["num_zeros"], 0)
data = np.array([0, 0, 0, 0, 0])
df_series = pd.Series(data)
num_profiler._get_num_zeros(df_series, prev_dependent_properties,
subset_properties)
self.assertEqual(subset_properties["num_zeros"], 5)
data = np.array([000., 0.00, .000, 1.11234, 0, -1])
df_series = pd.Series(data)
num_profiler._get_num_zeros(df_series, prev_dependent_properties,
subset_properties)
self.assertEqual(subset_properties["num_zeros"], 4)
def test_num_negatives(self):
num_profiler = TestColumn()
# Dummy data to make num_negatives call
prev_dependent_properties = {"mean": 0}
subset_properties = {"num_negatives": 0}
df_series = pd.Series([])
num_profiler._get_num_negatives(df_series, prev_dependent_properties,
subset_properties)
self.assertEqual(subset_properties["num_negatives"], 0)
data = np.array([0, 0, 0, 0, 0])
df_series = pd.Series(data)
num_profiler._get_num_negatives(df_series, prev_dependent_properties,
subset_properties)
self.assertEqual(subset_properties["num_negatives"], 0)
data = np.array([1, 0, -.003, -16, -1., -24.45])
df_series = pd.Series(data)
num_profiler._get_num_negatives(df_series, prev_dependent_properties,
subset_properties)
self.assertEqual(subset_properties["num_negatives"], 4)
def test_timeit_num_zeros_and_negatives(self):
"""
Checks num_zeros and num_negatives have been timed
:return:
"""
num_profiler = TestColumn()
# Dummy data to make min call
prev_dependent_properties = {"mean": 0}
data = np.array([0, 0, 0, 0, 0])
df_series = pd.Series(data)
subset_properties = {"num_zeros": 0, "num_negatives": 0}
time_array = [float(i) for i in range(4, 0, -1)]
with mock.patch('time.time', side_effect=lambda: time_array.pop()):
# Validate that the times dictionary is empty
self.assertEqual(defaultdict(float), num_profiler.times)
# Validate _get_min is timed.
expected = defaultdict(float, {'num_zeros': 1.0})
num_profiler._get_num_zeros(
df_series,
prev_dependent_properties,
subset_properties)
self.assertEqual(expected, num_profiler.times)
# Validate _get_max is timed.
expected['num_negatives'] = 1.0
num_profiler._get_num_negatives(
df_series,
prev_dependent_properties,
subset_properties)
self.assertEqual(expected, num_profiler.times)
def test_merge_num_zeros_and_negatives(self):
"""
Checks num_zeros and num_negatives can be merged
:return:
"""
num_profiler, other1, other2 = TestColumn(), TestColumn(), TestColumn()
other1.num_zeros, other1.num_negatives = 3, 1
other2.num_zeros, other2.num_negatives = 7, 1
num_profiler._add_helper(other1, other2)
self.assertEqual(num_profiler.num_zeros, 10)
self.assertEqual(num_profiler.num_negatives, 2)
num_profiler, other1, other2 = TestColumn(), TestColumn(), TestColumn()
other1.num_zeros, other1.num_negatives = 0, 0
other2.num_zeros, other2.num_negatives = 0, 0
num_profiler._add_helper(other1, other2)
self.assertEqual(num_profiler.num_zeros, 0)
self.assertEqual(num_profiler.num_negatives, 0)
def test_profile(self):
num_profiler = TestColumn()
mock_profile = dict(
min=1.0,
max=1.0,
sum=1.0,
mean=0, # default
variance=np.nan, # default
skewness=np.nan, # default
kurtosis=np.nan, # default
stddev=np.nan, # default
histogram={
'bin_counts': np.array([1, 1, 1]),
'bin_edges': np.array([1.0, 2.0, 3.0, 4.0])
},
quantiles={
0: 2.0,
1: 3.0,
2: 4.0,
},
num_zeros=0, # default
num_negatives=0, # default
times=defaultdict(float), # default
)
num_profiler.match_count = 0
num_profiler.min = mock_profile['min']
num_profiler.max = mock_profile['max']
num_profiler.sum = mock_profile['sum']
num_profiler.histogram_selection = 'auto'
num_profiler.histogram_methods['auto']['histogram'] = \
mock_profile['histogram']
num_profiler.quantiles = mock_profile['quantiles']
num_profiler.times = mock_profile['times']
time_array = [float(i) for i in range(100, 0, -1)]
with mock.patch('time.time', side_effect=lambda: time_array.pop()):
# Validate that the times dictionary is empty
self.assertEqual(defaultdict(float), num_profiler.times)
profile = num_profiler.profile()
# pop out the histogram and quartiles to test separately from the
# rest of the dict as we need comparison with some precision
histogram = profile.pop('histogram')
expected_histogram = mock_profile.pop('histogram')
quartiles = profile.pop('quantiles')
expected_quartiles = mock_profile.pop('quantiles')
self.assertDictEqual(mock_profile, profile)
self.assertEqual(expected_histogram['bin_counts'].tolist(),
histogram['bin_counts'].tolist())
self.assertCountEqual(np.round(expected_histogram['bin_edges'], 12),
np.round(histogram['bin_edges'], 12))
self.assertAlmostEqual(expected_quartiles[0], quartiles[0])
self.assertAlmostEqual(expected_quartiles[1], quartiles[1])
self.assertAlmostEqual(expected_quartiles[2], quartiles[2])
def test_diff(self):
"""
Checks _diff_helper() works appropriately.
"""
other1, other2 = TestColumn(), TestColumn()
other1.min = 3
other1.max = 4
other1._biased_variance = 1
other1.sum = 6
other1.match_count = 10
other2.min = 3
other2.max = None
other2._biased_variance = 9
other2.sum = 6
other2.match_count = 20
# T-stat and Welch's df calculation can be found here:
# https://en.wikipedia.org/wiki/Welch%27s_t-test#Calculations
# Conservative df = min(count1, count2) - 1
# P-value is found using scipy: (1 - CDF(abs(t-stat))) * 2
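# A sketch of that computation with the values above (unbiased variances
# var1 = 10/9, var2 = 9 * 20/19, counts n1 = 10, n2 = 20):
#   se = var1 / n1 + var2 / n2
#   t = (mean1 - mean2) / sqrt(se)                                   # ~0.3923
#   welch_df = se ** 2 / ((var1/n1) ** 2 / (n1-1) + (var2/n2) ** 2 / (n2-1))
#   p_value = (1 - scipy.stats.t.cdf(abs(t), df)) * 2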
expected_diff = {
'min': 'unchanged',
'max': [4, None],
'sum': 'unchanged',
'mean': 0.3,
'variance': 10 / 9 - (9 * 20 / 19),
'stddev': np.sqrt(10 / 9) - np.sqrt(9 * 20 / 19),
't-test': {
't-statistic': 0.3923009049186606,
'conservative': {
'df': 9,
'p-value': 0.7039643545772609
},
'welch': {
'df': 25.945257024943864,
'p-value': 0.6980401261750298
}
}
}
difference = other1.diff(other2)
self.assertDictEqual(expected_diff, difference)
# Invalid statistics
other1, other2 = TestColumn(), TestColumn()
other1.min = 3
other1.max = 4
other1._biased_variance = np.nan # NaN variance
other1.sum = 6
other1.match_count = 10
other2.min = 3
other2.max = None
other2._biased_variance = 9
other2.sum = 6
other2.match_count = 20
expected_diff = {
'min': 'unchanged',
'max': [4, None],
'sum': 'unchanged',
'mean': 0.3,
'variance': np.nan,
'stddev': np.nan,
't-test': {
't-statistic': None,
'conservative': {
'df': None,
'p-value': None
},
'welch': {
'df': None,
'p-value': None
}
}
}
expected_var = expected_diff.pop('variance')
expected_stddev = expected_diff.pop('stddev')
with self.assertWarns(RuntimeWarning, msg=
"Null value(s) found in mean and/or variance values. "
"T-test cannot be performed"):
difference = other1.diff(other2)
var = difference.pop('variance')
stddev = difference.pop('stddev')
self.assertDictEqual(expected_diff, difference)
self.assertTrue(np.isnan([expected_var, var, expected_stddev, stddev]).all())
# Insufficient match count
other1, other2 = TestColumn(), TestColumn()
other1.min = 3
other1.max = 4
other1._biased_variance = 1
other1.sum = 6
other1.match_count = 10
other2.min = 3
other2.max = None
other2._biased_variance = 9
other2.sum = 6
other2.match_count = 1 # Insufficient count
expected_diff = {
'min': 'unchanged',
'max': [4, None],
'sum': 'unchanged',
'mean': -5.4,
'variance': np.nan,
'stddev': np.nan,
't-test': {
't-statistic': None,
'conservative': {
'df': None,
'p-value': None
},
'welch': {
'df': None,
'p-value': None
}
}
}
expected_var = expected_diff.pop('variance')
expected_stddev = expected_diff.pop('stddev')
with self.assertWarns(RuntimeWarning, msg=
"Insufficient sample size. "
"T-test cannot be performed."):
difference = other1.diff(other2)
var = difference.pop('variance')
stddev = difference.pop('stddev')
self.assertDictEqual(expected_diff, difference)
self.assertTrue(np.isnan([expected_var, var, expected_stddev, stddev]).all())
# Small p-value
other1, other2 = TestColumn(), TestColumn()
other1.min = 3
other1.max = 4
other1._biased_variance = 1
other1.sum = 6
other1.match_count = 10
other2.min = 3
other2.max = None
other2._biased_variance = 9
other2.sum = 60
other2.match_count = 20
expected_diff = {
'min': 'unchanged',
'max': [4, None],
'sum': -54,
'mean': -2.4,
'variance': 10 / 9 - (9 * 20 / 19),
'stddev': np.sqrt(10 / 9) - np.sqrt(9 * 20 / 19),
't-test': {
't-statistic': -3.138407239349285,
'conservative': {
'df': 9,
'p-value': 0.011958658754358975
},
'welch': {
'df': 25.945257024943864,
'p-value': 0.004201616692122823
}
}
}
difference = other1.diff(other2)
self.assertDictEqual(expected_diff, difference)
# Assert type error is properly called
with self.assertRaises(TypeError) as exc:
other1.diff("Inproper input")
self.assertEqual(str(exc.exception),
"Unsupported operand type(s) for diff: 'TestColumn' and"
" 'str'")
|
py
|
1a5be06f9e9ab161f65452b3913ec90b2b24a280
|
# Copyright 2018-2021 The glTF-Blender-IO authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import bpy
from ..com.gltf2_blender_extras import set_extras
from .gltf2_blender_pbrMetallicRoughness import MaterialHelper, pbr_metallic_roughness
from .gltf2_blender_KHR_materials_pbrSpecularGlossiness import pbr_specular_glossiness
from .gltf2_blender_KHR_materials_unlit import unlit
from io_scene_gltf2.io.imp.gltf2_io_user_extensions import import_user_extensions
class BlenderMaterial():
"""Blender Material."""
def __new__(cls, *args, **kwargs):
raise RuntimeError("%s should not be instantiated" % cls)
@staticmethod
def create(gltf, material_idx, vertex_color):
"""Material creation."""
pymaterial = gltf.data.materials[material_idx]
import_user_extensions('gather_import_material_before_hook', gltf, pymaterial, vertex_color)
name = pymaterial.name
if name is None:
name = "Material_" + str(material_idx)
mat = bpy.data.materials.new(name)
pymaterial.blender_material[vertex_color] = mat.name
set_extras(mat, pymaterial.extras)
BlenderMaterial.set_double_sided(pymaterial, mat)
BlenderMaterial.set_alpha_mode(pymaterial, mat)
BlenderMaterial.set_viewport_color(pymaterial, mat, vertex_color)
mat.use_nodes = True
while mat.node_tree.nodes: # clear all nodes
mat.node_tree.nodes.remove(mat.node_tree.nodes[0])
mh = MaterialHelper(gltf, pymaterial, mat, vertex_color)
exts = pymaterial.extensions or {}
if 'KHR_materials_unlit' in exts:
unlit(mh)
elif 'KHR_materials_pbrSpecularGlossiness' in exts:
pbr_specular_glossiness(mh)
else:
pbr_metallic_roughness(mh)
import_user_extensions('gather_import_material_after_hook', gltf, pymaterial, vertex_color, mat)
@staticmethod
def set_double_sided(pymaterial, mat):
mat.use_backface_culling = (pymaterial.double_sided != True)
@staticmethod
def set_alpha_mode(pymaterial, mat):
alpha_mode = pymaterial.alpha_mode
if alpha_mode == 'BLEND':
mat.blend_method = 'BLEND'
elif alpha_mode == 'MASK':
mat.blend_method = 'CLIP'
alpha_cutoff = pymaterial.alpha_cutoff
alpha_cutoff = alpha_cutoff if alpha_cutoff is not None else 0.5
mat.alpha_threshold = alpha_cutoff
@staticmethod
def set_viewport_color(pymaterial, mat, vertex_color):
# If there is no texture and no vertex color, use the base color as
# the color for the Solid view.
if vertex_color:
return
exts = pymaterial.extensions or {}
if 'KHR_materials_pbrSpecularGlossiness' in exts:
# TODO
return
else:
pbr = pymaterial.pbr_metallic_roughness
if pbr is None or pbr.base_color_texture is not None:
return
color = pbr.base_color_factor or [1, 1, 1, 1]
mat.diffuse_color = color
|
py
|
1a5be17003c4d0fa815a3a98849095d97f58a6cd
|
from libtooling import *
def call_finder_cb(match, rewriter):
print("function call {} at: {}".format(match.name, match.location.line))
rewriter.append(match.location, "_blub")
def int_finder_cb(match, rewriter):
print("integer {} at: {}".format(match.name, match.location.line))
def heee(v):
print("huhuu {}".format(v))
def fn_finder_cb(match, id):
print(id + ": fn at ")
#call_finder = callExpr()
#call_finder.bind(heee)
#parameterCountIs(0)
#parameterCountIs(1)
#int_finder = varDecl(hasType(isInteger()))
#int_finder.bind(int_finder)
tooling = Tooling("../tests/simple.cc")
fn_finder = functionDecl(hasName("print")).bind("fdg")
tooling.add(fn_finder, fn_finder_cb)
#hub1("hub1")
#hub2("hub2")
#hub3("hub3")
#tooling.add(intFinder)
#tooling.add(call_finder)
tooling.run()
#tooling.write_file("new.cc")
|
py
|
1a5be1cc8f09074f51871b05ca06ebe7db3123ef
|
from codecs import open # To use a consistent encoding
from os import path
from setuptools import setup
HERE = path.dirname(path.abspath(__file__))
# Get version info
ABOUT = {}
with open(path.join(HERE, 'datadog_checks', 'redis_sentinel', '__about__.py')) as f:
exec(f.read(), ABOUT)
# Get the long description from the README file
with open(path.join(HERE, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
def get_dependencies():
dep_file = path.join(HERE, 'requirements.in')
if not path.isfile(dep_file):
return []
with open(dep_file, encoding='utf-8') as f:
return f.readlines()
CHECKS_BASE_REQ = 'datadog-checks-base>=4.2.0'
setup(
name='datadog-redis_sentinel',
version=ABOUT['__version__'],
description='The Redis_sentinel check',
long_description=long_description,
long_description_content_type='text/markdown',
keywords='datadog agent redis_sentinel check',
# The project's main homepage.
url='https://github.com/DataDog/integrations-extras',
# Author details
author='krasnoukhov',
# License
license='BSD-3-Clause',
# See https://pypi.org/classifiers
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'Topic :: System :: Monitoring',
'License :: OSI Approved :: BSD License',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.7',
],
# The package we're going to ship
packages=['datadog_checks.redis_sentinel'],
# Run-time dependencies
install_requires=[CHECKS_BASE_REQ],
extras_require={'deps': get_dependencies()},
# Extra files to ship with the wheel package
include_package_data=True,
)
|
py
|
1a5be37b94ca0087000800290cf3ac9a649b9b63
|
# Copyright 2021 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Common layers used for modeling."""
from typing import Optional, Tuple
import tensorflow as tf
class PadLayer(tf.keras.layers.Layer):
"""Implements circular and regular padding."""
def __init__(self, padding: int, circular_pad: bool = False, **kwargs):
"""Instantiates a PadLayer.
Args:
padding: Size of padding in pixels.
circular_pad: If true, uses circular padding along the width dimension.
**kwargs: Additional arguments passed to tf.keras.layers.Layer.
"""
super().__init__(**kwargs)
self.padding = padding
self.circular_pad = circular_pad
def call(self, inputs: tf.Tensor, training=None) -> tf.Tensor:
"""Implements forward pass for padding.
Args:
inputs: tf.Tensor input of shape (N, H, W, C).
training: Whether the layer is in training mode.
Returns:
tf.Tensor, the padded output.
"""
batch_size, height, width, channels = inputs.shape
left_pad = tf.zeros((batch_size, height, self.padding, channels),
dtype=inputs.dtype)
right_pad = tf.zeros((batch_size, height, self.padding, channels),
dtype=inputs.dtype)
if self.circular_pad:
left_pad = inputs[:, :, -self.padding:, :]
right_pad = inputs[:, :, :self.padding, :]
top_pad = tf.zeros(
(batch_size, self.padding, width + self.padding * 2, channels),
dtype=inputs.dtype)
bottom_pad = tf.zeros(
(batch_size, self.padding, width + self.padding * 2, channels),
dtype=inputs.dtype)
padded_tensor = tf.concat([left_pad, inputs, right_pad], axis=2)
padded_tensor = tf.concat([bottom_pad, padded_tensor, top_pad], axis=1)
return padded_tensor
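# Illustrative check (a sketch, not part of the library): with circular_pad=True
# the width dimension wraps around while the height is zero-padded, e.g.
#   x = tf.reshape(tf.range(4, dtype=tf.float32), (1, 1, 4, 1))
#   PadLayer(1, circular_pad=True)(x)
#   # width grows from 4 to 6; the middle row holds [3, 0, 1, 2, 3, 0]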
class Bottleneck(tf.keras.Model):
"""ResNet bottleneck block."""
def __init__(self,
filters: int = 128,
strides: int = 1,
expansion: int = 4,
downsample=None,
circular_pad: bool = False):
super(Bottleneck, self).__init__()
self.shortcut = None
self.main = tf.keras.Sequential([
tf.keras.layers.Conv2D(
filters, kernel_size=1, strides=1, padding='SAME'),
tf.keras.layers.experimental.SyncBatchNormalization(),
tf.keras.layers.ReLU(),
PadLayer(1, circular_pad=circular_pad),
tf.keras.layers.Conv2D(
filters, kernel_size=3, strides=strides, padding='VALID'),
tf.keras.layers.experimental.SyncBatchNormalization(),
tf.keras.layers.ReLU(),
tf.keras.layers.Conv2D(
expansion * filters, kernel_size=1, strides=1, padding='SAME'),
tf.keras.layers.experimental.SyncBatchNormalization(),
])
self.relu = tf.keras.layers.ReLU()
self.downsample = downsample
def call(self, x: tf.Tensor, training=None) -> tf.Tensor:
residual = x
out = self.main(x)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class SpectralConv(tf.keras.layers.Conv2D):
"""Convolution with spectral normalization applied to weights.
From "Spectral Normalization for Generative Adversarial Networks"
https://arxiv.org/abs/1802.05957
"""
def build(self, input_shape):
was_built = self.built
tf.keras.layers.Conv2D.build(self, input_shape)
self.built = was_built
output_dims = self.kernel.shape[-1]
self.u = self.add_weight(
name=self.name + '_u',
shape=[1, output_dims],
dtype=tf.float32,
initializer=tf.initializers.TruncatedNormal(),
trainable=False,
aggregation=tf.VariableAggregation.MEAN)
if not isinstance(self.padding, (list, tuple)):
self.padding = self.padding.upper()
self.built = True
def call(self, feature, training=None):
"""Forward pass applying spectral normalized convolution.
Args:
feature: Float tensor of shape (N, H, W, C), representing input feature.
training: Represents whether the layer is in training mode.
Returns:
out: Float tensor of shape (N, H, W, output_dims), representing output
feature after applying a spectral normalized convolution.
"""
# For preventing division by 0.
eps = 1e-10
# Flatten weight matrix.
w_shape = self.kernel.shape
w = tf.reshape(self.kernel, [-1, w_shape[-1]])
# One step of power iteration.
v = tf.matmul(self.u, w, transpose_b=True)
v_hat = v / (tf.norm(v) + eps)
u = tf.matmul(v_hat, w)
u_hat = u / (tf.norm(u) + eps)
u_hat = tf.stop_gradient(u_hat)
v_hat = tf.stop_gradient(v_hat)
sigma = tf.matmul(tf.matmul(v_hat, w), u_hat, transpose_b=True)
if training:
self.u.assign(u_hat)
w_norm = w / (sigma + eps)
w_norm = tf.reshape(w_norm, w_shape)
out = tf.nn.conv2d(
input=feature,
filters=w_norm,
strides=self.strides,
dilations=self.dilation_rate,
padding=self.padding)
if self.use_bias:
out = out + self.bias
if self.activation:
out = self.activation(out)
return out
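# Illustrative usage (a sketch, not part of the library): SpectralConv keeps the
# tf.keras.layers.Conv2D constructor signature, e.g.
#   conv = SpectralConv(filters=64, kernel_size=3, padding='same')
#   y = conv(tf.zeros((1, 32, 32, 3)), training=True)  # one power-iteration step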
class PartialConv(tf.keras.layers.Conv2D):
"""Partial 2D convolution.
From "Image inpainting for irregular holes using partial convolutions.",
Liu et al., ECCV 2018.
"""
def build(self, input_shape):
was_built = self.built
tf.keras.layers.Conv2D.build(self, input_shape)
self.built = was_built
ks_height, ks_width, _, _ = self.kernel.shape
self.weight_mask_updater = tf.ones((ks_height, ks_width, 1, 1))
self.slide_window_size = ks_height * ks_width * 1
self.built = True
def call(self,
feature: tf.Tensor,
mask: Optional[tf.Tensor] = None,
training=None) -> Tuple[tf.Tensor, tf.Tensor]:
"""Forward pass applying partial convolution.
Args:
feature: Float tensor of shape (N, H, W, C) representing input feature.
mask: Binary float tensor of shape (N, H, W, 1) representing valid pixels.
training: Represents whether the layer is in training mode.
Returns:
out: Float tensor of shape (N, H, W, output_dims), representing output
feature after applying a partial convolution.
"""
if mask is None:
mask = tf.ones((feature.shape[0], feature.shape[1], feature.shape[2], 1))
eps = 1e-6
update_mask = tf.nn.conv2d(
mask,
self.weight_mask_updater,
strides=self.strides,
padding=self.padding.upper())
mask_ratio = self.slide_window_size / (update_mask + eps)
update_mask = tf.clip_by_value(update_mask, 0, 1)
mask_ratio = mask_ratio * update_mask
mask = tf.stop_gradient(mask)
update_mask = tf.stop_gradient(update_mask)
mask_ratio = tf.stop_gradient(mask_ratio)
out = feature * mask
out = tf.nn.conv2d(
input=out,
filters=self.kernel,
strides=self.strides,
padding=self.padding.upper())
if self.bias is not None:
bias = tf.reshape(self.bias, (1, 1, 1, -1))
out = (out - bias) * mask_ratio + bias
out = out * update_mask
else:
out = out * mask_ratio
return out, update_mask
class ResStack(tf.keras.Model):
"""Single ResNet stack consisting of multiple Bottleneck blocks."""
def __init__(self,
inplanes: int,
planes: int,
blocks: int,
strides: int = 1,
expansion: int = 4,
circular_pad: bool = False):
super(ResStack, self).__init__()
downsample = None
if strides != 1 or inplanes != planes * expansion:
downsample = tf.keras.Sequential([
tf.keras.layers.Conv2D(
planes * expansion,
kernel_size=1,
strides=strides,
padding='SAME',
use_bias=False),
tf.keras.layers.experimental.SyncBatchNormalization()
])
block_models = [
Bottleneck(
planes,
strides=strides,
expansion=expansion,
downsample=downsample,
circular_pad=circular_pad)
]
for _ in range(blocks - 1):
block_models.append(
Bottleneck(planes, expansion=expansion, circular_pad=circular_pad))
self.block = tf.keras.Sequential(block_models)
def call(self, x: tf.Tensor, training=None) -> tf.Tensor:
return self.block(x)
class TransBasicBlock(tf.keras.Model):
"""Bottleneck block with transposed convolutions.
This block performs upsampling if required.
"""
def __init__(self,
inplanes: int,
planes: int,
blocks: int,
strides: int = 1,
upsample=None,
circular_pad: bool = False):
super(TransBasicBlock, self).__init__()
conv2 = None
if upsample is not None and strides != 1:
conv2 = tf.keras.layers.Conv2DTranspose(
planes,
kernel_size=3,
strides=strides,
padding='SAME',
output_padding=1,
use_bias=False)
else:
conv2 = tf.keras.Sequential([
PadLayer(1, circular_pad=circular_pad),
tf.keras.layers.Conv2D(
planes,
kernel_size=3,
strides=strides,
padding='VALID',
use_bias=False)
])
self.main = tf.keras.Sequential([
PadLayer(1, circular_pad=circular_pad),
tf.keras.layers.Conv2D(
inplanes, kernel_size=3, strides=1, padding='VALID',
use_bias=False),
tf.keras.layers.experimental.SyncBatchNormalization(),
tf.keras.layers.ReLU(),
conv2,
tf.keras.layers.experimental.SyncBatchNormalization(),
])
self.upsample = upsample
self.relu = tf.keras.layers.ReLU()
def call(self, x: tf.Tensor, training=None) -> tf.Tensor:
residual = x
out_x = self.main(x)
if self.upsample is not None:
residual = self.upsample(x)
out_x += residual
out_x = self.relu(out_x)
return out_x
class ResStackTranspose(tf.keras.Model):
"""ResNet stack consisting of transposed blocks.
This stack performs upsampling if required (if strides > 1).
"""
def __init__(self,
inplanes: int,
planes: int,
blocks: int,
strides: int = 1,
circular_pad: bool = False):
super(ResStackTranspose, self).__init__()
upsample = None
if strides != 1:
upsample = tf.keras.Sequential([
tf.keras.layers.Conv2DTranspose(
planes,
kernel_size=2,
strides=strides,
padding='VALID',
use_bias=False),
tf.keras.layers.experimental.SyncBatchNormalization()
])
elif inplanes != planes:
upsample = tf.keras.Sequential([
tf.keras.layers.Conv2D(
planes, kernel_size=1, strides=strides, use_bias=False),
tf.keras.layers.experimental.SyncBatchNormalization()
])
block_models = []
for _ in range(blocks - 1):
block_models.append(
TransBasicBlock(
inplanes, inplanes, blocks, circular_pad=circular_pad))
block_models += [
TransBasicBlock(
inplanes,
planes,
blocks,
strides,
upsample=upsample,
circular_pad=circular_pad)
]
self.block = tf.keras.Sequential(block_models)
def call(self, x: tf.Tensor, training=None) -> tf.Tensor:
return self.block(x)
|
py
|
1a5be3999adb54e3a214326ffa606f2e7fb24abd
|
# -*- coding: utf-8 -*-
import random
from common.base_test import BaseTest
import lemoncheesecake.api as lcc
from lemoncheesecake.matching import (
check_that, check_that_in, equal_to, has_entry, has_length, is_integer, is_none, is_str, is_true, require_that
)
SUITE = {
"description": "Method 'get_contract_logs'"
}
@lcc.prop("main", "type")
@lcc.prop("positive", "type")
@lcc.prop("negative", "type")
@lcc.tags("api", "database_api", "database_api_contracts", "get_contract_logs")
@lcc.suite("Check work of method 'get_contract_logs'", rank=1)
class GetContractLogs(BaseTest):
def __init__(self):
super().__init__()
self.__database_api_identifier = None
self.__registration_api_identifier = None
self.echo_acc0 = None
self.piggy = self.get_byte_code("piggy", "code")
self.getPennie = self.get_byte_code("piggy", "pennieReturned()")
def setup_suite(self):
super().setup_suite()
self._connect_to_echopy_lib()
lcc.set_step("Setup for {}".format(self.__class__.__name__))
self.__database_api_identifier = self.get_identifier("database")
self.__registration_api_identifier = self.get_identifier("registration")
lcc.log_info(
"API identifiers are: database='{}', registration='{}'".format(
self.__database_api_identifier, self.__registration_api_identifier
)
)
self.echo_acc0 = self.get_account_id(
self.accounts[0], self.__database_api_identifier, self.__registration_api_identifier
)
def teardown_suite(self):
self._disconnect_to_echopy_lib()
super().teardown_suite()
@lcc.test("Simple work of method 'get_contract_logs'")
def method_main_check(self, get_random_integer_up_to_ten, get_random_integer):
value_amount = get_random_integer_up_to_ten
subscription_callback_id = get_random_integer
max_limit = 1000
lcc.set_step("Create contract in the Echo network and get it's contract id")
contract_id = self.utils.get_contract_id(
self, self.echo_acc0, self.piggy, self.__database_api_identifier, value_amount=value_amount
)
lcc.set_step("Call contract method 'getPennie' and get trx block number")
operation = self.echo_ops.get_contract_call_operation(
echo=self.echo, registrar=self.echo_acc0, bytecode=self.getPennie, callee=contract_id
)
collected_operation = self.collect_operations(operation, self.__database_api_identifier)
broadcast_result = self.echo_ops.broadcast(
echo=self.echo, list_operations=collected_operation, log_broadcast=False
)
block_num = broadcast_result["block_num"]
_from = 1
lcc.log_info("Method 'getPennie' performed successfully, block_num: '{}'".format(block_num))
lcc.set_step("Get contract logs from '{}' block to max_limit '{}'".format(_from, max_limit))
params = [
subscription_callback_id, {
"contracts": [contract_id],
"topics": [],
"from_block": _from,
"to_block": max_limit
}
]
response_id = self.send_request(self.get_request("get_contract_logs", params), self.__database_api_identifier)
response = self.get_response(response_id)
require_that("'expired transaction result'", response["result"], is_none())
logs = self.get_notice(subscription_callback_id)
lcc.log_info("Call method 'get_contract_logs' with params: '{}'".format(params))
lcc.set_step("Check contract logs")
require_that("'log has value'", bool(logs), is_true(), quiet=True)
for log in logs:
if check_that("contract_log", log[1], has_length(6)):
contract_id_that_called = self.get_contract_id(
log[1]["address"], address_format=True, new_contract=False
)
require_that("contract_id", contract_id_that_called, equal_to(contract_id), quiet=True)
log_values = log[1]["log"]
for log_value in log_values:
if not self.type_validator.is_hex(log_value):
lcc.log_error("Wrong format of 'log_value', got: {}".format(log_value))
else:
lcc.log_info("'log_value' has correct format: hex")
check_that_in(
log[1],
"data",
is_str(),
"block_num",
is_integer(),
"trx_num",
is_integer(),
"op_num",
is_integer(),
quiet=True
)
@lcc.disabled()
@lcc.prop("positive", "type")
@lcc.tags("api", "database_api", "database_api_contracts", "get_contract_logs")
@lcc.suite("Positive testing of method 'get_contract_logs'", rank=2)
class PositiveTesting(BaseTest):
def __init__(self):
super().__init__()
self.__database_api_identifier = None
self.__registration_api_identifier = None
self.echo_acc0 = None
self.piggy_contract = self.get_byte_code("piggy", "code")
self.getPennie = self.get_byte_code("piggy", "pennieReturned()")
self.dynamic_fields_contract = self.get_byte_code("dynamic_fields", "code")
self.set_all_values = self.get_byte_code("dynamic_fields", "setAllValues(uint256,string)")
self.get_string = self.get_byte_code("dynamic_fields", "getString()")
def get_random_int(self, _to, _from=1):
amount = random.randrange(_from, _to)
if amount == _to:
return self.get_random_int(_to=_to, _from=_from)
return amount
def get_head_block_number(self):
response_id = self.send_request(
self.get_request("get_dynamic_global_properties"), self.__database_api_identifier
)
head_block_number = self.get_response(response_id)["result"]["head_block_number"]
lcc.log_info("head block number: {}".format(head_block_number))
return head_block_number
def get_contract_logs(self, callback_id=None, contract_id=None, _list=[], _from=None, limit=100, params=None):
if params is None:
params = [
callback_id, {
"contracts": [contract_id],
"topics": _list,
"from_block": _from,
"to_block": limit
}
]
response_id = self.send_request(self.get_request("get_contract_logs", params), self.__database_api_identifier)
return self.get_response(response_id, log_response=True)["result"]
def setup_suite(self):
super().setup_suite()
self._connect_to_echopy_lib()
lcc.set_step("Setup for {}".format(self.__class__.__name__))
self.__database_api_identifier = self.get_identifier("database")
self.__registration_api_identifier = self.get_identifier("registration")
lcc.log_info(
"API identifiers are: database='{}', registration='{}'".format(
self.__database_api_identifier, self.__registration_api_identifier
)
)
self.echo_acc0 = self.get_account_id(
self.accounts[0], self.__database_api_identifier, self.__registration_api_identifier
)
lcc.log_info("Echo account is '{}'".format(self.echo_acc0))
def teardown_suite(self):
self._disconnect_to_echopy_lib()
super().teardown_suite()
@lcc.test("Check contract logs two identical contract calls")
@lcc.depends_on("API.DatabaseApi.Contracts.GetContractLogs.GetContractLogs.method_main_check")
def check_contract_logs_two_identical_contract_calls(self, get_random_integer):
value_amount = get_random_integer
callback_id = get_random_integer
call_count, _from = 2, 1
lcc.set_step("Create contract in the Echo network and get it's contract id")
contract_id = self.utils.get_contract_id(
self, self.echo_acc0, self.piggy_contract, self.__database_api_identifier, value_amount=value_amount
)
lcc.set_step("Call contract method getPennie two times and get trx block number")
for i in range(call_count):
operation = self.echo_ops.get_contract_call_operation(
echo=self.echo, registrar=self.echo_acc0, bytecode=self.getPennie, callee=contract_id
)
collected_operation = self.collect_operations(operation, self.__database_api_identifier)
broadcast_result = self.echo_ops.broadcast(
echo=self.echo, list_operations=collected_operation, log_broadcast=False
)
block_num = broadcast_result["block_num"]
lcc.log_info("Method #'{}' 'getPennie' performed successfully, block_num: '{}'".format(i, block_num))
lcc.set_step("Get contract logs after two identical contract calls")
_from = 0
params = [
callback_id, {
"contracts": [contract_id],
"topics": [],
"from_block": _from,
"to_block": block_num + 1
}
]
result = self.get_contract_logs(params=params)
require_that("'result'", result, is_none())
lcc.log_info("Call method 'get_contract_logs' with params: '{}'".format(params))
lcc.set_step("Check contract logs two identical contract calls")
get_contract_logs_results = self.get_notice(callback_id)
require_that("'log has value'", bool(get_contract_logs_results), is_true(), quiet=True)
for i in range(len(get_contract_logs_results) - 1):
check_that(
"'contract logs two identical contract calls are the same'",
get_contract_logs_results[i][1]["address"] == get_contract_logs_results[i + 1][1]["address"], is_true()
)
check_that(
"'contract logs two identical contract calls are the same'",
get_contract_logs_results[i][1]["log"] == get_contract_logs_results[i + 1][1]["log"], is_true()
)
check_that(
"'contract logs two identical contract calls are the same'",
get_contract_logs_results[i][1]["data"] == get_contract_logs_results[i + 1][1]["data"], is_true()
)
@lcc.test("Check contract logs contract call that make two different logs")
@lcc.depends_on("API.DatabaseApi.Contracts.GetContractLogs.GetContractLogs.method_main_check")
def check_contract_logs_contract_call_that_make_two_different_logs(self, get_random_integer, get_random_string):
callback_id = get_random_integer
int_param = get_random_integer
string_param = get_random_string
lcc.set_step("Create 'dynamic_fields' contract in the Echo network and get it's contract id")
contract_id = self.utils.get_contract_id(
self, self.echo_acc0, self.dynamic_fields_contract, self.__database_api_identifier
)
lcc.set_step("Call method 'get_string'")
operation = self.echo_ops.get_contract_call_operation(
echo=self.echo, registrar=self.echo_acc0, bytecode=self.get_string, callee=contract_id
)
collected_operation = self.collect_operations(operation, self.__database_api_identifier)
self.echo_ops.broadcast(echo=self.echo, list_operations=collected_operation, log_broadcast=False)
lcc.log_info("Method 'get_string' performed successfully")
lcc.set_step("Call method of dynamic_fields contract: 'set_all_values'")
int_param_code = self.get_byte_code_param(int_param, param_type=int)
string_param_code = self.get_byte_code_param(string_param, param_type=str, offset="40")
method_params = int_param_code + string_param_code
operation = self.echo_ops.get_contract_call_operation(
echo=self.echo, registrar=self.echo_acc0, bytecode=self.set_all_values + method_params, callee=contract_id
)
collected_operation = self.collect_operations(operation, self.__database_api_identifier)
broadcast_result = self.echo_ops.broadcast(
echo=self.echo, list_operations=collected_operation, log_broadcast=False
)
block_num = broadcast_result["block_num"]
_from = 1
lcc.log_info("Method 'set_all_values' performed successfully, block_num: '{}'".format(block_num))
lcc.set_step("Get contract logs after two different contract calls")
params = [
callback_id, {
"contracts": [contract_id],
"topics": [],
"from_block": _from,
"to_block": block_num + 1
}
]
self.get_contract_logs(params=params)
lcc.log_info("Call method 'get_contract_logs' with params: '{}'".format(params))
lcc.set_step("Check contract logs contract call that make two different logs")
get_contract_logs_results = self.get_notice(callback_id)
require_that("'log has value'", bool(get_contract_logs_results), is_true(), quiet=True)
for i in range(len(get_contract_logs_results) - 1):
check_that(
"'contract logs are not the same'", get_contract_logs_results[i] != get_contract_logs_results[i + 1],
is_true()
)
@lcc.disabled()
@lcc.prop("negative", "type")
@lcc.tags("api", "database_api", "database_api_contracts", "get_contract_logs")
@lcc.suite("Negative testing of method 'get_contract_logs'", rank=3)
class NegativeTesting(BaseTest):
def __init__(self):
super().__init__()
self.__database_api_identifier = None
self.__registration_api_identifier = None
self.echo_acc0 = None
self.piggy_contract = self.get_byte_code("piggy", "code")
self.getPennie = self.get_byte_code("piggy", "pennieReturned()")
def get_random_int(self, _to, _from=1):
amount = random.randrange(_from, _to)
if amount == _to:
return self.get_random_int(_to=_to, _from=_from)
return amount
def setup_suite(self):
super().setup_suite()
self._connect_to_echopy_lib()
lcc.set_step("Setup for {}".format(self.__class__.__name__))
self.__database_api_identifier = self.get_identifier("database")
self.__registration_api_identifier = self.get_identifier("registration")
lcc.log_info(
"API identifiers are: database='{}', registration='{}'".format(
self.__database_api_identifier, self.__registration_api_identifier
)
)
self.echo_acc0 = self.get_account_id(
self.accounts[0], self.__database_api_identifier, self.__registration_api_identifier
)
lcc.log_info("Echo account is '{}'".format(self.echo_acc0))
def teardown_suite(self):
self._disconnect_to_echopy_lib()
super().teardown_suite()
@lcc.test("Call method with negative parameter 'limit'")
@lcc.depends_on("API.DatabaseApi.Contracts.GetContractLogs.GetContractLogs.method_main_check")
def check_contract_logs_with_negative_parameter_limit(self, get_random_integer):
value_amount = get_random_integer
callback_id = get_random_integer
max_limit = 100
lcc.set_step("Create contract in the Echo network and get it's contract id")
contract_id = self.utils.get_contract_id(
self, self.echo_acc0, self.piggy_contract, self.__database_api_identifier, value_amount=value_amount
)
lcc.set_step("Call contract method getPennie")
operation = self.echo_ops.get_contract_call_operation(
echo=self.echo, registrar=self.echo_acc0, bytecode=self.getPennie, callee=contract_id
)
collected_operation = self.collect_operations(operation, self.__database_api_identifier)
self.echo_ops.broadcast(echo=self.echo, list_operations=collected_operation, log_broadcast=False)
_from = 1
lcc.log_info("Method 'getPennie' performed successfully")
lcc.set_step("Get negative block number")
negative_block_num = self.get_random_int(_to=max_limit) * -1
lcc.log_info("negative block number: {}".format(negative_block_num))
lcc.set_step("Get contract logs with 'limit' param is negative block number")
params = [callback_id, contract_id, _from, negative_block_num]
response_id = self.send_request(self.get_request("get_contract_logs", params), self.__database_api_identifier)
response = self.get_response(response_id, negative=True)
lcc.log_info(
"Call method 'get_contract_logs' with params: from='{}', limit='{}'".format(_from, negative_block_num)
)
lcc.set_step("Check contract logs")
check_that(
"'get_contract_logs' return error message with '{}' params".format(params),
response,
has_entry("error"),
quiet=True
)
@lcc.test("Call method with parameter 'limit' more than max value")
@lcc.depends_on("API.DatabaseApi.Contracts.GetContractLogs.GetContractLogs.method_main_check")
def check_contract_logs_with_parameter_limit_more_than_max_value(self, get_random_integer):
value_amount = get_random_integer
max_limit = 100
lcc.set_step("Create contract in the Echo network and get it's contract id")
contract_id = self.utils.get_contract_id(
self, self.echo_acc0, self.piggy_contract, self.__database_api_identifier, value_amount=value_amount
)
lcc.set_step("Call contract method getPennie")
operation = self.echo_ops.get_contract_call_operation(
echo=self.echo, registrar=self.echo_acc0, bytecode=self.getPennie, callee=contract_id
)
collected_operation = self.collect_operations(operation, self.__database_api_identifier)
self.echo_ops.broadcast(echo=self.echo, list_operations=collected_operation, log_broadcast=False)
_from = 1
lcc.log_info("Method 'getPennie' performed successfully")
lcc.set_step("Get limit param more than max limit")
more_than_max_limit = self.get_random_int(_from=max_limit, _to=value_amount)
lcc.log_info("more than limit number: {}".format(more_than_max_limit))
lcc.set_step("Get contract logs with 'limit' param is more than max limit")
params = [contract_id, _from, more_than_max_limit]
response_id = self.send_request(self.get_request("get_contract_logs", params), self.__database_api_identifier)
response = self.get_response(response_id, negative=True)
lcc.log_info(
"Call method 'get_contract_logs' with params: from='{}', limit='{}'".format(_from, more_than_max_limit)
)
lcc.set_step("Check contract logs")
check_that(
"'get_contract_logs' return error message with '{}' params".format(params),
response,
has_entry("error"),
quiet=True
)
@lcc.test("Call method with parameter 'limit' equal zero")
@lcc.depends_on("API.DatabaseApi.Contracts.GetContractLogs.GetContractLogs.method_main_check")
def check_contract_logs_with_parameter_limit_equal_zero(self, get_random_integer_up_to_ten, get_random_integer):
value_amount = get_random_integer_up_to_ten
subscription_callback_id = get_random_integer
max_limit = 0
error = "Assert Exception: !opts.to_block || *opts.to_block > 0: to_block must be greater than zero"
lcc.set_step("Create contract in the Echo network and get it's contract id")
contract_id = self.utils.get_contract_id(
self, self.echo_acc0, self.piggy_contract, self.__database_api_identifier, value_amount=value_amount
)
lcc.set_step("Call contract method getPennie and get trx block number")
operation = self.echo_ops.get_contract_call_operation(
echo=self.echo, registrar=self.echo_acc0, bytecode=self.getPennie, callee=contract_id
)
collected_operation = self.collect_operations(operation, self.__database_api_identifier)
broadcast_result = self.echo_ops.broadcast(
echo=self.echo, list_operations=collected_operation, log_broadcast=False
)
block_num = broadcast_result["block_num"]
_from = 1
lcc.log_info("Method 'getPennie' performed successfully, block_num: '{}'".format(block_num))
lcc.set_step("Get contract logs from '{}' block to max_limit '{}'".format(_from, max_limit))
params = [
subscription_callback_id, {
"contracts": [contract_id],
"topics": [],
"from_block": _from,
"to_block": max_limit
}
]
response_id = self.send_request(self.get_request("get_contract_logs", params), self.__database_api_identifier)
message = self.get_response(response_id, negative=True)["error"]["message"]
require_that("'expired transaction result'", message, equal_to(error))
|
py
|
1a5be3c7e55762f608f3b442697a32a0add846c1
|
from flask import Flask, render_template , redirect , request
app = Flask(__name__)
@app.route('/')
def home():
return render_template('index.html')
@app.route('/process', methods = ['GET','POST'])
def process():
if request.method == "POST":
get_name = request.form.get("input_name")
return redirect(f'about/{get_name}')
return "Error"
@app.route('/about/<string:get_name>')
def about(get_name):
return render_template('about.html',get_name=get_name)
if __name__ == '__main__':
app.run(debug=True)
|
py
|
1a5be431bf37830e041327afecb6de35084e3184
|
# -*- coding: utf-8 -*-
"""
test_util_inventory
~~~~~~~~~~~~~~~~~~~
Test inventory util functions.
:copyright: Copyright 2007-2016 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import zlib
import posixpath
from six import BytesIO
from sphinx.ext.intersphinx import InventoryFile
inventory_v1 = '''\
# Sphinx inventory version 1
# Project: foo
# Version: 1.0
module mod foo.html
module.cls class foo.html
'''.encode('utf-8')
inventory_v2 = '''\
# Sphinx inventory version 2
# Project: foo
# Version: 2.0
# The remainder of this file is compressed with zlib.
'''.encode('utf-8') + zlib.compress('''\
module1 py:module 0 foo.html#module-module1 Long Module desc
module2 py:module 0 foo.html#module-$ -
module1.func py:function 1 sub/foo.html#$ -
CFunc c:function 2 cfunc.html#CFunc -
std cpp:type 1 index.html#std -
std::uint8_t cpp:type 1 index.html#std_uint8_t -
foo::Bar cpp:class 1 index.html#cpp_foo_bar -
foo::Bar::baz cpp:function 1 index.html#cpp_foo_bar_baz -
a term std:term -1 glossary.html#term-a-term -
ls.-l std:cmdoption 1 index.html#cmdoption-ls-l -
docname std:doc -1 docname.html -
foo js:module 1 index.html#foo -
foo.bar js:class 1 index.html#foo.bar -
foo.bar.baz js:method 1 index.html#foo.bar.baz -
foo.bar.qux js:data 1 index.html#foo.bar.qux -
a term including:colon std:term -1 glossary.html#term-a-term-including-colon -
'''.encode('utf-8'))
def test_read_inventory_v1():
f = BytesIO(inventory_v1)
invdata = InventoryFile.load(f, '/util', posixpath.join)
assert invdata['py:module']['module'] == \
('foo', '1.0', '/util/foo.html#module-module', '-')
assert invdata['py:class']['module.cls'] == \
('foo', '1.0', '/util/foo.html#module.cls', '-')
def test_read_inventory_v2():
f = BytesIO(inventory_v2)
invdata = InventoryFile.load(f, '/util', posixpath.join)
assert len(invdata['py:module']) == 2
assert invdata['py:module']['module1'] == \
('foo', '2.0', '/util/foo.html#module-module1', 'Long Module desc')
assert invdata['py:module']['module2'] == \
('foo', '2.0', '/util/foo.html#module-module2', '-')
assert invdata['py:function']['module1.func'][2] == \
'/util/sub/foo.html#module1.func'
assert invdata['c:function']['CFunc'][2] == '/util/cfunc.html#CFunc'
assert invdata['std:term']['a term'][2] == \
'/util/glossary.html#term-a-term'
assert invdata['std:term']['a term including:colon'][2] == \
'/util/glossary.html#term-a-term-including-colon'
|
py
|
1a5be4d880c608316fb61b8731b04a23d9c654b9
|
from typing import Tuple
import torch
from torch import Tensor
def onehot_inputs_and_targets(inputs: Tensor, targets: Tensor) -> Tuple[Tensor, Tensor]:
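"""Convert per-class scores and integer class targets into one-hot tensors.
The argmax of `inputs` along the last dimension is scattered into a one-hot
prediction tensor, and `targets` (integer class indices) is scattered into a
matching one-hot ground-truth tensor of the same shape as `inputs`.
"""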
expanded_targets = targets.unsqueeze(-1)
fill = inputs.new_ones(expanded_targets.shape)
pred = torch.zeros_like(inputs).scatter_add(-1, inputs.argmax(dim=-1, keepdim=True), fill)
tru = torch.zeros_like(inputs).scatter_add(-1, expanded_targets, fill)
return pred, tru
|
py
|
1a5be511d770f7a5cf252db720d69df666b745c4
|
import unittest
from rossby import Rossby
import requests
class BaseTestClass(unittest.TestCase):
rossby = Rossby()
session = requests.Session()
def plain_request(self, endpoint, params):
resp = self.session.get(f"https://api.weather.gov/{endpoint}", params=params)
return resp.json()
def icon_request(self, endpoint, params):
resp = self.session.get(f"https://api.weather.gov/{endpoint}", params=params)
return resp
|
py
|
1a5be5736171814d190642813936a36ef22231ab
|
# -*- coding: utf-8 -*-
#
# Sphinx documentation build configuration file
import re
import sphinx
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.doctest', 'sphinx.ext.todo',
'sphinx.ext.autosummary', 'sphinx.ext.extlinks',
'sphinx.ext.viewcode']
master_doc = 'contents'
templates_path = ['_templates']
exclude_patterns = ['_build']
project = 'Sphinx'
copyright = '2007-2016, Georg Brandl and the Sphinx team'
version = sphinx.__released__
release = version
show_authors = True
html_theme = 'sphinx13'
html_theme_path = ['_themes']
modindex_common_prefix = ['sphinx.']
html_static_path = ['_static']
html_sidebars = {'index': ['indexsidebar.html', 'searchbox.html']}
html_additional_pages = {'index': 'index.html'}
html_use_opensearch = 'http://sphinx-doc.org'
htmlhelp_basename = 'Sphinxdoc'
epub_theme = 'epub'
epub_basename = 'sphinx'
epub_author = 'Georg Brandl'
epub_publisher = 'http://sphinx-doc.org/'
epub_scheme = 'url'
epub_identifier = epub_publisher
epub_pre_files = [('index.xhtml', 'Welcome')]
epub_post_files = [('install.xhtml', 'Installing Sphinx'),
('develop.xhtml', 'Sphinx development')]
epub_exclude_files = ['_static/opensearch.xml', '_static/doctools.js',
'_static/jquery.js', '_static/searchtools.js',
'_static/underscore.js', '_static/basic.css',
'search.html', '_static/websupport.js']
epub_fix_images = False
epub_max_image_width = 0
epub_show_urls = 'inline'
epub_use_index = False
epub_guide = (('toc', 'contents.xhtml', u'Table of Contents'),)
epub_description = 'Sphinx documentation generator system manual'
latex_documents = [('contents', 'sphinx.tex', 'Sphinx Documentation',
'Georg Brandl', 'manual', 1)]
latex_logo = '_static/sphinx.png'
latex_elements = {
'fontpkg': '\\usepackage{palatino}',
'passoptionstopackages': '\\PassOptionsToPackage{svgnames}{xcolor}',
'printindex': '\\footnotesize\\raggedright\\printindex',
}
latex_show_urls = 'footnote'
autodoc_member_order = 'groupwise'
todo_include_todos = True
extlinks = {'duref': ('http://docutils.sourceforge.net/docs/ref/rst/'
'restructuredtext.html#%s', ''),
'durole': ('http://docutils.sourceforge.net/docs/ref/rst/'
'roles.html#%s', ''),
'dudir': ('http://docutils.sourceforge.net/docs/ref/rst/'
'directives.html#%s', '')}
man_pages = [
('contents', 'sphinx-all', 'Sphinx documentation generator system manual',
'Georg Brandl', 1),
('man/sphinx-build', 'sphinx-build', 'Sphinx documentation generator tool',
'', 1),
('man/sphinx-quickstart', 'sphinx-quickstart', 'Sphinx documentation '
'template generator', '', 1),
('man/sphinx-apidoc', 'sphinx-apidoc', 'Sphinx API doc generator tool',
'', 1),
]
texinfo_documents = [
('contents', 'sphinx', 'Sphinx Documentation', 'Georg Brandl',
'Sphinx', 'The Sphinx documentation builder.', 'Documentation tools',
1),
]
# We're not using intersphinx right now, but if we did, this would be part of
# the mapping:
intersphinx_mapping = {'python': ('https://docs.python.org/2/', None)}
# Sphinx document translation with sphinx gettext feature uses these settings:
locale_dirs = ['locale/']
gettext_compact = False
# -- Extension interface -------------------------------------------------------
from sphinx import addnodes # noqa
event_sig_re = re.compile(r'([a-zA-Z-]+)\s*\((.*)\)')
def parse_event(env, sig, signode):
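# Parse an event signature of the form "event-name (arg1, arg2, ...)" into a
# name node plus a parameter list; signatures that do not match fall back to a
# plain name node.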
m = event_sig_re.match(sig)
if not m:
signode += addnodes.desc_name(sig, sig)
return sig
name, args = m.groups()
signode += addnodes.desc_name(name, name)
plist = addnodes.desc_parameterlist()
for arg in args.split(','):
arg = arg.strip()
plist += addnodes.desc_parameter(arg, arg)
signode += plist
return name
def setup(app):
from sphinx.ext.autodoc import cut_lines
from sphinx.util.docfields import GroupedField
app.connect('autodoc-process-docstring', cut_lines(4, what=['module']))
app.add_object_type('confval', 'confval',
objname='configuration value',
indextemplate='pair: %s; configuration value')
fdesc = GroupedField('parameter', label='Parameters',
names=['param'], can_collapse=True)
app.add_object_type('event', 'event', 'pair: %s; event', parse_event,
doc_field_types=[fdesc])
|
py
|
1a5be57e916a52526172922ade7e7265e0546355
|
from typing import List
from src.utils.normalizer import text_normalize
from src.utils.tokenizer import sentences_seg
class SentenceHandler(object):
def sentence_processor(self, sentences,
min_length: int = 4,
max_length: int = 128) -> List[str]:
"""
Processes a list of sentences, filtering out short ones and splitting long ones.
:param sentences: The sentences to process.
:param min_length: The minimum token length a sentence must exceed to be kept.
:param max_length: The maximum token length a sentence may have; longer sentences are split into roughly equal chunks (e.g. a 300-token sentence with max_length=128 becomes three chunks of about 100 tokens).
:return: Sentences.
"""
to_return = []
for s in sentences:
num_token = len(s.split())
if num_token > max_length:
num_split = num_token//max_length
if num_token%max_length > 0:
num_split += 1
sent_size = num_token//num_split
for i in range(num_split):
start = i*sent_size
end = start + sent_size
if i == num_split - 1:
end = num_token
to_return.append(" ".join(s.split()[start:end]))
elif num_token > min_length:
to_return.append(s)
return to_return
def process(self, body: str,
min_length: int = 4,
max_length: int = 128) -> List[str]:
"""
Processes the content sentences.
:param body: The raw string body to process
:param min_length: Minimum token length that the sentences must exceed
:param max_length: Maximum token length that the sentences must fall under (longer sentences are split)
:return: Returns a list of sentences.
"""
sentences = sentences_seg(text_normalize(body))
return self.sentence_processor(sentences, min_length, max_length)
def __call__(self, body: str,
min_length: int = 4,
max_length: int = 128) -> List[str]:
"""
Processes the content sentences.
:param body: The raw string body to process
:param min_length: Minimum token length that the sentences must exceed
:param max_length: Maximum token length that the sentences must fall under (longer sentences are split)
:return: Returns a list of sentences.
"""
return self.process(body, min_length, max_length)
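# Minimal usage sketch (assumes the src.utils helpers imported above are
# importable on the current path):
#   handler = SentenceHandler()
#   sents = handler("Raw text to split into sentences ...", min_length=4, max_length=128)
#   # `sents` is a list of normalized sentences; short ones are dropped and
#   # long ones are split into chunks of at most max_length tokens.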
|
py
|
1a5be60b778f862c7afb9dbdfcc5064ba4d8874d
|
from setuptools import setup
setup(
name="namedpkg",
version="1.0",
packages=["namedpkg"],
namespace_packages=["namedpkg"],
)
|
py
|
1a5be63fd056001e6ac1545cc4fa983a169ea30b
|
import wx, numpy as np
from .boxutil import cross, multiply, lay, mat
from .imutil import mix_img
from .mark import drawmark
from time import time
class Canvas (wx.Panel):
scales = [0.03125, 0.0625, 0.125, 0.25, 0.5, 0.75, 1, 1.5, 2, 3, 4, 5, 8, 10, 15, 20, 30, 50]
def __init__(self, parent, autofit=False):
wx.Panel.__init__ ( self, parent, id = wx.ID_ANY, pos = wx.DefaultPosition, size = wx.DefaultSize, style = wx.TAB_TRAVERSAL )
self.img = None
self.back = None
self.mode = 'set'
self.winbox = None
self.conbox = None
self.oribox = None
self.outbak = None
self.outimg = None
self.outrgb = None
self.outbmp = None
self.outint = None
self.buffer = None
lut = np.arange(256*3)
lut.shape = (256,3)
lut = lut.astype(np.uint8)
self.lut = lut
self.rg = (0, 255)
self.cn = 0
self._lut = lut
self._rg = (0, 255)
self._cn = 0
self.marks = {}
self.scaidx = 6
self.autofit = autofit
self.scrbox = wx.DisplaySize()
self.bindEvents()
def bindEvents(self):
for event, handler in [ \
(wx.EVT_SIZE, self.on_size),
(wx.EVT_MOUSE_EVENTS, self.on_mouseevent),
(wx.EVT_IDLE, self.on_idle),
(wx.EVT_PAINT, self.on_paint)]:
self.Bind(event, handler)
def on_mouseevent(self, me):
if me.ButtonDown():
if me.GetButton()==1:
self.oldxy = me.GetX(), me.GetY()
if me.GetButton()==3:
self.fit()
wheel = np.sign(me.GetWheelRotation())
if wheel!=0:
if wheel == 1:
self.zoomout(me.GetX(), me.GetY())
if wheel == -1:
self.zoomin(me.GetX(), me.GetY())
if me.Dragging():
x, y = self.oldxy
self.move(me.GetX()-x, me.GetY()-y)
self.oldxy = me.GetX(), me.GetY()
def initBuffer(self):
box = self.GetClientSize()
self.buffer = wx.Bitmap(*box)
self.winbox = [0, 0, *box]
def fit(self):
oriw = self.oribox[2]-self.oribox[0]
orih = self.oribox[3]-self.oribox[1]
if not self.autofit: a,b,c,d = self.winbox
else:
(a,b),(c,d) = (0,0), self.scrbox
c, d = c*0.9, d*0.9
for i in self.scales[6::-1]:
if oriw*i<c-a and orih*i<d-b: break
self.scaidx = self.scales.index(i)
self.zoom(i, 0, 0)
self.update()
def set_img(self, img):
self.img = img
shp = list(img.shape[1::-1])
if self.oribox and self.oribox[2:] == shp: return
self.conbox = [0, 0, *shp]
self.oribox = [0, 0, *shp]
#if self.conbox is None: self.fit()
def set_back(self, back):
self.back = back
def set_rg(self, rg, b=False):
if b: self._rg = rg
else: self.rg = rg
def set_lut(self, lut, b=False):
if b: self._lut = lut
else: self.lut = lut
def set_cn(self, cn, b=False):
if b: self._cn = cn
else: self.cn = cn
def set_mode(self, mode): self.mode = mode
@property
def scale(self):
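# Current zoom factor: ratio of the on-screen content box diagonal to the
# original image box diagonal.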
conw = self.conbox[2]-self.conbox[0]
oriw = self.oribox[2]-self.oribox[0]
conh = self.conbox[3]-self.conbox[1]
orih = self.oribox[3]-self.oribox[1]
l1, l2 = conw**2+conh**2, oriw**2+orih**2
return l1**0.5 / l2**0.5
def move(self, dx, dy):
arr = np.array(self.conbox)
arr = arr.reshape((2,2))+(dx, dy)
self.conbox = arr.ravel().tolist()
self.update()
def on_size(self, event):
if self.img is None: return
self.initBuffer()
self.update()
def on_idle(self, event):pass
def on_paint(self, event):
if self.buffer is None: return
wx.BufferedPaintDC(self, self.buffer)
def draw_image(self, dc, img, back, mode):
out, bak, rgb = self.outimg, self.outbak, self.outrgb
csbox = cross(self.winbox, self.conbox)
shp = csbox[3]-csbox[1], csbox[2]-csbox[0]
o, m = mat(self.oribox, self.conbox, csbox)
shp = tuple(np.array(shp).round().astype(int))
if out is None or (out.shape, out.dtype) != (shp, img.dtype):
self.outimg = np.zeros(shp, dtype=img.dtype)
if back is not None and (
bak is None or (bak.shape, bak.dtype) != (shp, back.dtype)):
self.outbak = np.zeros(shp, dtype=back.dtype)
if rgb is None or rgb.shape[:2] != shp:
self.outrgb = np.zeros(shp+(3,), dtype=np.uint8)
self.outint = np.zeros(shp, dtype=np.uint8)
buf = memoryview(self.outrgb)
self.outbmp = wx.Bitmap.FromBuffer(*shp[::-1], buf)
#if not back is None: print('has back image')
mix_img(back, m, o, shp, self.outbak,
self.outrgb, self.outint,
self._rg, self._lut, cns=self._cn, mode='set')
mix_img(self.img, m, o, shp, self.outimg,
self.outrgb, self.outint,
self.rg, self.lut, cns=self.cn, mode=self.mode)
self.outbmp.CopyFromBuffer(memoryview(self.outrgb))
dc.DrawBitmap(self.outbmp, *csbox[:2])
def update(self):
start = time()
lay(self.winbox, self.conbox)
dc = wx.BufferedDC(wx.ClientDC(self), self.buffer)
dc.Clear()
self.draw_image(dc, self.img, self.back, 0)
for i in self.marks:
if self.marks[i] is None: continue
if callable(self.marks[i]):
self.marks[i](dc, self.to_panel_coor, k = self.scale)
else:
drawmark(dc, self.to_panel_coor, self.marks[i], k=self.scale)
dc.UnMask()
print('frame rate:',int(1/max(0.001, time()-start)))
def center(self, x, y, coord='win'):
if coord=='data':
x,y = self.to_panel_coor(x, y)
dx = (self.winbox[2]-self.winbox[0])/2 - x
dy = (self.winbox[3]-self.winbox[1])/2 - y
for i,j in zip((0,1,2,3),(dx,dy,dx,dy)):
self.conbox[i] += j
lay(self.winbox, self.conbox)
def zoom(self, k, x, y, coord='win'):
if coord=='data':
x,y = self.to_panel_coor(x, y)
box = np.array(self.conbox).reshape((2,2))
box = (box - (x,y)) / self.scale * k + (x, y)
self.conbox = box.ravel().tolist()
lay(self.winbox, self.conbox)
if not self.autofit: return
a,b,c,d = self.conbox
if c-a<self.scrbox[0]*0.9 and d-b<self.scrbox[1]*0.9:
self.SetInitialSize((c-a+4, d-b+4))
def zoomout(self, x, y, coord='win', grade=True):
self.scaidx = min(self.scaidx + 1, len(self.scales)-1)
self.zoom(self.scales[self.scaidx], x, y, coord)
self.update()
def zoomin(self, x, y, coord='win'):
self.scaidx = max(self.scaidx - 1, 0)
self.zoom(self.scales[self.scaidx], x, y, coord)
self.update()
def to_data_coor(self, x, y):
x = (x - self.conbox[0])/self.scale
y = (y - self.conbox[1])/self.scale
return x, y
def to_panel_coor(self, x, y):
x = x * self.scale + self.conbox[0]
y = y * self.scale + self.conbox[1]
return x, y
def __del__(self):
self.img = self.back = None
print('========== canvas del')
if __name__=='__main__':
msk = np.zeros((512,512), dtype=np.uint8)
msk[100:200,100:200] = 1
msk[200:300,200:300] = 2
msk[300:400,300:400] = 3
lut = np.array([(0,0,0),(255,0,0),(0,255,0),(0,0,255)], dtype=np.uint8)
from skimage.data import astronaut, camera
app = wx.App()
frame = wx.Frame(None)
canvas = Canvas(frame)
canvas.set_img(msk)
canvas.set_lut(lut)
canvas.set_cn(0)
canvas.set_back(astronaut())
canvas.set_cn('rgb', 1)
canvas.set_mode('msk')
x = np.arange(512)
y = np.sin(x/30) * 100 + 256
canvas.marks['line'] = {'type':'line', 'lw':3, 'body':np.array([x,y]).T.tolist()}
frame.Show(True)
app.MainLoop()
|
py
|
1a5be6dca186a3d931e08c53066ce37bfb3eedc6
|
from __future__ import division
import matplotlib
matplotlib.use('Agg')
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import argparse
import itertools
def get_breaks(model, N):
if model == "resnet20_v2":
breaks = {
432: [0, 353, 432],
2304: [0, 1847, 2229, 2304],
4608: [0, 4073, 4544, 4608],
9216: [0, 8164, 9012, 9216],
18432: [0, 16094, 18060, 18432],
36864: [0, 33742, 36595, 36864]}
elif model == "vgg16":
breaks = {
1728: [0, 1443, 1663, 1728],
36864: [0, 34097, 36467, 36815, 36864],
73728: [0, 67595, 73032, 73630, 73728],
147456: [0, 132193, 145286, 147125, 147456],
294912: [0, 272485, 292623, 294580, 294844, 294912],
589824: [0, 553577, 586620, 589431, 589764, 589824],
1179648: [0, 1099105, 1172811, 1179005, 1179543, 1179648],
2359296: [0, 2195844, 2343594, 2357633, 2359102, 2359296]}
elif model == "resnet50":
breaks = {
4096: [0, 3656, 4018, 4096],
9408: [0, 8476, 9165, 9408],
16384: [0, 14406, 16145, 16327, 16384],
36864: [0, 32238, 36292, 36726, 36864],
131072: [0, 121069, 130381, 130989, 131072],
32768: [0, 29429, 32320, 32692, 32768],
147456: [0, 133258, 145944, 147255, 147456],
65536: [0, 58690, 64507, 65371, 65536],
524288: [0, 494762, 522078, 524067, 524238, 524288],
589824: [0, 539407, 584654, 589214, 589738, 589824],
262144: [0, 237433, 259437, 261782, 262062, 262144],
2097152: [0, 1990620, 2088919, 2096322, 2097036, 2097152],
2359296: [0, 2188168, 2341896, 2356580, 2358793, 2359296],
1048576: [0, 981145, 1041707, 1047784, 1048461, 1048576],
2050048: [0, 1980923, 2044274, 2049225, 2049929, 2050048]}
return breaks[N]
def find_breaks(curve, num_of_segments=2):
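# Greedy breakpoint search: at each step, draw a straight line from the first
# to the last value of the remaining curve and place the next break where the
# curve deviates most from that line.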
y=curve
breaks = []
break_index = 0
breaks.append(break_index)
for i in range(num_of_segments):
line = np.linspace(y[0], y[-1], len(y))
distance = list(np.abs(line - y))
break_index += distance.index(max(distance))
breaks.append(break_index)
y=curve[break_index:]
breaks.append(len(curve))
return breaks
def get_num_of_segments(model, N):
if model == "resnet20_v2":
segments = {2304: 3, 4608: 3, 9216: 3, 18432: 3, 36864: 3} # 432
elif model == "vgg16":
segments = {1728: 3, 36864: 4, 73728: 4, 147456: 4, 294912: 5, 589824: 5, 1179648: 5, 2359296: 5}
elif model == "resnet50":
segments = {4096: 3, 9408: 3, 16384: 4, 36864: 4, 131072: 4, 32768: 4, 147456: 4, 65536: 4, 524288: 5,
589824: 5, 262144: 5, 2097152: 5, 2359296: 5, 1048576: 5, 2050048: 5}
return segments[N]
def GetInputMatrix_Polynomial(xcol, x):
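# Build the polynomial design (Vandermonde-style) matrix with columns
# [1, x, x^2, ..., x^(xcol-1)] for a least-squares polynomial fit.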
N = len(x)
Xtrans = [np.ones(N)]
for i in range(1, xcol):
Xtrans = np.vstack([Xtrans, np.power(x, i)])
X = np.transpose(Xtrans)
return X
polynomial_degree = 4
pd.set_option("display.precision", 40)
parser = argparse.ArgumentParser()
parser.add_argument('--path', type=str, default="./", help='')
parser.add_argument('--model', type=str, default="./", help='')
args = parser.parse_args()
path = args.path
Y = pd.read_csv(path+'/values.csv', header=None, sep="\n")[0].values
coefficients = pd.read_csv(path+'/coefficients.csv', header=None, sep="\n")[0].values
#print(Y) ; print(coefficients)
N = Y.size
num_of_segments = get_num_of_segments(args.model, N)
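# The fit operates on |Y| sorted in ascending order; the original signs are
# restored afterwards through `mask`, and `mapping` records how the sorted
# estimates map back to their original positions.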
y_abs = np.abs(Y)
mapping = np.argsort(y_abs, axis=0)
sorted_Y = y_abs[mapping]
# breaks = find_breaks(sorted_Y, num_of_segments-1)
breaks = get_breaks(args.model, N)
sizes = [breaks[i+1]-breaks[i] for i in range(num_of_segments)]
negative_indices = np.where(np.less(Y[mapping], 0))[0]
Nneg = negative_indices.size
mask = np.ones(N)
mask[negative_indices] = -np.ones(Nneg)
y_est_abs = []
x_segments_ = [] ; x_segments = [] ; y_segments = [] ; X_segments = []
for i in range(num_of_segments):
x_segments_ += [np.arange(breaks[i], breaks[i + 1])]
x_segments += [np.cast['float64'](np.arange(0, sizes[i]))]
y_segments += [sorted_Y[breaks[i]: breaks[i + 1]]]
X_segments += [GetInputMatrix_Polynomial(polynomial_degree, x_segments[i])]
offset = i*polynomial_degree
y_est_abs += [np.matmul(X_segments[i], coefficients[offset : offset+polynomial_degree])]
y_est_abs_np = np.concatenate(y_est_abs)
y = y_est_abs_np * mask
# Compute Root Mean Squared Error
rmse = np.sqrt(np.sum(np.power(sorted_Y-y_est_abs_np, 2))/N)
plt.rcParams["figure.figsize"] = [20, 10]
print(rmse)
colors = itertools.cycle(['yo', 'ro', 'go', 'yo', 'mo'])
with open(path+'/rmse.txt', 'w') as f:
f.write(str(rmse) + "\n")
mapping = mapping+1
plt.plot(range(1, N+1), Y, 'mo', markersize=5, label="True")
plt.plot(mapping, y, 'c.', markersize=5, label="Estimated Values")
plt.plot(range(1, N+1), sorted_Y, 'bo', markersize=6, label="True Sorted")
for x, X, c, y_est, color in zip(x_segments_, X_segments, coefficients, y_est_abs, colors):
plt.plot(x, y_est, color, markersize=2, label="Estimates")
plt.plot(breaks, np.zeros(len(breaks)), 'ko', markersize=6, label="Breaks")
plt.legend()
# plt.show()
plt.savefig(path + 'gradient.png')
|
py
|
1a5be7db8c974a3768666d9ba5b13584a1a82b1c
|
# Problem 217: Contains Duplicate
from typing import List
class Solution:
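# Complexity notes (added for clarity): approach 1 is O(n log n) time after
# sorting; approaches 2 and 3 are O(n^2) because of repeated linear scans;
# approach 4 is O(n) average time using a set, at the cost of O(n) extra space.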
# Approach 1 - Using sort
def containsDuplicate(self, nums) -> bool:
nums.sort()
for index in range(len(nums)-1):
if nums[index] == nums[index+1]:
return True
return False
# Approach 2 - Using built-in count method
def containsDuplicate2(self, nums) -> bool:
for num in nums:
if nums.count(num) > 1:
return True
return False
# Approach 3 - Using visited list
def containsDuplicate3(self, nums) -> bool:
foo = []
for num in nums:
if num in foo:
return True
else:
foo.append(num)
return False
# Approach 4 - Using Set
def containsDuplicate4(self, nums: List[int]) -> bool:
return len(set(nums)) < len(nums)
# Test
solution = Solution()
# Expected: True
nums = [1,2,3,4,5,6,1]
print(solution.containsDuplicate(nums))
|
py
|
1a5be8121ec0bee92ee194e47a72c3191c7ef2ae
|
from appinit.lib.db import Manager
class PermissionManager(object):
def __init__(self, session):
self.manager = Manager()
self.db = self.manager.db('appinit')
self.session = session
# self.settings = settings
def get_application_uids(self, application, permission):
app = PermissionsApplication(self.db, application)
return app.get_uids(permission)
def list_user_permissions(self):
user = PermissionsUser(self.db, self.session.uid)
return user.list_permissions()
def get_application(self, app=None):
permissions = self.list_user_permissions()
if app is None:
return permissions
else:
if app in permissions:
return permissions[app]
else:
return []
class Permission(object):
def __init__(self):
pass
class PermissionsApplication(object):
def __init__(self, db, application):
self.application = application
self.db = db
# users.permissions.get
def get_uids(self, permission):
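# Aggregation: unwind each user's permission list for this application, group
# uids by permission, then keep only the group matching the requested permission.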
pipeline = [
{ "$match": {"application": self.application}},
{ "$unwind": "$permissions" },
{ "$group":
{
"_id": "$permissions",
"uids": {
"$addToSet": "$uid",
},
}
},
{ "$match":
{
"_id": permission,
}
},
]
cursor = self.db.permissions.aggregate(pipeline)
if cursor is not None:
for i in cursor:
return i['uids']
return None
# permissions.applications.add
def add(self, uid, permission):
cursor = self.db.permissions.find_one({
"application": self.application,
"uid": uid,
})
if cursor is None:
add_user.call(uid=uid, application=self.application)
self.db.permissions.update(
{
"application": self.application,
"uid": kwargs["uid"]
},
{
"$push": {
"permissions": permission
}
}
)
return get_application.call(application=self.application)
class PermissionsUser(object):
def __init__(self, db, uid):
self.uid = uid
self.db = db
self.manager = Manager()
def list_permissions(self):
permissions = {}
apps = self.manager.get_application()
apps.append({"name": "system"})
for app in apps:
if app['name'] != "system":
list_name = app['api']['name'].split("_")
camel_case = ''.join([list_name[x].title() for x in range(1, len(list_name))])
name = list_name[0] + camel_case
else:
name = app['name']
permissions[name] = {}
all_permissions = self.db.permissions.find({"application": app['name']}).distinct("permissions")
user_permissions = self.db.permissions.find_one({"uid": self.uid, "application": app['name']})
if user_permissions is not None:
all_true = False
if user_permissions['application'] == app['name']:
all_true = "admin" in user_permissions['permissions']
for p in user_permissions['permissions']:
key = 'is_' + p
if all_true:
permissions[name][key] = True
elif p in all_permissions:
permissions[name][key] = True
else:
permissions[name][key] = False
return permissions
class PermissionsModule(object):
def __init__(self):
pass
|
py
|
1a5be81871e6c6c088c9b99cf3515545fa771826
|
# Copyright 2015 Huawei Technologies Co., Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import oslo_messaging
ATTR_NOT_SPECIFIED = object()
class Mapping(object):
def __init__(self, mapping):
self.direct_mapping = mapping
self.reverse_mapping = {}
for key, value in mapping.items():
self.reverse_mapping[value] = key
_SINGLETON_MAPPING = Mapping({
ATTR_NOT_SPECIFIED: "@@**ATTR_NOT_SPECIFIED**@@",
})
class KingbirdSerializer(oslo_messaging.Serializer):
def __init__(self, base=None):
super(KingbirdSerializer, self).__init__()
self._base = base
def serialize_entity(self, context, entity):
if isinstance(entity, dict):
for key, value in entity.items():
entity[key] = self.serialize_entity(context, value)
elif isinstance(entity, list):
for i, item in enumerate(entity):
entity[i] = self.serialize_entity(context, item)
elif entity in _SINGLETON_MAPPING.direct_mapping:
entity = _SINGLETON_MAPPING.direct_mapping[entity]
if self._base is not None:
entity = self._base.serialize_entity(context, entity)
return entity
def deserialize_entity(self, context, entity):
if isinstance(entity, dict):
for key, value in entity.items():
entity[key] = self.deserialize_entity(context, value)
elif isinstance(entity, list):
for i, item in enumerate(entity):
entity[i] = self.deserialize_entity(context, item)
elif entity in _SINGLETON_MAPPING.reverse_mapping:
entity = _SINGLETON_MAPPING.reverse_mapping[entity]
if self._base is not None:
entity = self._base.deserialize_entity(context, entity)
return entity
def serialize_context(self, context):
if self._base is not None:
context = self._base.serialize_context(context)
return context
def deserialize_context(self, context):
if self._base is not None:
context = self._base.deserialize_context(context)
return context
|
py
|
1a5beaee410b4dc01a4045d54782315c624aa389
|
import sys
import matplotlib.pyplot as plt
from imtoolkit import Parameters, IMCode, IdealRayleighChannel, CoherentMLDSimulator
plt.switch_backend('agg')
plt.rcParams['xtick.direction'] = 'in'
plt.rcParams['ytick.direction'] = 'in'
plt.rcParams['markers.fillstyle'] = 'none'
def simulateAMI(argstr):
params = Parameters(argstr)
code = IMCode(params.dm, params.M, params.K, params.Q, params.mod, params.L, meanPower=1)
channel = IdealRayleighChannel(params.ITi, params.M, params.N)
sim = CoherentMLDSimulator(code.codes, channel)
return sim.simulateAMIParallel(params, outputFile=False, printValue=False)
if __name__ == '__main__':
fig, ax = plt.subplots()
ax.set_xlabel("SNR [dB]")
ax.set_ylabel("AMI [bit/symbol]")
ax.set_xlim(-20, 20)
ax.set_ylim(0, 4)
ax.tick_params(pad = 8)
ret = simulateAMI("AMIP_sim=coh_code=index_dm=dic_M=4_K=4_Q=1_L=2_mod=PSK_N=4_ITo=1_ITi=1e4_snrfrom=-20.00_to=20.00_len=21")
ax.plot(ret["snr_dB"], ret["ami"], color="k", marker="s", linestyle="-", label="BLAST")
ret = simulateAMI("AMIP_sim=coh_code=index_dm=opt_M=4_K=1_Q=4_L=4_mod=PSK_N=4_ITo=1_ITi=1e4_snrfrom=-20.00_to=20.00_len=21")
ax.plot(ret["snr_dB"], ret["ami"], color="r", marker="o", linestyle="-", label="Spatial modulation")
handles, labels = ax.get_legend_handles_labels()
legend = ax.legend(handles, labels, loc="best", frameon=True)
frame = legend.get_frame()
frame.set_facecolor('white')
frame.set_edgecolor('white')
#plt.show()
plt.savefig(sys.argv[0].replace(".py", ".svg"))
|
py
|
1a5bebaf8253a329655313892466fbb444c7021e
|
import comms as comms
import datetime as datetime
import json as json
import uuid as uuid
import time as time
class DeviceTable:
def __init__(self, sys_settings_fname, device_table_fname):
self.device_table_fname = device_table_fname
self.sub_list = [b'new_arp_pkt']
self.comms = comms.Comms(sys_settings_fname)
self.comms.set_subscriptions(self.sub_list)
self.device_list = load_device_table(device_table_fname)
self.device_lut = update_device_lut(self.device_list)
self.comms.send_msg('new_table', self.device_list)
self.last_new_table_pub_t = 0
def run_device_table_routine(self):
msg = self.comms.recv_msg()
if msg:
self.process_message(msg)
time.sleep(0.1)
def process_message(self, msg):
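# Update the device table from an ARP-derived message: refresh last_seen and
# IP for known MACs, add unknown MACs as new devices, and republish the table
# when it changes or at least every 5 seconds.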
payload = json.loads(msg[1].decode('utf-8'))
src_mac = payload['sender_mac_as_str_with_colons']
src_ip = payload['sender_ip_as_str_with_dots']
pub_new_table_flag = False
now_iso_fmt = datetime.datetime.now().isoformat()
if src_mac in self.device_lut.keys():
self.device_lut[src_mac]['last_seen'] = now_iso_fmt
device_ip = self.device_lut[src_mac]['ip']
if device_ip != src_ip:
pub_new_table_flag = True
self.device_lut[src_mac]['ip'] = src_ip
else:
new_device = {
'id': str(uuid.uuid4()),
'mac': src_mac,
'ip': src_ip,
'last_seen': now_iso_fmt
}
self.device_list.append(new_device)
self.device_lut = update_device_lut(self.device_list)
pub_new_table_flag = True
# TODO: save data to database
save_device_table(self.device_table_fname, self.device_list)
if pub_new_table_flag or time.time() - self.last_new_table_pub_t > 5.0:
self.last_new_table_pub_t = time.time()
self.comms.send_msg('new_table', self.device_list)
def clean_up(self):
self.comms.close_pub_sub()
def load_device_table(device_table_fname):
# TODO: Load from db
with open(device_table_fname, 'r') as f:
tmp_dict = json.load(f)
return tmp_dict['devices']
def save_device_table(device_table_fname, device_list):
with open(device_table_fname, 'w') as f:
out_dict = {
'devices': device_list
}
json.dump(out_dict, f, indent=2)
def update_device_lut(device_list):
device_lut = {
dev['mac']: dev for dev in device_list
}
return device_lut
def main():
mt = DeviceTable('sys_settings.json', 'device_table.json')
is_running = True
print('Starting device table manager...')
while is_running:
try:
mt.run_device_table_routine()
except KeyboardInterrupt:
print('Closing device table manager.')
is_running = False
mt.clean_up()
if __name__ == '__main__':
main()
|
py
|
1a5bee46bc90b8fc59eff20437777958299fc238
|
import _plotly_utils.basevalidators
class ColorValidator(_plotly_utils.basevalidators.ColorValidator):
def __init__(
self,
plotly_name='color',
parent_name='scatter3d.marker.colorbar.title.font',
**kwargs
):
super(ColorValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop('edit_type', 'calc'),
role=kwargs.pop('role', 'style'),
**kwargs
)
|
py
|
1a5bef3a17822967310dd327b13034865e66f868
|
import numpy as np
import re
from replay_memory import NStepReplayMemory
def make_legacy_replay_memory(return_est, capacity, history_len, discount):
match = re.match('nstep-([0-9]+)', return_est)
if not match:
raise ValueError('Legacy mode only supports n-step returns but requested {}'.format(return_est))
n = int(match.group(1))
return LegacyReplayMemory(capacity, history_len, discount, n)
class LegacyReplayMemory(NStepReplayMemory):
def __init__(self, capacity, history_len, discount, n):
super().__init__(capacity, history_len, discount, cache_size=0, block_size=n, priority=0.0, n=n)
def sample(self, batch_size):
indices = self._sample_block_ids(batch_size)
return self._sample(indices) # Separate function for unit testing
def _sample(self, indices):
state_batch, action_batch, reward_batch, done_batch = [], [], [], []
for i in indices:
state_batch.append( self._extract_block(None, i, states=True) )
action_batch.append( self._extract_block(self.actions, i) )
reward_batch.append( self._extract_block(self.rewards, i) )
done_batch.append( self._extract_block(self.dones, i).astype(np.float32) )
state_batch, action_batch, reward_batch, done_batch = map(np.array, [state_batch, action_batch, reward_batch, done_batch])
# Compute the n-step returns
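# Working backward from the bootstrapped value of the final state:
# R_i = r_i + discount * R_{i+1} * (1 - done_i), which truncates the return
# at episode boundaries.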
return_batch = self.refresh_func(state_batch[:, -1]) # Begin with bootstrap states
for i in reversed(range(self.n)):
return_batch = reward_batch[:, i] + self.discount * return_batch * (1.0 - done_batch[:, i])
return state_batch[:, 0], action_batch[:, 0], return_batch
def refresh(self, cache_size, train_frac):
raise NotImplementedError
def _refresh(self, cache_size, train_frac, block_ids):
raise NotImplementedError
def _calculate_returns(self, rewards, qvalues, dones, mask):
raise NotImplementedError
|
py
|
1a5bef8dffb3bcc8a5f34c3233f0d7665da28d09
|
import sys, os, re
DEFAULT_VERS = "310"
SOURCE_DIR = "source"
HOOK_MAGIC = "// hook_from "
buildVersion = DEFAULT_VERS
patchConfig = {
"build_id" : {},
"nso_load_addr" : {},
}
def initConfig():
configPath = os.path.join(PATCH_CONFIG_DIR, buildVersion + PATCH_CONFIG_EXTENSION)
# read config file
with open(configPath) as configFile:
curConfigName = None
for line in configFile:
line = line.strip()
configNameLineMatch = re.match(r'\[(.+)\]', line)
if configNameLineMatch:
curConfigName = configNameLineMatch.group(1)
continue
if '=' in line:
configNSO, configValue = line.split('=', 1)
if not configNSO.isalnum():
continue
if '+' in configValue:
print("genPatch.py error:", line, "awaits implementation")
sys.exit(-1)
patchConfig[curConfigName][configNSO] = configValue
def calcJump(from_addr_str, dest_func, vers=DEFAULT_VERS):
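# Look up the destination function's address in the linker map file for the
# given version and print the branch offset needed to reach it from
# `from_addr`, accounting for the configured subsdk1 load address.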
from_addr = int(from_addr_str, 16)
dest_func = dest_func + "("
mapFilePath = "build" + vers + "/skyline" + vers + ".map"
with open(mapFilePath, 'r') as f:
mapFile = f.read()
foundPos = mapFile.find(dest_func) - 34
foundLine = mapFile[foundPos:mapFile.find("\n", foundPos)]
print("Found:")
print(foundLine)
func_addr = int(foundLine[:foundLine.find(" ")], 0)
jump_offset = patchConfig["nso_load_addr"]["subsdk1"] + func_addr - from_addr
print("Jump needed: " + hex(jump_offset))
initConfig()
if len(sys.argv) > 3:
calcJump(sys.argv[1], sys.argv[2], sys.argv[3])
elif len(sys.argv) > 2:
calcJump(sys.argv[1], sys.argv[2])
else:
hasOutput = False
for root, subdirs, files in os.walk(SOURCE_DIR):
for file in files:
with open(root+"/"+file, 'r') as f:
file_iter = iter(f.readlines())
for line in file_iter:
if HOOK_MAGIC in line:
hook_addr = line[len(HOOK_MAGIC):-1]
line = next(file_iter)
hook_func = line[:line.find('(')]
hook_func = hook_func[hook_func.rfind(' ') + 1:]
calcJump(hook_addr, hook_func)
hasOutput = True
if not hasOutput:
print("Usage: %s [from addr] [to func name] (s2 vers, like '310')" % sys.argv[0])
|
py
|
1a5bf04ac4087c9fe95932217c16e8196cacc806
|
# Select libraries that will be imported into PyNite for the user
from PyNite.FEModel3D import FEModel3D
|
py
|
1a5bf07eb5e31048472941973849e88efd748b0c
|
import rlkit.misc.hyperparameter as hyp
from rlkit.demos.source.dict_to_mdp_path_loader import EncoderDictToMDPPathLoader
from rlkit.launchers.experiments.ashvin.awac_rig import awac_rig_experiment
from rlkit.launchers.launcher_util import run_experiment
from rlkit.launchers.arglauncher import run_variants
from rlkit.torch.sac.policies import GaussianPolicy, GaussianMixturePolicy
from rlkit.envs.encoder_wrappers import PresamplingEncoderWrappedEnv
from sawyer_control.envs.sawyer_grip import SawyerGripEnv
#from sawyer_control.envs.sawyer_grip_stub import SawyerGripEnv
from rlkit.torch.networks import Clamp
from rlkit.torch.vae.vq_vae import VQ_VAE
from rlkit.torch.vae.vq_vae_trainer import VQ_VAETrainer
from rlkit.torch.grill.common import train_vqvae
path_func = lambda name: '/media/ashvin/data2/data/fixed_data_overtrained/'+ name
mini_demos = [
dict(path=path_func('fixed_drawer_demos.npy'), obs_dict=True, is_demo=True, data_split=0.25),
dict(path=path_func('fixed_pot_demos.npy'), obs_dict=True, is_demo=True, data_split=0.25),
dict(path=path_func('fixed_pnp_demos.npy'), obs_dict=True, is_demo=True, data_split=0.25),
dict(path=path_func('fixed_tray_demos.npy'), obs_dict=True, is_demo=True, data_split=0.25),
]
all_demos = [
dict(path=path_func('fixed_drawer_demos.npy'), obs_dict=True, is_demo=True,),
dict(path=path_func('fixed_pot_demos.npy'), obs_dict=True, is_demo=True,),
dict(path=path_func('fixed_pnp_demos.npy'), obs_dict=True, is_demo=True,),
dict(path=path_func('fixed_tray_demos.npy'), obs_dict=True, is_demo=True,),
]
if __name__ == "__main__":
variant = dict(
imsize=48,
env_class=SawyerGripEnv,
env_kwargs=dict(
action_mode='position',
config_name='ashvin_config',
reset_free=False,
position_action_scale=0.05,
max_speed=0.4,
step_sleep_time=0.2,
crop_version_str="crop_val_torch",
),
policy_class=GaussianPolicy,
policy_kwargs=dict(
hidden_sizes=[256, 256, 256, 256, ],
max_log_std=0,
min_log_std=-6,
std_architecture="values",
),
qf_kwargs=dict(
hidden_sizes=[256, 256],
),
trainer_kwargs=dict(
discount=0.99,
soft_target_tau=5e-3,
target_update_period=1,
policy_lr=3e-4,
qf_lr=3E-4,
reward_scale=1,
beta=1,
use_automatic_entropy_tuning=False,
alpha=0,
bc_num_pretrain_steps=0,
q_num_pretrain1_steps=0,
q_num_pretrain2_steps=0, #25001 #HERE
policy_weight_decay=1e-4,
q_weight_decay=0,
rl_weight=1.0,
use_awr_update=True,
use_reparam_update=False,
compute_bc=True,
reparam_weight=0.0,
awr_weight=1.0,
bc_weight=0.0,
reward_transform_kwargs=None,
terminal_transform_kwargs=None,
),
max_path_length=75, #50
algo_kwargs=dict(
batch_size=1024, #1024
num_epochs=101, #1001
num_eval_steps_per_epoch=600, #500
num_expl_steps_per_train_loop=600, #500
num_trains_per_train_loop=600, #500
min_num_steps_before_training=150, #150
),
replay_buffer_kwargs=dict(
fraction_future_context=0.6,
fraction_distribution_context=0.1, # TODO: Try less?
max_size=int(5E5), # HERE# HERE# HERE# HERE# HERE# HERE# HERE# HERE# HERE (DOUBLE CHECK THAT DEMOS FIT!!!!)
),
demo_replay_buffer_kwargs=dict(
fraction_future_context=0.6,
fraction_distribution_context=0.1, # TODO: Try less?
),
reward_kwargs=dict(
reward_type='sparse',
epsilon=1.0,
),
observation_key='latent_observation',
desired_goal_key='latent_desired_goal',
save_video=True,
save_video_kwargs=dict(
save_video_period=1,
pad_color=0,
),
encoder_wrapper=PresamplingEncoderWrappedEnv, # Uncomment if using pixelcnn
reset_keys_map=dict(
image_observation="initial_latent_state"
),
path_loader_class=EncoderDictToMDPPathLoader,
path_loader_kwargs=dict(
recompute_reward=True,
),
renderer_kwargs=dict(
create_image_format='HWC',
output_image_format='CWH',
flatten_image=True,
width=48,
height=48,
),
add_env_demos=False,
add_env_offpolicy_data=False,
load_demos=True,
pretrain_policy=True,
pretrain_rl=True,
evaluation_goal_sampling_mode="presampled_images",
exploration_goal_sampling_mode="presampled_conditional_prior",
train_vae_kwargs=dict(
imsize=48,
beta=1,
beta_schedule_kwargs=dict(
x_values=(0, 250),
y_values=(0, 100),
),
num_epochs=1501, #1501
embedding_dim=5,
dump_skew_debug_plots=False,
decoder_activation='sigmoid',
use_linear_dynamics=False,
generate_vae_dataset_kwargs=dict(
N=1000,
n_random_steps=2,
test_p=.9,
dataset_path={
'train': 'demos/icra2021/dataset_v1_train.npy',
'test': 'demos/icra2021/dataset_v1_test.npy',
},
augment_data=False,
use_cached=False,
show=False,
oracle_dataset=False,
oracle_dataset_using_set_to_goal=False,
non_presampled_goal_img_is_garbage=False,
random_rollout_data=True,
random_rollout_data_set_to_goal=True,
conditional_vae_dataset=True,
save_trajectories=False,
enviorment_dataset=False,
tag="ccrig_tuning_orig_network",
),
vae_trainer_class=VQ_VAETrainer,
vae_class=VQ_VAE,
vae_kwargs=dict(
input_channels=3,
imsize=48,
),
algo_kwargs=dict(
key_to_reconstruct='x_t',
start_skew_epoch=5000,
is_auto_encoder=False,
batch_size=128,
lr=1e-3,
skew_config=dict(
method='vae_prob',
power=0,
),
weight_decay=0.0,
skew_dataset=False,
priority_function_kwargs=dict(
decoder_distribution='gaussian_identity_variance',
sampling_method='importance_sampling',
num_latents_to_sample=10,
),
use_parallel_dataloading=False,
),
save_period=10,
),
train_model_func=train_vqvae,
presampled_goal_kwargs=dict(
eval_goals='/media/ashvin/data2/data/val/v1/curr_goal_eval_goals.pkl',
expl_goals=None,
),
launcher_config=dict(
unpack_variant=True,
region='us-west-1',
),
logger_config=dict(
snapshot_mode='gap',
snapshot_gap=1,
),
pickle_paths=True,
pretrained_vae_path=path_func('best_vqvae.pt'),
pretrained_algo_path='/home/ashvin/data/ashvin/icra2021/final/fixed-agent-overtrained/run18/id0/itr_1.pt',
#pretrained_algo_path=path_func('pretrained_agent_eps.pt'), #pretrained_agent.pt (fixed data polciy), pretrained_agent_eps.pt (fixed data polciy), drawer_agent.pt (drawer data policy)
)
search_space = {
"seed": range(1),
'path_loader_kwargs.demo_paths': [mini_demos], #CHANGED
'deterministc_eval': [False],
'reward_kwargs.epsilon': [1.0,], #1.75 is mean
'trainer_kwargs.beta': [0.3],
'num_pybullet_objects':[None],
'policy_kwargs.min_log_std': [-6],
'trainer_kwargs.awr_weight': [1.0],
'trainer_kwargs.awr_use_mle_for_vf': [True],
'trainer_kwargs.awr_sample_actions': [False],
'trainer_kwargs.clip_score': [2],
'trainer_kwargs.awr_min_q': [True],
'trainer_kwargs.reward_transform_kwargs': [None, ],
'trainer_kwargs.terminal_transform_kwargs': [dict(m=0, b=0)],
'qf_kwargs.output_activation': [Clamp(max=0)],
}
sweeper = hyp.DeterministicHyperparameterSweeper(
search_space, default_parameters=variant,
)
variants = []
for variant in sweeper.iterate_hyperparameters():
if variant['pretrained_algo_path'] == path_func('pretrained_agent.pt'):
            variant['reward_kwargs'] = dict(reward_type='sparse', epsilon=2.0)
if variant['pretrained_algo_path'] == path_func('pretrained_agent_eps.pt'):
            variant['reward_kwargs'] = dict(reward_type='sparse', epsilon=1.0)
if variant['pretrained_algo_path'] == path_func('drawer_agent.pt'):
            variant['reward_kwargs'] = dict(reward_type='sparse', epsilon=1.0)
variants.append(variant)
run_variants(awac_rig_experiment, variants, run_id=21) #HERE
|
py
|
1a5bf1320d17c933ec4f35fcaa94c14a60f1bb57
|
from . import cursor, db
class College():
def __init__(
self,
code: str = None,
name: str = None) -> None:
self.code = code
self.name = name
def get_all(self, page_num: int = None, item_per_page: int = None, paginate: bool = True) -> list:
if not paginate:
return self.college_list()
offset = (page_num - 1) * item_per_page
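        # e.g. page_num=2 with item_per_page=10 gives offset=10, skipping the first page of rows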
query = f'''
SELECT college.code, college.name, COUNT(*) AS courses, enrolled.student as enrolled
FROM college
JOIN course
ON college.code = course.college
LEFT JOIN (SELECT collegecode, COUNT(*) as student
FROM students
GROUP BY collegecode) enrolled
ON college.code = enrolled.collegecode
GROUP BY college.code
LIMIT {item_per_page} OFFSET {offset}
'''
cursor.execute(query)
result = cursor.fetchall()
colleges = [list(college) for college in result]
all_colleges = self.college_list()
for college in all_colleges:
if college[0] not in [code[0] for code in colleges]:
colleges.append([college[0], college[1], None, None])
return colleges
@staticmethod
def get_total() -> int:
query = '''SELECT * FROM college'''
cursor.execute(query)
result = cursor.fetchall()
total = len(result)
return total
def college_list(self) -> list:
query = '''
SELECT code, name
FROM college;
'''
cursor.execute(query)
result = cursor.fetchall()
colleges = [list(college) for college in result]
return colleges
@staticmethod
def get_departments() -> list:
query = '''
SELECT college.code, course.name
FROM college
JOIN course
ON college.code = course.college
'''
cursor.execute(query)
result = cursor.fetchall()
departments = [list(department) for department in result]
return departments
def search(self, keyword: str = None, field: str = None) -> list:
keyword = keyword.upper()
colleges = self.get_all(paginate=False)
result = []
if field is None:
result = self.search_by_field(colleges, keyword, 'all')
elif field == 'code':
result = self.search_by_field(colleges, keyword, 'code')
elif field == 'name':
result = self.search_by_field(colleges, keyword, 'name')
elif field == 'coursecount':
result = self.search_by_field(colleges, keyword, 'coursecount')
elif field == 'studentcount':
result = self.search_by_field(colleges, keyword, 'studentcount')
return result
@staticmethod
def search_by_field(rows: list = None, keyword: str = None, field: str = None) -> list:
result = []
for row in rows:
row_allcaps = [str(cell).upper() for cell in row]
if field == 'all':
if keyword in row_allcaps:
result.append(row)
if field == 'code':
if keyword == row_allcaps[0]:
result.append(row)
return result
elif field == 'name':
if keyword == row_allcaps[1]:
result.append(row)
elif field == 'coursecount':
if keyword in row_allcaps[2]:
result.append(row)
elif field == 'studentcount':
if keyword in row_allcaps[3]:
result.append(row)
return result
def add_new(self) -> None:
query = f'''
INSERT INTO college (
code,
name)
VALUES (
'{self.code}',
'{self.name}')
'''
cursor.execute(query)
db.commit()
return None
@staticmethod
def delete(code: str = None) -> None:
query = f'''
DELETE FROM college
WHERE code='{code}'
'''
cursor.execute(query)
db.commit()
return None
def update(self) -> None:
query = f'''
UPDATE college
SET
code = '{self.code}',
name = '{self.name}'
WHERE
code = '{self.code}'
'''
cursor.execute(query)
db.commit()
return None
@staticmethod
def get_collegecode_for(course_name: str = None) -> str:
query = f'''
SELECT code
FROM college
WHERE name = '{course_name}'
'''
cursor.execute(query)
code = cursor.fetchone()[0]
return code
@staticmethod
def get_collegecodes() -> list:
query = '''
SELECT code
FROM college
'''
cursor.execute(query)
result = cursor.fetchall()
CODES = [code[0] for code in result]
return CODES
|
py
|
1a5bf2c33c161ac6107bed098f7bb723695cde46
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
import logging
import os
import shutil
import sys
import tempfile
from pyflink.dataset import ExecutionEnvironment
from pyflink.table import BatchTableEnvironment, TableConfig
def word_count():
content = "line Licensed to the Apache Software Foundation ASF under one " \
"line or more contributor license agreements See the NOTICE file " \
"line distributed with this work for additional information " \
"line regarding copyright ownership The ASF licenses this file " \
"to you under the Apache License Version the " \
"License you may not use this file except in compliance " \
"with the License"
t_config = TableConfig()
env = ExecutionEnvironment.get_execution_environment()
t_env = BatchTableEnvironment.create(env, t_config)
# register Results table in table environment
tmp_dir = tempfile.gettempdir()
result_path = tmp_dir + '/result'
if os.path.exists(result_path):
try:
if os.path.isfile(result_path):
os.remove(result_path)
else:
shutil.rmtree(result_path)
except OSError as e:
logging.error("Error removing directory: %s - %s.", e.filename, e.strerror)
logging.info("Results directory: %s", result_path)
sink_ddl = """
create table Results(
word VARCHAR,
`count` BIGINT
) with (
'connector.type' = 'filesystem',
'format.type' = 'csv',
'connector.path' = '{}'
)
""".format(result_path)
t_env.execute_sql(sink_ddl)
elements = [(word, 1) for word in content.split(" ")]
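    # Each word is emitted as a (word, 1) pair; grouping by word and counting yields the totals.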
t_env.from_elements(elements, ["word", "count"]) \
.group_by("word") \
.select("word, count(1) as count") \
.insert_into("Results")
t_env.execute("word_count")
if __name__ == '__main__':
logging.basicConfig(stream=sys.stdout, level=logging.INFO, format="%(message)s")
word_count()
|
py
|
1a5bf45381dcf538f4c6a73788441a8aed5098a5
|
class Element:
def __init__(self, *children):
self.parent = None
self.children = []
for child in children:
self.append(child)
def append(self, element):
"""Append a given element as a last child of this element"""
if isinstance(element, Element):
element.parent = self
self.children.append(element)
return self
def prepend(self, element):
"""Prepend a given element as a first child of this element"""
if isinstance(element, Element):
element.parent = self
self.children.insert(0, element)
return self
def wrap(self, element):
"""Wrap this element in a given element"""
deepest = element
while deepest.first_child:
deepest = deepest.first_child
deepest.append(self)
return element
def dump(self, indent=2):
def do_dump(element, depth=0):
yield '{indent}{element}'.format(
indent=' ' * (indent * depth),
element=repr(element)
)
if isinstance(element, Element):
for child in element:
yield from do_dump(child, depth + 1)
return '\n'.join(do_dump(self))
@property
def first_child(self):
return self.children[0] if self.children else None
@property
def last_child(self):
return self.children[-1] if self.children else None
def __iter__(self):
return iter(self.children)
def __eq__(self, other):
if type(self) is not type(other):
return False
mine = dict(vars(self))
del mine['parent']
others = dict(vars(other))
del others['parent']
return mine == others
def __repr__(self):
identifier = '{:04x}'.format(id(self))
keys = vars(self).keys()
keys -= {'parent', 'children'}
keys = filter(lambda key: not key.startswith('_'), keys)
mappings = [
'{}={}'.format(key, repr(getattr(self, key)))
for key in sorted(keys)
]
return '{name}#{identifier}({mappings})'.format(
name=self.__class__.__name__,
identifier=identifier[-4:],
mappings=', '.join(mappings)
)
|
py
|
1a5bf55a7fb059c67798e6c40d48955bede12505
|
from typing import Any, Dict, List, Optional
import aiohttp
from spare.cmds.units import units
from spare.consensus.block_record import BlockRecord
from spare.rpc.farmer_rpc_client import FarmerRpcClient
from spare.rpc.full_node_rpc_client import FullNodeRpcClient
from spare.rpc.wallet_rpc_client import WalletRpcClient
from spare.util.config import load_config
from spare.util.default_root import DEFAULT_ROOT_PATH
from spare.util.ints import uint16
from spare.util.misc import format_bytes
from spare.util.misc import format_minutes
from spare.util.network import is_localhost
SECONDS_PER_BLOCK = (24 * 3600) / 4608
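# The chain targets 4608 blocks per day, so this is the expected number of seconds per block.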
async def get_harvesters(farmer_rpc_port: Optional[int]) -> Optional[Dict[str, Any]]:
try:
config = load_config(DEFAULT_ROOT_PATH, "config.yaml")
self_hostname = config["self_hostname"]
if farmer_rpc_port is None:
farmer_rpc_port = config["farmer"]["rpc_port"]
farmer_client = await FarmerRpcClient.create(self_hostname, uint16(farmer_rpc_port), DEFAULT_ROOT_PATH, config)
plots = await farmer_client.get_harvesters()
except Exception as e:
if isinstance(e, aiohttp.ClientConnectorError):
print(f"Connection error. Check if farmer is running at {farmer_rpc_port}")
else:
print(f"Exception from 'harvester' {e}")
return None
farmer_client.close()
await farmer_client.await_closed()
return plots
async def get_blockchain_state(rpc_port: Optional[int]) -> Optional[Dict[str, Any]]:
blockchain_state = None
try:
config = load_config(DEFAULT_ROOT_PATH, "config.yaml")
self_hostname = config["self_hostname"]
if rpc_port is None:
rpc_port = config["full_node"]["rpc_port"]
client = await FullNodeRpcClient.create(self_hostname, uint16(rpc_port), DEFAULT_ROOT_PATH, config)
blockchain_state = await client.get_blockchain_state()
except Exception as e:
if isinstance(e, aiohttp.ClientConnectorError):
print(f"Connection error. Check if full node is running at {rpc_port}")
else:
print(f"Exception from 'full node' {e}")
client.close()
await client.await_closed()
return blockchain_state
async def get_average_block_time(rpc_port: Optional[int]) -> float:
try:
blocks_to_compare = 500
config = load_config(DEFAULT_ROOT_PATH, "config.yaml")
self_hostname = config["self_hostname"]
if rpc_port is None:
rpc_port = config["full_node"]["rpc_port"]
client = await FullNodeRpcClient.create(self_hostname, uint16(rpc_port), DEFAULT_ROOT_PATH, config)
blockchain_state = await client.get_blockchain_state()
curr: Optional[BlockRecord] = blockchain_state["peak"]
if curr is None or curr.height < (blocks_to_compare + 100):
client.close()
await client.await_closed()
return SECONDS_PER_BLOCK
while curr is not None and curr.height > 0 and not curr.is_transaction_block:
curr = await client.get_block_record(curr.prev_hash)
if curr is None:
client.close()
await client.await_closed()
return SECONDS_PER_BLOCK
past_curr = await client.get_block_record_by_height(curr.height - blocks_to_compare)
while past_curr is not None and past_curr.height > 0 and not past_curr.is_transaction_block:
past_curr = await client.get_block_record(past_curr.prev_hash)
if past_curr is None:
client.close()
await client.await_closed()
return SECONDS_PER_BLOCK
client.close()
await client.await_closed()
return (curr.timestamp - past_curr.timestamp) / (curr.height - past_curr.height)
except Exception as e:
if isinstance(e, aiohttp.ClientConnectorError):
print(f"Connection error. Check if full node is running at {rpc_port}")
else:
print(f"Exception from 'full node' {e}")
client.close()
await client.await_closed()
return SECONDS_PER_BLOCK
async def get_wallets_stats(wallet_rpc_port: Optional[int]) -> Optional[Dict[str, Any]]:
amounts = None
try:
config = load_config(DEFAULT_ROOT_PATH, "config.yaml")
self_hostname = config["self_hostname"]
if wallet_rpc_port is None:
wallet_rpc_port = config["wallet"]["rpc_port"]
wallet_client = await WalletRpcClient.create(self_hostname, uint16(wallet_rpc_port), DEFAULT_ROOT_PATH, config)
amounts = await wallet_client.get_farmed_amount()
#
# Don't catch any exceptions, the caller will handle it
#
finally:
wallet_client.close()
await wallet_client.await_closed()
return amounts
async def is_farmer_running(farmer_rpc_port: Optional[int]) -> bool:
is_running = False
try:
config = load_config(DEFAULT_ROOT_PATH, "config.yaml")
self_hostname = config["self_hostname"]
if farmer_rpc_port is None:
farmer_rpc_port = config["farmer"]["rpc_port"]
farmer_client = await FarmerRpcClient.create(self_hostname, uint16(farmer_rpc_port), DEFAULT_ROOT_PATH, config)
await farmer_client.get_connections()
is_running = True
except Exception as e:
if isinstance(e, aiohttp.ClientConnectorError):
print(f"Connection error. Check if farmer is running at {farmer_rpc_port}")
else:
print(f"Exception from 'farmer' {e}")
farmer_client.close()
await farmer_client.await_closed()
return is_running
async def get_challenges(farmer_rpc_port: Optional[int]) -> Optional[List[Dict[str, Any]]]:
signage_points = None
try:
config = load_config(DEFAULT_ROOT_PATH, "config.yaml")
self_hostname = config["self_hostname"]
if farmer_rpc_port is None:
farmer_rpc_port = config["farmer"]["rpc_port"]
farmer_client = await FarmerRpcClient.create(self_hostname, uint16(farmer_rpc_port), DEFAULT_ROOT_PATH, config)
signage_points = await farmer_client.get_signage_points()
except Exception as e:
if isinstance(e, aiohttp.ClientConnectorError):
print(f"Connection error. Check if farmer is running at {farmer_rpc_port}")
else:
print(f"Exception from 'farmer' {e}")
farmer_client.close()
await farmer_client.await_closed()
return signage_points
async def challenges(farmer_rpc_port: Optional[int], limit: int) -> None:
signage_points = await get_challenges(farmer_rpc_port)
if signage_points is None:
return None
signage_points.reverse()
if limit != 0:
signage_points = signage_points[:limit]
for signage_point in signage_points:
print(
(
f"Hash: {signage_point['signage_point']['challenge_hash']} "
f"Index: {signage_point['signage_point']['signage_point_index']}"
)
)
async def summary(
rpc_port: Optional[int],
wallet_rpc_port: Optional[int],
harvester_rpc_port: Optional[int],
farmer_rpc_port: Optional[int],
) -> None:
all_harvesters = await get_harvesters(farmer_rpc_port)
blockchain_state = await get_blockchain_state(rpc_port)
farmer_running = await is_farmer_running(farmer_rpc_port)
wallet_not_ready: bool = False
wallet_not_running: bool = False
amounts = None
try:
amounts = await get_wallets_stats(wallet_rpc_port)
except Exception as e:
if isinstance(e, aiohttp.ClientConnectorError):
wallet_not_running = True
else:
wallet_not_ready = True
print("Farming status: ", end="")
if blockchain_state is None:
print("Not available")
elif blockchain_state["sync"]["sync_mode"]:
print("Syncing")
elif not blockchain_state["sync"]["synced"]:
print("Not synced or not connected to peers")
elif not farmer_running:
print("Not running")
else:
print("Farming")
if amounts is not None:
print(f"Total spare farmed: {amounts['farmed_amount'] / units['spare']}")
print(f"User transaction fees: {amounts['fee_amount'] / units['spare']}")
print(f"Block rewards: {(amounts['farmer_reward_amount'] + amounts['pool_reward_amount']) / units['spare']}")
print(f"Last height farmed: {amounts['last_height_farmed']}")
class PlotStats:
total_plot_size = 0
total_plots = 0
if all_harvesters is not None:
harvesters_local: dict = {}
harvesters_remote: dict = {}
for harvester in all_harvesters["harvesters"]:
ip = harvester["connection"]["host"]
if is_localhost(ip):
harvesters_local[harvester["connection"]["node_id"]] = harvester
else:
if ip not in harvesters_remote:
harvesters_remote[ip] = {}
harvesters_remote[ip][harvester["connection"]["node_id"]] = harvester
def process_harvesters(harvester_peers_in: dict):
for harvester_peer_id, plots in harvester_peers_in.items():
total_plot_size_harvester = sum(map(lambda x: x["file_size"], plots["plots"]))
PlotStats.total_plot_size += total_plot_size_harvester
PlotStats.total_plots += len(plots["plots"])
print(f" {len(plots['plots'])} plots of size: {format_bytes(total_plot_size_harvester)}")
if len(harvesters_local) > 0:
print(f"Local Harvester{'s' if len(harvesters_local) > 1 else ''}")
process_harvesters(harvesters_local)
for harvester_ip, harvester_peers in harvesters_remote.items():
print(f"Remote Harvester{'s' if len(harvester_peers) > 1 else ''} for IP: {harvester_ip}")
process_harvesters(harvester_peers)
print(f"Plot count for all harvesters: {PlotStats.total_plots}")
print("Total size of plots: ", end="")
print(format_bytes(PlotStats.total_plot_size))
else:
print("Plot count: Unknown")
print("Total size of plots: Unknown")
if blockchain_state is not None:
print("Estimated network space: ", end="")
print(format_bytes(blockchain_state["space"]))
else:
print("Estimated network space: Unknown")
minutes = -1
if blockchain_state is not None and all_harvesters is not None:
proportion = PlotStats.total_plot_size / blockchain_state["space"] if blockchain_state["space"] else -1
minutes = int((await get_average_block_time(rpc_port) / 60) / proportion) if proportion else -1
if all_harvesters is not None and PlotStats.total_plots == 0:
print("Expected time to win: Never (no plots)")
else:
print("Expected time to win: " + format_minutes(minutes))
if amounts is None:
if wallet_not_running:
print("For details on farmed rewards and fees you should run 'spare start wallet' and 'spare wallet show'")
elif wallet_not_ready:
print("For details on farmed rewards and fees you should run 'spare wallet show'")
else:
print("Note: log into your key using 'spare wallet show' to see rewards for each key")
|
py
|
1a5bf59e7c1b8f6d98427c380c381270f1cd1c6b
|
import numpy as np
import pandas as pd
def accuracy_score(y_true, y_pred):
    '''Classification performance metric that computes the accuracy of y_pred against y_true'''
correct = 0
for true, pred in zip(y_true, y_pred):
if true == pred:
correct += 1 # count the correct predictions
accuracy = correct/len(y_true)
return accuracy
def mse(y_true, y_pred, squared = True):
    '''Mean squared error regression loss function.
Parameters
:param numpy.array y_true: array-like of shape(n_samples,)
Ground truth (correct) target values
:param numpy.array y_pred: array-like of shape(n_samples,)
Estimated target values
    :param bool squared: If True returns MSE, if False returns RMSE'''
y_true = np.array(y_true)
y_pred = np.array(y_pred)
errors = np.average((y_true-y_pred)**2, axis = 0)
if not squared:
errors = np.sqrt(errors)
return np.average(errors)
def mse_prime(y_true, y_pred):
return 2*(y_pred-y_true)/y_true.size
def cross_entropy(y_true, y_pred):
return -(y_true * np.log(y_pred)).sum()
def cross_entropy_prime(y_true, y_pred):
return y_pred - y_true
def r2_score(y_true, y_pred):
"""
R^2 regression score function.
R^2 = 1 - SS_res / SS_tot
where SS_res is the residual sum of squares and SS_tot is the total
sum of squares.
:param numpy.array y_true : array-like of shape (n_samples,) Ground truth (correct) target values.
:param numpy.array y_pred : array-like of shape (n_samples,) Estimated target values.
:returns: score (float) R^2 score.
"""
# Residual sum of squares.
numerator = ((y_true - y_pred) ** 2).sum(axis=0)
# Total sum of squares.
denominator = ((y_true - np.average(y_true, axis=0)) ** 2).sum(axis=0)
# R^2.
score = 1 - numerator / denominator
return score
def mse(y_true, y_pred):
"""
Mean squared error regression loss function.
Parameters
:param numpy.array y_true: array-like of shape (n_samples,)
Ground truth (correct) target values.
:param numpy.array y_pred: array-like of shape (n_samples,)
Estimated target values.
:returns: loss (float) A non-negative floating point value (the best value is 0.0).
"""
return np.mean(np.power(y_true-y_pred, 2))
class ConfusionMatrix:
def __init__(self, true_y, predict_y):
'''Confusion Matrix implementation for the
evaluation of the performance model by comparing the true vs the predicted values.'''
self.true_y = np.array(true_y)
self.predict_y = np.array(predict_y)
self.conf_matrix = None
def build_matrix(self):
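        # pd.crosstab tabulates counts for every (true label, predicted label) pair.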
self.conf_matrix = pd.crosstab(self.true_y,self.predict_y, rownames = ["True values"], colnames = ["Predicted values"])
    def toDataframe(self):
        self.build_matrix()
        return pd.DataFrame(self.conf_matrix)
|
py
|
1a5bf59e87e82c429f62ab165aec21611e4c0b6e
|
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Scanning code to find music in a library."""
import dataclasses
import mimetypes
import os
import pathlib
from typing import Iterable
import mutagen
from pepper_music_player.metadata import entity
from pepper_music_player.metadata import tag
@dataclasses.dataclass(frozen=True)
class File:
"""A file in the music library.
Attributes:
filename: Absolute filename.
dirname: Absolute name of the directory containing the file.
basename: Name of the file, relative to dirname.
"""
filename: str
dirname: str
basename: str
@dataclasses.dataclass(frozen=True)
class AudioFile(File):
"""An audio file.
Attributes:
track: The track in the file.
"""
track: entity.Track
@dataclasses.dataclass(frozen=True)
class ImageFile(File):
"""An image file.
Attributes:
image: The image in the file.
"""
image: entity.Image
def _read_audio_tags(dirname: str, basename: str, filename: str) -> tag.Tags:
"""Returns tags read from an audio file."""
file_info = mutagen.File(filename, easy=True)
return tag.Tags({
**(file_info.tags or {}),
tag.BASENAME: (basename,),
tag.DIRNAME: (dirname,),
tag.FILENAME: (filename,),
tag.DURATION_SECONDS: (str(file_info.info.length),),
}).derive()
def _read_image_tags(dirname: str, basename: str, filename: str) -> tag.Tags:
"""Returns tags read from an image file."""
# TODO(#61): Actually read more tags (e.g., width and height) from the file
# itself.
return tag.Tags({
tag.BASENAME: (basename,),
tag.DIRNAME: (dirname,),
tag.FILENAME: (filename,),
}).derive()
def scan(root_dirname: str) -> Iterable[File]:
"""Scans a directory."""
# TODO: Keep track of errors with os.walk(onerror=...)
# TODO: Catch and handle per-file errors.
for dirname, _, basenames in os.walk(os.path.abspath(root_dirname)):
dirpath = pathlib.Path(dirname)
for basename in basenames:
filepath = dirpath.joinpath(basename)
mime, _ = mimetypes.guess_type(filepath.as_uri())
mime_major, _, _ = (mime or '').partition('/')
if mime_major == 'audio':
yield AudioFile(
filename=str(filepath),
dirname=dirname,
basename=basename,
track=entity.Track(
tags=_read_audio_tags(dirname=dirname,
basename=basename,
filename=str(filepath))),
)
elif mime_major == 'image':
yield ImageFile(
filename=str(filepath),
dirname=dirname,
basename=basename,
image=entity.Image(tags=_read_image_tags(
dirname=dirname,
basename=basename,
filename=str(filepath),
)),
)
else:
yield File(filename=str(filepath),
dirname=dirname,
basename=basename)
|
py
|
1a5bf6dd3d036915a7840b3733f99b017264320e
|
import apsw
connection = apsw.Connection("apibb.db")
cursor = connection.cursor()
cursor.execute("CREATE TABLE names(name TEXT PRIMARY KEY, created INTEGER, expires INTEGER)")
cursor.execute("CREATE TABLE ads(name TEXT, uri TEXT, pubkey TEXT, created INTEGER, expires INTEGER)")
|
py
|
1a5bf73887f0ed98c8a3eca5d6f4bdf89dce9433
|
from django.core.wsgi import get_wsgi_application
from dj_static import Cling
import os
os.environ['DJANGO_SETTINGS_MODULE'] = 'socialsonar.settings'
application = Cling(get_wsgi_application())
|
py
|
1a5bf762d8c3a5054020c2477e8504dce2bf43d9
|
# -*- coding:utf-8 -*-
import time
import requests
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from lxml import etree
def run(url):
    # Configure the headless browser, character encoding and request headers to avoid anti-crawler detection
options = Options()
options.add_argument('--headless')
options.add_argument('lang=zh_CN.UTF-8')
UserAgent = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.84 Safari/537.36'
options.add_argument('User-Agent=' + UserAgent)
    browser = webdriver.Chrome(options=options)
browser.get(url)
res = etree.HTML(browser.page_source)
    # Extract the article page links and crawl each one
article_urls = res.xpath('//div[@class="article-list"]/div/h4/a/@href')
for article_url in article_urls:
browser.get(article_url)
article_result = etree.HTML(browser.page_source)
title = article_result.xpath('//h1[@class="title-article"]/text()')[0]
publish_time = article_result.xpath('//div[@class="bar-content"]/span[@class="time"]/text()')[0]
print(publish_time, title)
browser.close()
if __name__ == '__main__':
start = time.time()
    for i in range(1, 2):  # build the task URLs
url = 'https://blog.csdn.net/cui_yonghua/article/list/1'
run(url=url)
print('time cost:{}'.format(time.time() - start))
|
py
|
1a5bf8211a87a82ba5e533009dd4c6d452804ad1
|
import logging
from dirtyfields import DirtyFieldsMixin
from django.contrib.contenttypes.models import ContentType
from django.contrib.postgres.fields import ArrayField
from django.core.exceptions import ValidationError
from django.db import IntegrityError, models
from django.utils.functional import cached_property
from django.utils import timezone
from guardian.models import GroupObjectPermissionBase, UserObjectPermissionBase
from include import IncludeManager
from framework.celery_tasks.handlers import enqueue_task
from osf.models.base import BaseModel, GuidMixin
from osf.models.mixins import GuardianMixin, TaxonomizableMixin
from osf.models.validators import validate_title
from osf.utils.fields import NonNaiveDateTimeField
from website.exceptions import NodeStateError
from website.util import api_v2_url
from website.search.exceptions import SearchUnavailableError
logger = logging.getLogger(__name__)
class CollectionSubmission(TaxonomizableMixin, BaseModel):
primary_identifier_name = 'guid___id'
class Meta:
order_with_respect_to = 'collection'
unique_together = ('collection', 'guid')
collection = models.ForeignKey('Collection', on_delete=models.CASCADE)
guid = models.ForeignKey('Guid', on_delete=models.CASCADE)
creator = models.ForeignKey('OSFUser')
collected_type = models.CharField(blank=True, max_length=31)
status = models.CharField(blank=True, max_length=31)
@cached_property
def _id(self):
return '{}-{}'.format(self.guid._id, self.collection._id)
@classmethod
def load(cls, data, select_for_update=False):
try:
cgm_id, collection_id = data.split('-')
except ValueError:
raise ValueError('Invalid CollectionSubmission object <_id {}>'.format(data))
else:
if cgm_id and collection_id:
try:
if isinstance(data, basestring):
return (cls.objects.get(guid___id=cgm_id, collection__guids___id=collection_id) if not select_for_update
else cls.objects.filter(guid___id=cgm_id, collection__guids___id=collection_id).select_for_update().get())
except cls.DoesNotExist:
return None
return None
def update_index(self):
if self.collection.is_public:
from website.search.search import update_collected_metadata
try:
update_collected_metadata(self.guid._id, collection_id=self.collection.id)
except SearchUnavailableError as e:
logger.exception(e)
def remove_from_index(self):
from website.search.search import update_collected_metadata
try:
update_collected_metadata(self.guid._id, collection_id=self.collection.id, op='delete')
except SearchUnavailableError as e:
logger.exception(e)
def save(self, *args, **kwargs):
kwargs.pop('old_subjects', None) # Not indexing this, trash it
ret = super(CollectionSubmission, self).save(*args, **kwargs)
self.update_index()
return ret
class Collection(DirtyFieldsMixin, GuidMixin, BaseModel, GuardianMixin):
objects = IncludeManager()
groups = {
'read': ('read_collection', ),
'write': ('read_collection', 'write_collection', ),
'admin': ('read_collection', 'write_collection', 'admin_collection', )
}
group_format = 'collections_{self.id}_{group}'
class Meta:
permissions = (
('read_collection', 'Read Collection'),
('write_collection', 'Write Collection'),
('admin_collection', 'Admin Collection'),
)
provider = models.ForeignKey('AbstractProvider', blank=True, null=True, on_delete=models.CASCADE)
creator = models.ForeignKey('OSFUser')
guid_links = models.ManyToManyField('Guid', through=CollectionSubmission, related_name='collections')
collected_types = models.ManyToManyField(
'contenttypes.ContentType',
related_name='+',
limit_choices_to={
'model__in': ['abstractnode', 'basefilenode', 'collection', 'preprintservice']
})
title = models.CharField(max_length=200, validators=[validate_title])
collected_type_choices = ArrayField(models.CharField(max_length=31), blank=True, default=list)
status_choices = ArrayField(models.CharField(max_length=31), blank=True, default=list)
is_public = models.BooleanField(default=False, db_index=True)
is_promoted = models.BooleanField(default=False, db_index=True)
is_bookmark_collection = models.BooleanField(default=False, db_index=True)
deleted = NonNaiveDateTimeField(null=True, blank=True)
def __unicode__(self):
return '{self.title!r}, with guid {self._id!r}'.format(self=self)
@property
def url(self):
return '/{}/'.format(self._id)
def get_absolute_url(self):
return self.absolute_api_v2_url
@property
def absolute_api_v2_url(self):
return api_v2_url('/collections{}'.format(self.url))
@property
def linked_nodes_self_url(self):
return '{}relationships/linked_nodes/'.format(self.absolute_api_v2_url)
@property
def linked_registrations_self_url(self):
return '{}relationships/linked_registrations/'.format(self.absolute_api_v2_url)
@property
def linked_nodes_related_url(self):
return '{}linked_nodes/'.format(self.absolute_api_v2_url)
@property
def linked_registrations_related_url(self):
return '{}linked_registrations/'.format(self.absolute_api_v2_url)
@classmethod
def bulk_update_search(cls, cgms, op='update', index=None):
from website import search
try:
search.search.bulk_update_collected_metadata(cgms, op=op, index=index)
except search.exceptions.SearchUnavailableError as e:
logger.exception(e)
def save(self, *args, **kwargs):
first_save = self.id is None
if self.is_bookmark_collection:
if first_save and self.creator.collection_set.filter(is_bookmark_collection=True, deleted__isnull=True).exists():
raise IntegrityError('Each user cannot have more than one Bookmark collection.')
if self.title != 'Bookmarks':
# Bookmark collections are always named 'Bookmarks'
self.title = 'Bookmarks'
saved_fields = self.get_dirty_fields() or []
ret = super(Collection, self).save(*args, **kwargs)
if first_save:
# Set defaults for M2M
self.collected_types = ContentType.objects.filter(app_label='osf', model__in=['abstractnode', 'collection'])
# Set up initial permissions
self.update_group_permissions()
self.get_group('admin').user_set.add(self.creator)
elif 'is_public' in saved_fields:
from website.collections.tasks import on_collection_updated
enqueue_task(on_collection_updated.s(self._id))
return ret
def has_permission(self, user, perm):
return user.has_perms(self.groups[perm], self)
def collect_object(self, obj, collector, collected_type=None, status=None):
""" Adds object to collection, creates CollectionSubmission reference
Performs type / metadata validation. User permissions checked in view.
:param GuidMixin obj: Object to collect. Must be of a ContentType specified in collected_types
:param OSFUser collector: User doing the collecting
:param str collected_type: Metadata "type" of submission, validated against collected_type_choices
:param str status: Metadata "status" of submission, validated against status_choices
:return: CollectionSubmission object or raise exception
"""
collected_type = collected_type or ''
status = status or ''
if self.collected_type_choices and collected_type not in self.collected_type_choices:
raise ValidationError('"{}" is not an acceptable "type" for this collection'.format(collected_type))
if self.status_choices and status not in self.status_choices:
raise ValidationError('"{}" is not an acceptable "status" for this collection'.format(status))
if not any([isinstance(obj, t.model_class()) for t in self.collected_types.all()]):
# Not all objects have a content_type_pk, have to look the other way.
# Ideally, all objects would, and we could do:
# self.content_types.filter(id=obj.content_type_pk).exists()
raise ValidationError('"{}" is not an acceptable "ContentType" for this collection'.format(ContentType.objects.get_for_model(obj).model))
# Unique together -- self and guid
if self.collectionsubmission_set.filter(guid=obj.guids.first()).exists():
raise ValidationError('Object already exists in collection.')
cgm = self.collectionsubmission_set.create(guid=obj.guids.first(), creator=collector)
cgm.collected_type = collected_type
cgm.status = status
cgm.save()
return cgm
def remove_object(self, obj):
""" Removes object from collection
:param obj: object to remove from collection, if it exists. Acceptable types- CollectionSubmission, GuidMixin
"""
if isinstance(obj, CollectionSubmission):
if obj.collection == self:
obj.remove_from_index()
self.collectionsubmission_set.filter(id=obj.id).delete()
return
else:
cgm = self.collectionsubmission_set.get(guid=obj.guids.first())
if cgm:
cgm.remove_from_index()
cgm.delete()
return
raise ValueError('Node link does not belong to the requested node.')
def delete(self):
""" Mark collection as deleted
"""
if self.is_bookmark_collection:
# Not really the right exception to raise, but it's for back-compatibility
# TODO: Use a more correct exception and catch it in the necessary places
raise NodeStateError('Bookmark collections may not be deleted.')
self.deleted = timezone.now()
if self.is_public:
self.bulk_update_search(list(self.collectionsubmission_set.all()), op='delete')
self.save()
class CollectionUserObjectPermission(UserObjectPermissionBase):
content_object = models.ForeignKey(Collection, on_delete=models.CASCADE)
class CollectionGroupObjectPermission(GroupObjectPermissionBase):
content_object = models.ForeignKey(Collection, on_delete=models.CASCADE)
|
py
|
1a5bf8f2a03864ccec91bd031d8e3ed367c803f6
|
import numpy as np
import time
import MyUtils
def _mean_squared_error(y, pred):
return 0.5 * np.mean((y - pred) ** 2)
def _mean_abs_error(y, pred):
    return np.mean(np.abs(y - pred))
def _sigmoid(x):
return 1. / (1. + np.exp(-x))
def _fourier(x):
return np.sin(x)
def _hardlimit(x):
return (x >= 0).astype(int)
def _identity(x):
return x
def getActivation(name):
return {
'sigmoid': _sigmoid,
'fourier': _fourier,
'hardlimit': _hardlimit
}[name]
def getLoss(name):
return {
'mse': _mean_squared_error,
'mae': _mean_abs_error
}[name]
class ELM:
def __init__(self, num_input_nodes, num_hidden_units, num_out_units, activation='sigmoid',
loss='mse', beta_init=None, w_init=None, bias_init=None):
self._num_input_nodes = num_input_nodes
self._num_hidden_units = num_hidden_units
self._num_out_units = num_out_units
self._activation = getActivation(activation)
self._loss = getLoss(loss)
if isinstance(beta_init, np.ndarray):
self._beta = beta_init
else:
self._beta = np.random.uniform(-1., 1., size=(self._num_hidden_units, self._num_out_units))
if isinstance(w_init, np.ndarray):
self._w = w_init
else:
self._w = np.random.uniform(-1, 1, size=(self._num_input_nodes, self._num_hidden_units))
if isinstance(bias_init, np.ndarray):
self._bias = bias_init
else:
self._bias = np.zeros(shape=(self._num_hidden_units,))
print('Bias shape:', self._bias.shape)
print('W shape:', self._w.shape)
print('Beta shape:', self._beta.shape)
def fit(self, X, Y, display_time=False):
H = self._activation(X.dot(self._w) + self._bias)
# Moore–Penrose pseudo inverse
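        # beta = pinv(H) @ Y is the least-squares solution of H @ beta ~= Y, which is
        # the only "training" step an extreme learning machine performs.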
if display_time:
start = time.time()
H_pinv = np.linalg.pinv(H)
if display_time:
stop = time.time()
print(f'Train time: {stop-start}')
self._beta = H_pinv.dot(Y)
# print('Fit Beta shape:', self._beta.shape)
def __call__(self, X):
H = self._activation(X.dot(self._w) + self._bias)
return H.dot(self._beta)
def evaluate(self, X, Y):
pred = self(X)
# Loss (base on model setting)
loss = self._loss(Y, pred)
# Accuracy
acc = np.sum(np.argmax(pred, axis=-1) == np.argmax(Y, axis=-1)) / len(Y)
# Unweighted Average Recall
# TODO
return loss, acc
if __name__ == "__main__":
X_train, y_train, X_test, y_test = MyUtils.load_data(r'C:\Users\panda\Desktop\光谱数据样例\index_AFGK_2kx4.csv', class_num=4,
norm=True, shuffle=True, split=0.8, one_hot=True)
model = ELM(
15,
500,
4
)
model.fit(X_train, y_train)
print(model.evaluate(X_test, y_test))
|
py
|
1a5bfb4742061dd179fe7ec082155091e6866f01
|
# Database model
from datetime import datetime
from sayhello import db
class Message(db.Model):
id = db.Column(db.Integer,primary_key=True)
body = db.Column(db.String(20))
name = db.Column(db.String(20))
timestamp = db.Column(db.DateTime,default=datetime.utcnow,index=True)
|
py
|
1a5bfbbf06a51432c97a1533126b0dc510508b09
|
# NOT NEEDED AT THIS TIME
# ==================================================================================
# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# Permission is hereby granted, free of charge, to any person obtaining a copy of this
# software and associated documentation files (the "Software"), to deal in the Software
# without restriction, including without limitation the rights to use, copy, modify,
# merge, publish, distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
# INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
# PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
# ==================================================================================
#
# audioUtils.py
# by: Rob Dachowski
# For questions or feedback, please contact [email protected]
#
# Purpose: The program provides a number of utility audio functions used to create
# transcribed, translated, and subtitled videos using Amazon Transcribe,
# Amazon Translate, Amazon Polly, and MoviePy
#
# Change Log:
# 6/29/2018: Initial version
#
# ==================================================================================
import boto3
import os
import sys
import json
import contextlib
from moviepy.editor import *
from moviepy import editor
from contextlib import closing
# ==================================================================================
# Function: writeAudio
# Purpose: writes the bytes associated with the stream to a binary file
# Parameters:
#    output_file - the name + extension of the output file (e.g. "abc.mp3")
# stream - the stream of bytes to write to the output_file
# ==================================================================================
def writeAudio( output_file, stream ):
bytes = stream.read()
print "\t==> Writing ", len(bytes), "bytes to audio file: ", output_file
try:
# Open a file for writing the output as a binary stream
with open(output_file, "wb") as file:
file.write(bytes)
if file.closed:
print "\t==>", output_file, " is closed"
else:
print "\t==>", output_file, " is NOT closed"
except IOError as error:
# Could not write to file, exit gracefully
print(error)
sys.exit(-1)
# ==================================================================================
# Function: createAudioTrackFromTranslation
# Purpose: Using the provided transcript, get a translation from Amazon Translate, then use Amazon Polly to synthesize speech
# Parameters:
# region - the aws region in which to run the service
# transcript - the Amazon Transcribe JSON structure to translate
# sourceLangCode - the language code for the original content (e.g. English = "EN")
#   targetLangCode - the language code for the translated content (e.g. Spanish = "ES")
# audioFileName - the name (including extension) of the target audio file (e.g. "abc.mp3")
# ==================================================================================
def createAudioTrackFromTranslation( region, transcript, sourceLangCode, targetLangCode, audioFileName ):
print( "\n==> createAudioTrackFromTranslation " )
# Set up the polly and translate services
client = boto3.client('polly')
translate = boto3.client(service_name='translate', region_name=region, use_ssl=True)
#get the transcript text
temp = json.loads( transcript)
transcript_txt = temp["results"]["transcripts"][0]["transcript"]
voiceId = getVoiceId( targetLangCode )
# Now translate it.
translated_txt = unicode((translate.translate_text(Text=transcript_txt, SourceLanguageCode=sourceLangCode, TargetLanguageCode=targetLangCode))["TranslatedText"])[:2999]
# Use the translated text to create the synthesized speech
response = client.synthesize_speech( OutputFormat="mp3", SampleRate="22050", Text=translated_txt, VoiceId=voiceId)
if response["ResponseMetadata"]["HTTPStatusCode"] == 200:
print( "\t==> Successfully called Polly for speech synthesis")
writeAudioStream( response, audioFileName )
else:
print( "\t==> Error calling Polly for speech synthesis")
# ==================================================================================
# Function: writeAudioStream
# Purpose: Utility to write an audio file from the response from the Amazon Polly API
# Parameters:
#   response - the Amazon Polly JSON response
# audioFileName - the name (including extension) of the target audio file (e.g. "abc.mp3")
# ==================================================================================
def writeAudioStream( response, audioFileName ):
# Take the resulting stream and write it to an mp3 file
if "AudioStream" in response:
with closing(response["AudioStream"]) as stream:
output = audioFileName
writeAudio( output, stream )
# ==================================================================================
# Function: getVoiceId
# Purpose: Utility to return the name of the voice to use given a language code. Note: this is only populated with the
# VoiceIds used for this example. Refer to the Amazon Polly API documentation for other voiceId names
# Parameters:
# targetLangCode - the language code used for the target Amazon Polly output
# ==================================================================================
def getVoiceId( targetLangCode ):
# Feel free to add others as desired
if targetLangCode == "es":
voiceId = "Penelope"
elif targetLangCode == "de":
voiceId = "Marlene"
return voiceId
# ==================================================================================
# Function: getSecondsFromTranslation
# Purpose: Utility to determine how long in seconds it will take for a particular phrase of translated text to be spoken
# Parameters:
# textToSynthesize - the raw text to be synthesized
# targetLangCode - the language code used for the target Amazon Polly output
# audioFileName - the name (including extension) of the target audio file (e.g. "abc.mp3")
# ==================================================================================
def getSecondsFromTranslation( textToSynthesize, targetLangCode, audioFileName ):
# Set up the polly and translate services
client = boto3.client('polly')
translate = boto3.client(service_name='translate', region_name="us-east-1", use_ssl=True)
# Use the translated text to create the synthesized speech
response = client.synthesize_speech( OutputFormat="mp3", SampleRate="22050", Text=textToSynthesize, VoiceId=getVoiceId( targetLangCode ) )
# write the stream out to disk so that we can load it into an AudioClip
writeAudioStream( response, audioFileName )
# Load the temporary audio clip into an AudioFileClip
audio = AudioFileClip( audioFileName)
# return the duration
return audio.duration
|
py
|
1a5bfcb69aac8738dad98ef47a4aa39f1c96bc0a
|
from typing import List


class Solution:
def findContentChildren(self, g: List[int], s: List[int]) -> int:
g.sort()
s.sort()
indexChild = 0
for itemCookie in s:
if itemCookie >= g[indexChild]:
indexChild += 1
if indexChild > len(g) - 1:
break
return indexChild
|
py
|
1a5bfd0ca171c025af39fb197a8554013f25a2ef
|
""" Onmt NMT Model base class definition """
import torch.nn as nn
class NMTModel(nn.Module):
"""
Core trainable object in OpenNMT. Implements a trainable interface
for a simple, generic encoder + decoder model.
Args:
encoder (:obj:`EncoderBase`): an encoder object
decoder (:obj:`RNNDecoderBase`): a decoder object
      multigpu (bool): setup for multigpu support
"""
def __init__(self, encoder, decoder, multigpu=False):
self.multigpu = multigpu
super(NMTModel, self).__init__()
self.encoder = encoder
self.decoder = decoder
def forward(self, src, tgt, lengths, dec_state=None):
"""Forward propagate a `src` and `tgt` pair for training.
        Possibly initialized with a beginning decoder state.
Args:
src (:obj:`Tensor`):
a source sequence passed to encoder.
typically for inputs this will be a padded :obj:`LongTensor`
of size `[len x batch x features]`. however, may be an
image or other generic input depending on encoder.
tgt (:obj:`LongTensor`):
a target sequence of size `[tgt_len x batch]`.
lengths(:obj:`LongTensor`): the src lengths, pre-padding `[batch]`.
dec_state (:obj:`DecoderState`, optional): initial decoder state
Returns:
(:obj:`FloatTensor`, `dict`, :obj:`onmt.Models.DecoderState`):
* decoder output `[tgt_len x batch x hidden]`
* dictionary attention dists of `[tgt_len x batch x src_len]`
* final decoder state
"""
tgt = tgt[:-1] # exclude last target from inputs
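        # Teacher forcing: the decoder consumes the gold target shifted by one step,
        # predicting token t+1 from tokens up to t.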
enc_final, memory_bank = self.encoder(src, lengths)
enc_state = \
self.decoder.init_decoder_state(src, memory_bank, enc_final)
decoder_outputs, dec_state, attns = \
self.decoder(tgt, memory_bank,
enc_state if dec_state is None
else dec_state,
memory_lengths=lengths)
if self.multigpu:
# Not yet supported on multi-gpu
dec_state = None
attns = None
return memory_bank, decoder_outputs, attns, dec_state
|
py
|
1a5bfea3f1818e80a37de71c03dd5ba086e4bdfd
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2017-07-17 16:45
from __future__ import unicode_literals
import django.contrib.postgres.fields.jsonb
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='ITAsset',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True, verbose_name='创建时间')),
('updated_at', models.DateTimeField(auto_now=True, verbose_name='修改时间')),
('model_type', models.CharField(blank=True, max_length=32, null=True)),
('name', models.CharField(db_index=True, max_length=512, null=True)),
('has_activated', models.BooleanField(default=False, verbose_name='是否激活')),
('description', models.TextField(blank=True, null=True, verbose_name='备注信息')),
],
options={
'db_table': 'it_assets',
},
),
migrations.CreateModel(
name='HardwareAsset',
fields=[
('itasset_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='assets.ITAsset')),
('ip_set', django.contrib.postgres.fields.jsonb.JSONField(verbose_name='IP地址')),
('mac_address_set', django.contrib.postgres.fields.jsonb.JSONField(verbose_name='MAC地址')),
('management_port', models.IntegerField(default=22, verbose_name='管理端口')),
('idc_location', models.CharField(blank=True, max_length=512, null=True, verbose_name='IDC机房位置')),
('hardware_type', models.CharField(blank=True, max_length=512, null=True)),
('cpu', django.contrib.postgres.fields.jsonb.JSONField(blank=True, null=True, verbose_name='CPU配置')),
('memory', django.contrib.postgres.fields.jsonb.JSONField(blank=True, null=True)),
('disk', django.contrib.postgres.fields.jsonb.JSONField(blank=True, null=True)),
('network_card', django.contrib.postgres.fields.jsonb.JSONField(blank=True, null=True)),
('os_type', models.CharField(blank=True, max_length=32, null=True, verbose_name='服务器类型')),
('os_version', models.CharField(blank=True, max_length=32, null=True, verbose_name='服务器版本')),
('asset_number', models.CharField(blank=True, max_length=128, null=True)),
('sn', models.CharField(blank=True, max_length=128, null=True)),
('cabinet_number', models.CharField(blank=True, max_length=128, null=True)),
('server_location', models.CharField(blank=True, max_length=512, null=True)),
('server_type', models.CharField(blank=True, max_length=128, null=True)),
('run_env', models.CharField(blank=True, max_length=128, null=True)),
('server_status', models.CharField(blank=True, max_length=128, null=True)),
('put_shelf_time', models.DateTimeField(verbose_name='上架时间')),
],
options={
'db_table': 'hardware_assets',
},
bases=('assets.itasset',),
),
migrations.CreateModel(
name='SoftwareAsset',
fields=[
('itasset_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='assets.ITAsset')),
('version', models.CharField(blank=True, max_length=128, null=True, verbose_name='软件版本')),
('license_type', models.CharField(blank=True, max_length=128, null=True, verbose_name='许可证类型')),
('hardware_id', models.IntegerField(blank=True, null=True, verbose_name='运行硬件的ID')),
],
options={
'db_table': 'software_assets',
},
bases=('assets.itasset',),
),
]
|
py
|
1a5bfee198966be237a6be3d3838176e2d1cdb29
|
"""Geographical extracts of natural increase, nom and nim
"""
from pathlib import Path
import pandas as pd
import data
import file_paths
from data import read_abs_data, read_abs_meta_data
DATA_ABS_PATH = Path.home() / "Documents/Analysis/Australian economy/Data/ABS"
def read_3101():
series_id = data.series_id_3101()
return data.read_abs_data(series_id=series_id)
def nom(df=None):
"""Exract NOM data
Parameters
----------
df : [type], optional
[description], by default None
"""
if df is None:
df = read_3101()
return df.net_overseas_migration
def nom_year_ending(df_nom=None):
"""Return year ending nom
Parameters
----------
nom : [type], optional
[description], by default None
"""
if df_nom is None:
df_nom = read_3101()
return df_nom.net_overseas_migration.rolling(4).sum().dropna()
def nom_year_ending_annual(df_nom=None, quarter="A-Jun"):
"""Return year ending for a given quarter
Parameters
----------
df_nom : Pandas series, optional
contains nom in sub-annual data
"""
if df_nom is None:
df_nom = nom()
    # check the first 4 quarters for the one whose month matches the ending month of "quarter"
    # find the first quarter that matches the ending quarter, and drop elements up to (and including) it
    # so the series starts at the subsequent quarter
for i, date_ in enumerate(df_nom.index[:4]):
if date_.strftime("%b") == quarter[-3:]:
idx = i + 1
df_nom = df_nom.iloc[idx:]
break
if df_nom.index[3].strftime("%b") != quarter[-3:]:
print("1st DATE VALUE IS NOT A FULL YEAR")
nom_annual = df_nom.resample(quarter).sum()
    # remove the last year if it is not a full year (ie the final period does not end on the quarter parameter)
if df_nom.index[-1].strftime("%b") != quarter[-3:]:
nom_annual = nom_annual.iloc[:-1]
return nom_annual
def component_shares_between_dates(df):
"""
    Calculate the NOM and natural increase contributions to population growth over the period covered
by the dataframe.
Parameters
----------
df: a dataframe of ABS 3101, with column names already cleaned
(ie lower cased, and joined with "_")
Returns:
None but prints out a summary of population increase and component contributions
"""
if not isinstance(df.index, pd.DatetimeIndex):
raise ValueError("Chris - the dataframe does not have a time series index")
idx_erp_start = df.first_valid_index()
# Sum of components must start from 2nd period - components in first period
# contribute to the start ERP only
idx_component_start = df.iloc[1:].first_valid_index()
idx_erp_end = df.last_valid_index()
pop_delta = (
df.loc[idx_erp_end].estimated_resident_population
- df.loc[idx_erp_start].estimated_resident_population
)
    pop_delta_pct_increase = (
        pop_delta / df.loc[idx_erp_start].estimated_resident_population
    )
nom = df.loc[idx_component_start:].net_overseas_migration.sum()
natural_increase = df.loc[idx_component_start:].natural_increase.sum()
components = nom + natural_increase
nom_share = nom / components
natural_increase_share = natural_increase / components
print(f"Between {idx_erp_start:%Y-%m-%d} and {idx_erp_end:%Y-%m-%d}:\n")
print(
f"Population increased {pop_delta * 1000:,.0f} ({pop_deta_pct_increase:.1%}) people.\n"
)
print(
f"{nom_share:.1%} from NOM, {natural_increase_share:.1%} from natural increase."
)
return
def annual_population_components(df=None, month=6):
"""
TODO: read in 3101 rather than passing in as df
Calculate annual nom and natural increase components over the period covered by a 3101 dataframe.
Parameters
----------
df: a dataframe of ABS 3101, with column names already cleaned
(ie lower cased, and joined with "_")
Returns:
a dataframe
"""
if df is None:
df = read_3101()
ERP = df[df.index.month == month].estimated_resident_population
ERP_flow = ERP.diff()
ERP_flow.name = "ERP_flow"
NOM = df.net_overseas_migration.rolling(4).sum()
NOM = NOM[NOM.index.month == month]
natural = df.natural_increase.rolling(4).sum()
natural = natural[natural.index.month == month]
population = pd.concat([ERP, ERP_flow, natural, NOM], axis=1)
## Adjust nom for period 1996 through 2005
# population.loc["1996":"2005", "net_overseas_migration"] = population.loc["1996":"2005", "net_overseas_migration"] * 1.25
population = population.assign(
NI_and_NOM=lambda x: x[["natural_increase", "net_overseas_migration"]].sum(
axis=1
)
)
# adjust NOM and natural increase to be correct levels of ERP - apportion intercensal equally
nom_intercensal_NOM_share = (
population.net_overseas_migration / population.NI_and_NOM
)
population = population.assign(
nom_adj=lambda x: nom_intercensal_NOM_share * x.ERP_flow
).assign(
natural_increase_adj=lambda x: (1 - nom_intercensal_NOM_share) * x.ERP_flow
)
return population
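# Illustrative call (a sketch, not part of the module): annual_population_components(month=6)
# reads ABS 3101 and returns June-ended annual data with columns
# estimated_resident_population, ERP_flow, natural_increase, net_overseas_migration,
# NI_and_NOM, plus the intercensally apportioned nom_adj and natural_increase_adj.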
def get_pop_by_age(region=None, gender=None):
filepath = file_paths.abs_data_folder / "3101 age by year by gender.parquet"
df = pd.read_parquet(filepath)
if region:
        # TODO: generalise to accept multiple regions (eg pass a list)
if region in df.region.unique():
df = df[df.region == region]
else:
raise ValueError(f"{region} is not in list of regions: {', '.join(sorted(df.region.unique()))}")
if gender:
if gender in df.gender.unique():
df = df[df.gender == gender]
else:
raise ValueError(f"{region} is not in list of regions: {', '.join(sorted(df.gender.unique()))}")
return df
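# Usage sketch (illustrative only; assumes the ABS series and parquet files are
# available via the data/file_paths helpers imported above, and that "Victoria"
# and "Female" are valid values in the parquet file):
#
#     df = read_3101()
#     nom_annual = nom_year_ending_annual(nom(df), quarter="A-Jun")
#     component_shares_between_dates(df)
#     females_vic = get_pop_by_age(region="Victoria", gender="Female")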
|
py
|
1a5c000555171c2d25c7ef47b7ae5b40763972e8
|
import os
import sys
from setuptools import setup
from distutils.command.install_data import install_data
from distutils.command.install import INSTALL_SCHEMES
if sys.version_info < (2, 6):
raise Exception("redisrollforward requires Python 2.6 or higher.")
package_name = 'redisrollforward'
package_fullname = 'python-%s' % package_name
root_dir = os.path.split(os.path.abspath(__file__))[0]
package_dir = os.path.join(root_dir, package_name)
def get_module():
if root_dir not in sys.path:
sys.path.insert(0,root_dir)
return __import__(package_name)
mod = get_module()
# Try to import lib build
try:
from extensions.setup import libparams
except ImportError:
libparams = None
def read(fname):
return open(os.path.join(root_dir, fname)).read()
def requirements():
req = read('requirements.txt').replace('\r','').split('\n')
result = []
for r in req:
r = r.replace(' ','')
if r:
result.append(r)
return result
class osx_install_data(install_data):
def finalize_options(self):
self.set_undefined_options('install', ('install_lib', 'install_dir'))
install_data.finalize_options(self)
# Tell distutils to put the data_files in platform-specific installation
# locations. See here for an explanation:
for scheme in INSTALL_SCHEMES.values():
scheme['data'] = scheme['purelib']
def fullsplit(path, result=None):
"""
Split a pathname into components (the opposite of os.path.join) in a
platform-neutral way.
"""
if result is None:
result = []
head, tail = os.path.split(path)
if head == '':
return [tail] + result
if head == path:
return result
return fullsplit(head, [tail] + result)
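# For example (illustrative, on a POSIX system):
#   fullsplit('redisrollforward/fallback') -> ['redisrollforward', 'fallback']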
# Compile the list of packages available, because distutils doesn't have
# an easy way to do this.
def get_rel_dir(d,base,res=''):
if d == base:
return res
br,r = os.path.split(d)
if res:
r = os.path.join(r,res)
return get_rel_dir(br,base,r)
packages, data_files = [], []
pieces = fullsplit(root_dir)
if pieces[-1] == '':
len_root_dir = len(pieces) - 1
else:
len_root_dir = len(pieces)
for dirpath, _, filenames in os.walk(package_dir):
if '__init__.py' in filenames:
packages.append('.'.join(fullsplit(dirpath)[len_root_dir:]))
elif filenames and not dirpath.endswith('__pycache__'):
rel_dir = get_rel_dir(dirpath, package_dir)
data_files.extend((os.path.join(rel_dir, f) for f in filenames))
if len(sys.argv) > 1 and sys.argv[1] == 'bdist_wininst':
for file_info in data_files:
file_info[0] = '\\PURELIB\\%s' % file_info[0]
def run_setup(params=None, argv=None):
params = params or {'cmdclass': {}}
if sys.platform == "darwin":
params['cmdclass']['install_data'] = osx_install_data
else:
params['cmdclass']['install_data'] = install_data
argv = argv if argv is not None else sys.argv
if len(argv) > 1:
if argv[1] == 'install' and sys.version_info >= (3,0):
packages.remove('redisrollforward.fallback')
params.update({'name': package_fullname,
'version': mod.__version__,
'author': mod.__author__,
'author_email': mod.__contact__,
'url': mod.__homepage__,
'license': mod.__license__,
'description': mod.__doc__,
'long_description': read('README.rst'),
'packages': packages,
'package_data': {package_name: data_files},
'classifiers': mod.CLASSIFIERS,
'install_requires': requirements()})
setup(**params)
def status_msgs(*msgs):
print('*' * 75)
for msg in msgs:
print(msg)
print('*' * 75)
run_setup()
status_msgs("redisrollforward build succeeded.")
#EOF
|
py
|
1a5c001bbbfd41f88596f4c126bb59fae8243e13
|
from Utils.utils import get_logger
import pydicom as dicom
import numpy as np
from numpy.linalg import norm
import os
from pydicom.pixel_data_handlers.util import apply_modality_lut
logger = get_logger(__name__)
class DCMreaderVMLa:
def __init__(self, folder_name):
self.broken = False
self.ch2_frames = []
self.ch3_frames = []
self.ch4_frames = []
self.ch2_file_paths = []
self.ch3_file_paths = []
self.ch4_file_paths = []
self.ch2_frames_matrice = None
self.ch3_frames_matrice = None
self.ch4_frames_matrice = None
dcm_files = sorted(os.listdir(folder_name))
for idx, file in enumerate(dcm_files):
if file.find('.dcm') != -1:
try:
temp_ds = dicom.dcmread(os.path.join(folder_name, file))
self.classifyLaFrame(temp_ds, os.path.join(folder_name, file))
except Exception as ex:
                    print("Couldn't read file: {}".format(os.path.join(folder_name, file)))
print('Failed due to: ')
print(ex)
self.broken = True
return
if len(self.ch2_frames) == 0 and len(self.ch3_frames) == 0 and len(self.ch4_frames) == 0:
self.broken = True
logger.warning("There are no frames. This folder should be deleted. Path: {}".format(folder_name))
else:
self.loadMatrices()
def classifyLaFrame(self, ds, file_path):
orientationDirCosines = ds.data_element('ImageOrientationPatient')
orientNPArray = np.cross(orientationDirCosines[0:3], orientationDirCosines[3:6])
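        # The cross product of the row and column direction cosines gives the slice
        # normal; below it is compared against reference normals to decide which
        # long-axis view (2-, 3- or 4-chamber) this frame most likely belongs to.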
ch2_direction = np.array([0.7692, 0.6184, 0.0081])
ch3_direction = np.array([0.7335, 0.1403, 0.6574])
ch4_direction = np.array([0.0144, -0.5744, 0.7982])
windowedFrame = apply_modality_lut(ds.pixel_array, ds)
cosOfAngle_ch2 = np.dot(orientNPArray, ch2_direction) / norm(ch2_direction) / norm(orientNPArray)
cosOfAngle_ch3 = np.dot(orientNPArray, ch3_direction) / norm(ch3_direction) / norm(orientNPArray)
cosOfAngle_ch4 = np.dot(orientNPArray, ch4_direction) / norm(ch4_direction) / norm(orientNPArray)
cosofAngles = [abs(cosOfAngle_ch2), abs(cosOfAngle_ch3), abs(cosOfAngle_ch4)]
        maxIdx = np.argmax(cosofAngles)
        if maxIdx == 0:
self.ch2_frames.append(windowedFrame)
self.ch2_file_paths.append(file_path)
return
        if maxIdx == 1:
self.ch3_frames.append(windowedFrame)
self.ch3_file_paths.append(file_path)
return
        if maxIdx == 2:
self.ch4_frames.append(windowedFrame)
self.ch4_file_paths.append(file_path)
return
def loadMatrices(self):
if len(self.ch2_frames) > 0:
size_h, size_w = self.ch2_frames[0].shape
self.ch2_frames_matrice = np.ones((len(self.ch2_frames), size_h, size_w))
for i in range(len(self.ch2_frames)):
if self.ch2_frames[i].shape == self.ch2_frames[0].shape:
self.ch2_frames_matrice[i] = self.ch2_frames[i]
else:
logger.error('Wrong shape at {}'.format(self.ch2_file_paths[i]))
if len(self.ch3_frames) > 0:
size_h, size_w = self.ch3_frames[0].shape
self.ch3_frames_matrice = np.ones((len(self.ch3_frames), size_h, size_w))
for i in range(len(self.ch3_frames)):
if self.ch3_frames[i].shape == self.ch3_frames[0].shape:
self.ch3_frames_matrice[i] = self.ch3_frames[i]
else:
logger.error('Wrong shape at {}'.format(self.ch3_file_paths[i]))
if len(self.ch4_frames) > 0:
size_h, size_w = self.ch4_frames[0].shape
self.ch4_frames_matrice = np.ones((len(self.ch4_frames), size_h, size_w))
for i in range(len(self.ch4_frames)):
if self.ch4_frames[i].shape == self.ch4_frames[0].shape:
self.ch4_frames_matrice[i] = self.ch4_frames[i]
else:
logger.error('Wrong shape at {}'.format(self.ch4_file_paths[i]))
def isBroken(self):
return self.broken
|
py
|
1a5c007af43023d03e16e8dc71fd6d0cf01dc4d5
|
# -*- coding: utf-8 -*
import subprocess
import sys
from distutils.cmd import Command
from setuptools import setup
try:
from babel import __version__
except SyntaxError as exc:
sys.stderr.write("Unable to import Babel (%s). Are you running a supported version of Python?\n" % exc)
sys.exit(1)
class import_cldr(Command):
description = 'imports and converts the CLDR data'
user_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
subprocess.check_call([sys.executable, 'scripts/download_import_cldr.py'])
setup(
name='Babel',
version=__version__,
description='Internationalization utilities',
long_description="""A collection of tools for internationalizing Python applications.""",
author='Armin Ronacher',
author_email='[email protected]',
license='BSD',
url='http://babel.pocoo.org/',
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: Implementation :: CPython',
'Programming Language :: Python :: Implementation :: PyPy',
'Topic :: Software Development :: Libraries :: Python Modules',
],
python_requires='>=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*',
packages=['babel', 'babel.messages', 'babel.localtime'],
include_package_data=True,
install_requires=[
# This version identifier is currently necessary as
# pytz otherwise does not install on pip 1.4 or
# higher.
'pytz>=2015.7',
],
cmdclass={'import_cldr': import_cldr},
zip_safe=False,
    # Note when adding extractors: we also want the builtin extractors to
    # work even when packages are not installed, to simplify testing. If you
    # add an extractor here also manually add it to the "extract"
    # function in babel.messages.extract.
entry_points="""
[console_scripts]
pybabel = babel.messages.frontend:main
[distutils.commands]
compile_catalog = babel.messages.frontend:compile_catalog
extract_messages = babel.messages.frontend:extract_messages
init_catalog = babel.messages.frontend:init_catalog
update_catalog = babel.messages.frontend:update_catalog
[distutils.setup_keywords]
message_extractors = babel.messages.frontend:check_message_extractors
[babel.checkers]
num_plurals = babel.messages.checkers:num_plurals
python_format = babel.messages.checkers:python_format
[babel.extractors]
ignore = babel.messages.extract:extract_nothing
python = babel.messages.extract:extract_python
javascript = babel.messages.extract:extract_javascript
"""
)
|
py
|
1a5c0086f197db076a7569a15e169b290ddaa1d7
|
# Time: O(n)
# Space: O(1)
# Count the number of segments in a string,
# where a segment is defined to be a contiguous
# sequence of non-space characters.
#
# Please note that the string does not
# contain any non-printable characters.
#
# Example:
#
# Input: "Hello, my name is John"
# Output: 5
class Solution(object):
def countSegments(self, s):
"""
:type s: str
:rtype: int
"""
result = int(len(s) and s[-1] != ' ')
for i in xrange(1, len(s)):
if s[i] == ' ' and s[i-1] != ' ':
result += 1
return result
def countSegments2(self, s):
"""
:type s: str
:rtype: int
"""
return len([i for i in s.strip().split(' ') if i])
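# Quick check (illustrative):
#   Solution().countSegments("Hello, my name is John")   # -> 5
#   Solution().countSegments2("Hello, my name is John")  # -> 5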
|
py
|
1a5c019fe50caf3cbb4a9b4b27ee9187dbd1869f
|
from .login_histories import LoginHistory
from .users import User
|
py
|
1a5c0205da351d628674452b32087487cfd21352
|
# -*- coding: utf-8 -*- #
# Copyright 2018 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Command to import files into a Cloud Composer environment's bucket."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import posixpath
from googlecloudsdk.calliope import base
from googlecloudsdk.command_lib.composer import flags
from googlecloudsdk.command_lib.composer import resource_args
from googlecloudsdk.command_lib.composer import storage_util
class Import(base.Command):
"""Import plugins from local storage or Cloud Storage into an environment.
If the SOURCE is a directory, it and its contents are imported recursively.
Colliding files in the environment's Cloud Storage bucket will be
overwritten. If a file exists in the bucket but is not present in the SOURCE,
it is not removed.
## EXAMPLES
Suppose the '/foo' directory in the local filesystem has the following
structure:
foo
|
+-- subdir1
| |
| +-- file1.txt
| +-- file2.txt
|
+-- subdir2
| |
| +-- file3.txt
| +-- file4.txt
And the environment `myenv`'s Cloud Storage bucket has the following
structure:
gs://the-bucket
|
+-- plugins
| |
| +-- foo
| | |
| | +-- subdir1
| | | |
| | | +-- bar.txt
The following command:
{command} myenv --source=/foo
would result in the following structure in `myenv`'s Cloud Storage bucket:
gs://the-bucket
|
+-- plugins
| |
| +-- foo
| | |
| | +-- subdir1
| | | |
| | | +-- bar.txt
| | | +-- file1.txt
| | | +-- file2.txt
| | |
| | +-- subdir2
| | | |
| | | +-- file3.txt
| | | +-- file4.txt
If instead we had run
{command} myenv --source=/foo --destination=bar
the resulting bucket structure would be the following:
gs://the-bucket
|
+-- plugins
| |
| +-- foo
| | |
| | +-- subdir1
| | | |
| | | +-- bar.txt
| |
| +-- bar
| | |
| | +-- foo
| | | |
| | | +-- subdir1
| | | | |
| | | | +-- file1.txt
| | | | +-- file2.txt
| | | |
| | | +-- subdir2
| | | | |
| | | | +-- file3.txt
| | | | +-- file4.txt
"""
SUBDIR_BASE = 'plugins'
@staticmethod
def Args(parser):
resource_args.AddEnvironmentResourceArg(
parser, 'into whose Cloud Storage bucket to import plugins.',
positional=False)
flags.AddImportSourceFlag(parser, Import.SUBDIR_BASE)
flags.AddImportDestinationFlag(parser, Import.SUBDIR_BASE)
def Run(self, args):
storage_util.WarnIfWildcardIsPresent(args.source, '--source')
env_ref = args.CONCEPTS.environment.Parse()
gcs_subdir = Import.SUBDIR_BASE
if args.destination:
gcs_subdir = posixpath.join(gcs_subdir,
args.destination.strip(posixpath.sep))
gcs_subdir = posixpath.join(gcs_subdir, '')
return storage_util.Import(
env_ref, args.source, gcs_subdir, release_track=self.ReleaseTrack())
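# For example (illustrative): with `--destination=bar`, gcs_subdir becomes
# 'plugins/bar/' (the final posixpath.join with '' adds the trailing slash), and
# storage_util.Import copies SOURCE under that prefix in the environment's bucket.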
|
py
|
1a5c025771556fa66cf921b80d71fe87cf9d48b0
|
#!/usr/bin/env python3
# Problem 6: Sum square difference
# https://projecteuler.net/problem=6
import sys
def euler006(bound):
numbers_sum = 0
squares_sum = 0
for number in range(1, bound + 1):
numbers_sum += number
squares_sum += number ** 2
return numbers_sum ** 2 - squares_sum
def parse_input(lines):
return int(lines[0].strip())
if __name__ == "__main__":
print(euler006(parse_input(sys.stdin.readlines())))
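# Worked example: for bound = 10, (1 + ... + 10)**2 = 55**2 = 3025 and
# 1**2 + ... + 10**2 = 385, so euler006(10) returns 3025 - 385 = 2640.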
|
py
|
1a5c02c45c57988e589015d9dc4ba17374db6c30
|
import win32debug, sys, os
string_obj = "TestString123"
big_string_obj = string_obj * 100
bytes_obj = b"TestBytes123" # Python 2: str, Python 3: bytes
unicode_obj = u"TestUnicode" # Python 2: unicode, Python 3: str
byte_array_object = bytearray(b'TestBytearray123')
int_obj = int(1)
long_obj = 123456789012345678901234567890123456789012345678901234567890
float_obj = 3.1415
complex_obj = complex(1.5, -2.25)
bool_true_obj = True
bool_false_obj = False
none_obj = None
type_obj = dict
not_implemented_obj = NotImplemented
def test_function(x):
"""Some DocString"""
return x*x
func_obj = test_function
list_obj = [string_obj, int_obj, long_obj]
tuple_obj = (string_obj, int_obj, long_obj)
set_obj = { string_obj, int_obj, long_obj }
dict_obj = {
"string_obj": string_obj,
"int_obj": int_obj,
"long_obj": long_obj,
}
win32debug.dump_process("object_types.dmp")
|
py
|
1a5c0384a10afa7706286eda04166ade207d67c3
|
"""Application Models."""
from marshmallow import fields, Schema
from marshmallow.validate import OneOf
from ..enums import *
from ..models.BaseSchema import BaseSchema
class RedirectionSchema(BaseSchema):
# Content swagger.json
redirect_from = fields.Str(required=False)
redirect_to = fields.Str(required=False)
|
py
|
1a5c03b1a37bc6a41551b9dd42629f1f0128514c
|
"""Handles all VCS (version control) support"""
from __future__ import absolute_import
import errno
import logging
import os
import shutil
import sys
from pip._internal.exceptions import BadCommand
from pip._internal.utils.misc import (
display_path, backup_dir, call_subprocess, rmtree, ask_path_exists,
)
from pip._internal.utils.typing import MYPY_CHECK_RUNNING
from pip._vendor.six.moves.urllib import parse as urllib_parse
if MYPY_CHECK_RUNNING:
from typing import ( # noqa: F401
Any, Dict, Iterable, List, Mapping, Optional, Text, Tuple, Type
)
from pip._internal.utils.ui import SpinnerInterface # noqa: F401
AuthInfo = Tuple[Optional[str], Optional[str]]
__all__ = ['vcs']
logger = logging.getLogger(__name__)
class RemoteNotFoundError(Exception):
pass
class RevOptions(object):
"""
Encapsulates a VCS-specific revision to install, along with any VCS
install options.
Instances of this class should be treated as if immutable.
"""
def __init__(self, vcs, rev=None, extra_args=None):
# type: (VersionControl, Optional[str], Optional[List[str]]) -> None
"""
Args:
vcs: a VersionControl object.
rev: the name of the revision to install.
extra_args: a list of extra options.
"""
if extra_args is None:
extra_args = []
self.extra_args = extra_args
self.rev = rev
self.vcs = vcs
def __repr__(self):
return '<RevOptions {}: rev={!r}>'.format(self.vcs.name, self.rev)
@property
def arg_rev(self):
# type: () -> Optional[str]
if self.rev is None:
return self.vcs.default_arg_rev
return self.rev
def to_args(self):
# type: () -> List[str]
"""
Return the VCS-specific command arguments.
"""
args = [] # type: List[str]
rev = self.arg_rev
if rev is not None:
args += self.vcs.get_base_rev_args(rev)
args += self.extra_args
return args
def to_display(self):
# type: () -> str
if not self.rev:
return ''
return ' (to revision {})'.format(self.rev)
def make_new(self, rev):
# type: (str) -> RevOptions
"""
Make a copy of the current instance, but with a new rev.
Args:
rev: the name of the revision for the new object.
"""
return self.vcs.make_rev_options(rev, extra_args=self.extra_args)
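# Illustrative use of RevOptions (the `git` backend instance below is hypothetical):
# `git.make_rev_options('v1.0').to_args()` returns the backend's base revision
# arguments for 'v1.0' plus any extra_args, and `.to_display()` returns
# ' (to revision v1.0)' for use in log messages.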
class VcsSupport(object):
_registry = {} # type: Dict[str, Type[VersionControl]]
schemes = ['ssh', 'git', 'hg', 'bzr', 'sftp', 'svn']
def __init__(self):
# type: () -> None
# Register more schemes with urlparse for various version control
# systems
urllib_parse.uses_netloc.extend(self.schemes)
# Python >= 2.7.4, 3.3 doesn't have uses_fragment
if getattr(urllib_parse, 'uses_fragment', None):
urllib_parse.uses_fragment.extend(self.schemes)
super(VcsSupport, self).__init__()
def __iter__(self):
return self._registry.__iter__()
@property
def backends(self):
# type: () -> List[Type[VersionControl]]
return list(self._registry.values())
@property
def dirnames(self):
# type: () -> List[str]
return [backend.dirname for backend in self.backends]
@property
def all_schemes(self):
# type: () -> List[str]
schemes = [] # type: List[str]
for backend in self.backends:
schemes.extend(backend.schemes)
return schemes
def register(self, cls):
# type: (Type[VersionControl]) -> None
if not hasattr(cls, 'name'):
logger.warning('Cannot register VCS %s', cls.__name__)
return
if cls.name not in self._registry:
self._registry[cls.name] = cls
logger.debug('Registered VCS backend: %s', cls.name)
def unregister(self, cls=None, name=None):
# type: (Optional[Type[VersionControl]], Optional[str]) -> None
if name in self._registry:
del self._registry[name]
elif cls in self._registry.values():
del self._registry[cls.name]
else:
logger.warning('Cannot unregister because no class or name given')
def get_backend_type(self, location):
# type: (str) -> Optional[Type[VersionControl]]
"""
Return the type of the version control backend if found at given
location, e.g. vcs.get_backend_type('/path/to/vcs/checkout')
"""
for vc_type in self._registry.values():
if vc_type.controls_location(location):
logger.debug('Determine that %s uses VCS: %s',
location, vc_type.name)
return vc_type
return None
def get_backend(self, name):
# type: (str) -> Optional[Type[VersionControl]]
name = name.lower()
if name in self._registry:
return self._registry[name]
return None
vcs = VcsSupport()
class VersionControl(object):
name = ''
dirname = ''
repo_name = ''
# List of supported schemes for this Version Control
schemes = () # type: Tuple[str, ...]
# Iterable of environment variable names to pass to call_subprocess().
unset_environ = () # type: Tuple[str, ...]
default_arg_rev = None # type: Optional[str]
def __init__(self, url=None, *args, **kwargs):
self.url = url
super(VersionControl, self).__init__(*args, **kwargs)
def get_base_rev_args(self, rev):
"""
Return the base revision arguments for a vcs command.
Args:
rev: the name of a revision to install. Cannot be None.
"""
raise NotImplementedError
def make_rev_options(self, rev=None, extra_args=None):
# type: (Optional[str], Optional[List[str]]) -> RevOptions
"""
Return a RevOptions object.
Args:
rev: the name of a revision to install.
extra_args: a list of extra options.
"""
return RevOptions(self, rev, extra_args=extra_args)
@classmethod
def _is_local_repository(cls, repo):
# type: (str) -> bool
"""
posix absolute paths start with os.path.sep,
win32 ones start with drive (like c:\\folder)
"""
drive, tail = os.path.splitdrive(repo)
return repo.startswith(os.path.sep) or bool(drive)
def export(self, location):
"""
Export the repository at the url to the destination location
        i.e. only download the files, without vcs information
"""
raise NotImplementedError
def get_netloc_and_auth(self, netloc, scheme):
"""
Parse the repository URL's netloc, and return the new netloc to use
along with auth information.
Args:
netloc: the original repository URL netloc.
scheme: the repository URL's scheme without the vcs prefix.
This is mainly for the Subversion class to override, so that auth
information can be provided via the --username and --password options
instead of through the URL. For other subclasses like Git without
such an option, auth information must stay in the URL.
Returns: (netloc, (username, password)).
"""
return netloc, (None, None)
def get_url_rev_and_auth(self, url):
# type: (str) -> Tuple[str, Optional[str], AuthInfo]
"""
Parse the repository URL to use, and return the URL, revision,
and auth info to use.
Returns: (url, rev, (username, password)).
"""
scheme, netloc, path, query, frag = urllib_parse.urlsplit(url)
if '+' not in scheme:
raise ValueError(
"Sorry, {!r} is a malformed VCS url. "
"The format is <vcs>+<protocol>://<url>, "
"e.g. svn+http://myrepo/svn/MyApp#egg=MyApp".format(url)
)
# Remove the vcs prefix.
scheme = scheme.split('+', 1)[1]
netloc, user_pass = self.get_netloc_and_auth(netloc, scheme)
rev = None
if '@' in path:
path, rev = path.rsplit('@', 1)
url = urllib_parse.urlunsplit((scheme, netloc, path, query, ''))
return url, rev, user_pass
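    # For example (illustrative), 'git+https://example.com/repo.git@v1.0' yields
    # ('https://example.com/repo.git', 'v1.0', (None, None)): the vcs prefix is
    # stripped, the '@...' suffix becomes the revision, and Git keeps any auth
    # info in the URL (see get_netloc_and_auth above).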
def make_rev_args(self, username, password):
"""
Return the RevOptions "extra arguments" to use in obtain().
"""
return []
def get_url_rev_options(self, url):
# type: (str) -> Tuple[str, RevOptions]
"""
Return the URL and RevOptions object to use in obtain() and in
some cases export(), as a tuple (url, rev_options).
"""
url, rev, user_pass = self.get_url_rev_and_auth(url)
username, password = user_pass
extra_args = self.make_rev_args(username, password)
rev_options = self.make_rev_options(rev, extra_args=extra_args)
return url, rev_options
def normalize_url(self, url):
# type: (str) -> str
"""
Normalize a URL for comparison by unquoting it and removing any
trailing slash.
"""
return urllib_parse.unquote(url).rstrip('/')
def compare_urls(self, url1, url2):
# type: (str, str) -> bool
"""
Compare two repo URLs for identity, ignoring incidental differences.
"""
return (self.normalize_url(url1) == self.normalize_url(url2))
def fetch_new(self, dest, url, rev_options):
"""
Fetch a revision from a repository, in the case that this is the
first fetch from the repository.
Args:
dest: the directory to fetch the repository to.
rev_options: a RevOptions object.
"""
raise NotImplementedError
def switch(self, dest, url, rev_options):
"""
Switch the repo at ``dest`` to point to ``URL``.
Args:
rev_options: a RevOptions object.
"""
raise NotImplementedError
def update(self, dest, url, rev_options):
"""
Update an already-existing repo to the given ``rev_options``.
Args:
rev_options: a RevOptions object.
"""
raise NotImplementedError
def is_commit_id_equal(self, dest, name):
"""
Return whether the id of the current commit equals the given name.
Args:
dest: the repository directory.
name: a string name.
"""
raise NotImplementedError
def obtain(self, dest):
# type: (str) -> None
"""
Install or update in editable mode the package represented by this
VersionControl object.
Args:
dest: the repository directory in which to install or update.
"""
url, rev_options = self.get_url_rev_options(self.url)
if not os.path.exists(dest):
self.fetch_new(dest, url, rev_options)
return
rev_display = rev_options.to_display()
if self.is_repository_directory(dest):
existing_url = self.get_remote_url(dest)
if self.compare_urls(existing_url, url):
logger.debug(
'%s in %s exists, and has correct URL (%s)',
self.repo_name.title(),
display_path(dest),
url,
)
if not self.is_commit_id_equal(dest, rev_options.rev):
logger.info(
'Updating %s %s%s',
display_path(dest),
self.repo_name,
rev_display,
)
self.update(dest, url, rev_options)
else:
logger.info('Skipping because already up-to-date.')
return
logger.warning(
'%s %s in %s exists with URL %s',
self.name,
self.repo_name,
display_path(dest),
existing_url,
)
prompt = ('(s)witch, (i)gnore, (w)ipe, (b)ackup ',
('s', 'i', 'w', 'b'))
else:
logger.warning(
'Directory %s already exists, and is not a %s %s.',
dest,
self.name,
self.repo_name,
)
# https://github.com/python/mypy/issues/1174
prompt = ('(i)gnore, (w)ipe, (b)ackup ', # type: ignore
('i', 'w', 'b'))
logger.warning(
'The plan is to install the %s repository %s',
self.name,
url,
)
response = ask_path_exists('What to do? %s' % prompt[0], prompt[1])
if response == 'a':
sys.exit(-1)
if response == 'w':
logger.warning('Deleting %s', display_path(dest))
rmtree(dest)
self.fetch_new(dest, url, rev_options)
return
if response == 'b':
dest_dir = backup_dir(dest)
logger.warning(
'Backing up %s to %s', display_path(dest), dest_dir,
)
shutil.move(dest, dest_dir)
self.fetch_new(dest, url, rev_options)
return
# Do nothing if the response is "i".
if response == 's':
logger.info(
'Switching %s %s to %s%s',
self.repo_name,
display_path(dest),
url,
rev_display,
)
self.switch(dest, url, rev_options)
def unpack(self, location):
# type: (str) -> None
"""
Clean up current location and download the url repository
(and vcs infos) into location
"""
if os.path.exists(location):
rmtree(location)
self.obtain(location)
@classmethod
def get_src_requirement(cls, location, project_name):
"""
Return a string representing the requirement needed to
redownload the files currently present in location, something
like:
{repository_url}@{revision}#egg={project_name}-{version_identifier}
"""
raise NotImplementedError
@classmethod
def get_remote_url(cls, location):
"""
Return the url used at location
Raises RemoteNotFoundError if the repository does not have a remote
url configured.
"""
raise NotImplementedError
@classmethod
def get_revision(cls, location):
"""
Return the current commit id of the files at the given location.
"""
raise NotImplementedError
@classmethod
def run_command(
cls,
cmd, # type: List[str]
show_stdout=True, # type: bool
cwd=None, # type: Optional[str]
on_returncode='raise', # type: str
extra_ok_returncodes=None, # type: Optional[Iterable[int]]
command_desc=None, # type: Optional[str]
extra_environ=None, # type: Optional[Mapping[str, Any]]
spinner=None # type: Optional[SpinnerInterface]
):
# type: (...) -> Optional[Text]
"""
Run a VCS subcommand
This is simply a wrapper around call_subprocess that adds the VCS
command name, and checks that the VCS is available
"""
cmd = [cls.name] + cmd
try:
return call_subprocess(cmd, show_stdout, cwd,
on_returncode=on_returncode,
extra_ok_returncodes=extra_ok_returncodes,
command_desc=command_desc,
extra_environ=extra_environ,
unset_environ=cls.unset_environ,
spinner=spinner)
except OSError as e:
# errno.ENOENT = no such file or directory
# In other words, the VCS executable isn't available
if e.errno == errno.ENOENT:
raise BadCommand(
'Cannot find command %r - do you have '
'%r installed and in your '
'PATH?' % (cls.name, cls.name))
else:
raise # re-raise exception if a different error occurred
@classmethod
def is_repository_directory(cls, path):
# type: (str) -> bool
"""
Return whether a directory path is a repository directory.
"""
logger.debug('Checking in %s for %s (%s)...',
path, cls.dirname, cls.name)
return os.path.exists(os.path.join(path, cls.dirname))
@classmethod
def controls_location(cls, location):
# type: (str) -> bool
"""
Check if a location is controlled by the vcs.
It is meant to be overridden to implement smarter detection
mechanisms for specific vcs.
This can do more than is_repository_directory() alone. For example,
the Git override checks that Git is actually available.
"""
return cls.is_repository_directory(location)
|
py
|
1a5c052ecca726ecd737fcebbd7c01ac12fa2670
|
# -*- coding: utf-8 -*-
class Solution:
def lengthOfLIS(self, nums):
if not nums:
return 0
result = [1] * len(nums)
for i in range(len(nums)):
partial = result[i]
for j in range(i):
if nums[i] > nums[j]:
partial = max(partial, 1 + result[j])
result[i] = partial
return max(result)
if __name__ == '__main__':
solution = Solution()
assert 4 == solution.lengthOfLIS([10, 9, 2, 5, 3, 7, 101, 18])
|
py
|
1a5c069dfa20b4b9fe83c40a1127835a16477758
|
#!/usr/bin/env python
# encoding: utf-8
from bunch import bunchify # #for json.dot.notation instead of json['annoying']['dict']
from django.conf import settings
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from django.db import models
from django.utils.timezone import now
from taggit.managers import TaggableManager
from flickr.flickr_spec import FLICKR_PHOTO_SIZES, build_photo_source
from flickr.utils import ts_to_dt, unslash
URL_BASE = getattr(settings, 'FLICKR_URL_BASE', 'http://www.flickr.com/')
class FlickrUserManager(models.Manager):
def update_from_json(self, pk, info, **kwargs):
person = bunchify(info['person'])
user_data = {'username': person.username._content, 'realname': person.realname._content,
'flickr_id': person.id, 'nsid': person.nsid, 'ispro': person.ispro,
'iconserver': person.iconserver, 'iconfarm': person.iconfarm, 'path_alias': person.path_alias,
'photosurl': unslash(person.photosurl._content),
'profileurl': unslash(person.profileurl._content),
'mobileurl': unslash(person.mobileurl._content),
'tzoffset' : person.timezone.offset,
#'last_sync': now(),
}
return self.filter(pk=pk).update(**dict(user_data.items() + kwargs.items()))
class FlickrUser(models.Model):
user = models.OneToOneField(User)
flickr_id = models.CharField(max_length=50, null=True, blank=True)
nsid = models.CharField(max_length=32, null=True, blank=True)
username = models.CharField(max_length=64, null=True, blank=True)
realname = models.CharField(max_length=64, null=True, blank=True)
photosurl = models.URLField(max_length=255, null=True, blank=True)
profileurl = models.URLField(max_length=255, null=True, blank=True)
mobileurl = models.URLField(max_length=255, null=True, blank=True)
iconserver = models.CharField(max_length=4, null=True, blank=True)
iconfarm = models.PositiveSmallIntegerField(null=True, blank=True)
path_alias = models.CharField(max_length=32, null=True, blank=True)
ispro = models.NullBooleanField()
tzoffset = models.CharField(max_length=6, null=True, blank=True)
token = models.CharField(max_length=128, null=True, blank=True)
perms = models.CharField(max_length=32, null=True, blank=True)
last_sync = models.DateTimeField(null=True, blank=True)
objects = FlickrUserManager()
class Meta:
ordering = ['id']
def __unicode__(self):
return u"%s" % self.username
@property
def flickr_page_url(self):
if self.username:
return '%sphotos/%s/' % (URL_BASE, self.username)
return '%sphotos/%s/' % (URL_BASE, self.nsid)
def bump(self):
self.last_sync = now()
self.save()
class FlickrModel(models.Model):
flickr_id = models.CharField(unique=True, db_index=True, max_length=50)
user = models.ForeignKey(FlickrUser)
show = models.BooleanField(default=True) # #show the photo on your page?
last_sync = models.DateTimeField(blank=True, null=True, editable=False)
class Meta:
abstract = True
FLICKR_LICENSES = (
('0', 'All Rights Reserved'),
('1', 'Attribution-NonCommercial-ShareAlike License'),
('2', 'Attribution-NonCommercial License'),
('3', 'Attribution-NonCommercial-NoDerivs License'),
('4', 'Attribution License'),
('5', 'Attribution-ShareAlike License'),
('6', 'Attribution-NoDerivs License'),
)
class BigIntegerField(models.IntegerField):
"""
Defines a PostgreSQL compatible IntegerField needed to prevent 'integer out of range' with large numbers.
"""
def get_internal_type(self):
return 'BigIntegerField'
def db_type(self):
if settings.DATABASE_ENGINE == 'oracle':
db_type = 'NUMBER(19)'
else:
db_type = 'bigint'
return db_type
class PhotoManager(models.Manager):
allowed_sizes = ['Square', 'Thumbnail', 'Small', 'Medium 640', 'Large', 'Original', ]
def visible(self, *args, **kwargs):
return self.get_query_set().filter(show=True).filter(*args, **kwargs)
def public(self, *args, **kwargs):
return self.visible(ispublic=1, *args, **kwargs)
def _prepare_data(self, photo, flickr_user, info=None, exif=None, geo=None, **kwargs):
"""
Returns a dict with all information related to a photo. As some info
can be in several parameters, it returns data from the most especific
one.
@params photo: data for one photo as returned from 'flickr.people.getPhotos'
@params info: data for one photo as returned from 'flickr.photos.getInfo'
@params exif: data for one photo as returned from 'flickr.photos.getExif'
@params geo: data for one photo as returned from 'flickr.photos.geo.getLocation'
@params flickr_user: FlickrUser object for the given photo
@return: the dict with all photo data.
"""
photo_bunch = bunchify(photo)
photo_data = {}
if info and exif and geo:
""" Update last_sync only if all the info is retrieved from flickr """
photo_data.update({'last_sync' : now()})
if info:
""" With data returned from 'photos.getInfo' (no need of 'photo' dict)."""
info_bunch = bunchify(info['photo'])
photo_info = {
'flickr_id': info_bunch.id,
'server': info_bunch.server,
'farm': info_bunch.farm,
'secret': info_bunch.secret,
'originalsecret': getattr(info_bunch, 'originalsecret', ''),
'originalformat': getattr(info_bunch, 'originalformat', ''),
'title': info_bunch.title._content,
'description': info_bunch.description._content,
'date_posted': ts_to_dt(info_bunch.dates.posted, flickr_user.tzoffset),
'date_taken': '%s%s' % (info_bunch.dates.taken, flickr_user.tzoffset or ""),
'date_taken_granularity': info_bunch.dates.takengranularity,
'date_updated': ts_to_dt(info_bunch.dates.lastupdate, flickr_user.tzoffset),
'tags': info_bunch.tags.tag,
'ispublic': info_bunch.visibility.ispublic,
'isfriend': info_bunch.visibility.isfriend,
'isfamily': info_bunch.visibility.isfamily,
'license': info_bunch.license,
}
for url in info_bunch.urls.url:
if url.type == 'photopage':
photo_info['url_page'] = unslash(url._content)
else:
photo_info = {
'flickr_id': photo_bunch.id,
'server': photo_bunch.server,
'farm': photo_bunch.farm,
'secret': photo_bunch.secret,
'originalsecret': getattr(photo_bunch, 'originalsecret', ''),
'originalformat': getattr(photo_bunch, 'originalformat', ''),
'title': photo_bunch.title,
'description': getattr(getattr(photo_bunch, 'description', {}), '_content', ''),
'date_posted': ts_to_dt(getattr(photo_bunch, 'dateupload', ''), flickr_user.tzoffset),
'date_taken': getattr(photo_bunch, 'datetaken', ''),
'date_taken_granularity': getattr(photo_bunch, 'datetakengranularity', ''),
'date_updated': ts_to_dt(getattr(photo_bunch, 'lastupdate', ''), flickr_user.tzoffset),
'tags': getattr(photo_bunch, 'tags', ''),
'ispublic': photo_bunch.ispublic,
'isfriend': photo_bunch.isfriend,
'isfamily': photo_bunch.isfamily,
'license': photo_bunch.license,
}
if photo_info['date_taken']:
photo_info['date_taken'] = '%s%s' % (photo_info['date_taken'], flickr_user.tzoffset or "")
photo_data.update(photo_info)
if flickr_user:
photo_data.update({'user': flickr_user})
if exif:
""" Exif data can only come from 'photos.getExif' """
photo_data['exif'] = str(exif)
try:
photo_data['exif_camera'] = exif['photo']['camera']
for e in bunchify(exif['photo']['exif']):
if e.label == 'Exposure':
photo_data['exif_exposure'] = unslash(e.raw._content)
if e.label == 'Aperture':
photo_data['exif_aperture'] = unslash(e.clean._content)
if e.label == 'ISO Speed':
photo_data['exif_iso'] = e.raw._content
if e.label == 'Focal Length':
photo_data['exif_focal'] = e.clean._content
if e.label == 'Flash':
photo_data['exif_flash'] = e.raw._content
except KeyError:
pass
except AttributeError: # #'e.clean._content'
pass
if geo:
""" Geo data can come from 'photos.getGeo' """
try:
geo_data = {
'geo_latitude': geo['photo']['location']['latitude'],
'geo_longitude': geo['photo']['location']['longitude'],
'geo_accuracy': geo['photo']['location']['accuracy'],
}
except: # \todo TBD: not really tested
geo_data = {}
else:
geo_data = {
'geo_latitude': getattr(photo_bunch, 'latitude', ''),
'geo_longitude': getattr(photo_bunch, 'longitude', ''),
'geo_accuracy': getattr(photo_bunch, 'accuracy', ''),
}
photo_data.update(geo_data)
return photo_data
def _add_tags(self, obj, tags):
try:
obj.tags.set(*[tag for tag in tags.split()])
except KeyError:
pass
except:
# \todo TBD: implements feeders: from 'getPhotos' and from 'getInfo'
pass
def _add_sizes(self, obj, photo, sizes):
if sizes:
for size in sizes['sizes']['size']:
obj.sizes.create_from_json(photo=obj, size=size)
else:
for key, size in FLICKR_PHOTO_SIZES.items():
url_suffix = size.get('url_suffix', None)
if url_suffix and getattr(photo, 'url_%s' % url_suffix, None):
size_data = {
'label' : key,
'width' : getattr(photo, 'width_%s' % url_suffix, None),
'height' : getattr(photo, 'height_%s' % url_suffix, None),
'source' : getattr(photo, 'url_%s' % url_suffix, None),
}
obj.sizes.create_from_json(photo=obj, size=size_data)
def create_from_json(self, flickr_user, photo, info=None, sizes=None, exif=None, geo=None, **kwargs):
"""Create a record for flickr_user"""
photo_data = self._prepare_data(flickr_user=flickr_user, photo=photo, info=info, exif=exif, geo=geo, **kwargs)
tags = photo_data.pop('tags')
obj = self.create(**dict(photo_data.items() + kwargs.items()))
self._add_tags(obj, tags)
self._add_sizes(obj, photo, sizes)
return obj
def update_from_json(self, flickr_user, flickr_id, photo, info=None, sizes=None, exif=None, geo=None, **kwargs):
"""Update a record with flickr_id"""
update_tags = kwargs.pop('update_tags', False)
photo_data = self._prepare_data(photo=photo, flickr_user=flickr_user, info=info, exif=exif, geo=geo, **kwargs)
tags = photo_data.pop('tags')
result = self.filter(flickr_id=flickr_id).update(**dict(photo_data.items() + kwargs.items()))
if result == 1:
obj = self.get(flickr_id=flickr_id)
if update_tags:
obj.tags.clear()
self._add_tags(obj, tags)
if kwargs.get('update_sizes', False):
obj.sizes.clear() # Delete all sizes or only update them?
self._add_sizes(obj, photo, sizes)
return result
def create_or_update_from_json(self, flickr_user, info, sizes=None, exif=None, geo=None, **kwargs):
"""Pretty self explanatory"""
class Photo(FlickrModel):
"""http://www.flickr.com/services/api/explore/flickr.photos.getInfo"""
server = models.PositiveSmallIntegerField()
farm = models.PositiveSmallIntegerField()
secret = models.CharField(max_length=10)
originalsecret = models.CharField(max_length=10)
originalformat = models.CharField(max_length=4, null=True, blank=True)
title = models.CharField(max_length=255, null=True, blank=True)
description = models.TextField(null=True, blank=True)
date_posted = models.DateTimeField(null=True, blank=True)
date_taken = models.DateTimeField(null=True, blank=True)
date_taken_granularity = models.PositiveSmallIntegerField(null=True, blank=True)
date_updated = models.DateTimeField(null=True, blank=True)
url_page = models.URLField(max_length=255, null=True, blank=True)
tags = TaggableManager(blank=True)
slug = models.SlugField(max_length=255, null=True, blank=True)
"""http://www.flickr.com/services/api/explore/flickr.photos.getExif
    Lots of data of varying types and values; just store the raw JSON string in the exif field for now."""
exif = models.TextField(null=True, blank=True)
exif_camera = models.CharField(max_length=50, null=True, blank=True)
exif_exposure = models.CharField(max_length=10, null=True, blank=True)
exif_aperture = models.CharField(max_length=10, null=True, blank=True)
exif_iso = models.IntegerField(null=True, blank=True)
exif_focal = models.CharField(max_length=10, null=True, blank=True)
exif_flash = models.CharField(max_length=20, null=True, blank=True)
"""http://www.flickr.com/services/api/explore/flickr.photos.getPerms"""
ispublic = models.NullBooleanField()
isfriend = models.NullBooleanField()
isfamily = models.NullBooleanField()
"""http://www.flickr.com/services/api/explore/flickr.photos.geo.getLocation"""
geo_latitude = models.FloatField(null=True, blank=True)
geo_longitude = models.FloatField(null=True, blank=True)
geo_accuracy = models.PositiveSmallIntegerField(null=True, blank=True)
license = models.CharField(max_length=50, choices=FLICKR_LICENSES, default=0)
objects = PhotoManager()
class Meta:
ordering = ('-date_posted', '-date_taken',)
get_latest_by = 'date_posted'
def __unicode__(self):
return u'%s' % self.title
def get_absolute_url(self):
return reverse('flickr_photo', args=[self.flickr_id, ])
@property
def flickr_page_url(self):
return '%s%s/' % (self.user.flickr_page_url, self.flickr_id)
"""because 'Model.get_previous_by_FOO(**kwargs) For every DateField and DateTimeField that does not have null=True'"""
def get_next_by_date_posted(self):
try:
return Photo.objects.filter(date_posted__gte=self.date_posted).exclude(flickr_id=self.flickr_id).order_by('date_posted', 'date_taken')[:1].get()
except:
pass
def get_next_public_by_date_posted(self):
try:
return Photo.objects.public().filter(date_posted__gte=self.date_posted).exclude(flickr_id=self.flickr_id).order_by('date_posted', 'date_taken')[:1].get()
except:
pass
def get_previous_by_date_posted(self):
try:
return Photo.objects.filter(date_posted__lte=self.date_posted).exclude(flickr_id=self.flickr_id).order_by('-date_posted', '-date_taken')[:1].get()
except:
pass
def get_previous_public_by_date_posted(self):
try:
return Photo.objects.public().filter(date_posted__lte=self.date_posted).exclude(flickr_id=self.flickr_id).order_by('-date_posted', '-date_taken')[:1].get()
except:
pass
"""shortcuts - bringing some sanity"""
def get_next(self):
return self.get_next_public_by_date_posted()
def get_prev(self):
return self.get_previous_public_by_date_posted()
def get_next_by_date_taken(self):
try:
return Photo.objects.filter(date_taken__gte=self.date_taken)[:1].get()
except:
pass
def get_previous_by_date_taken(self):
try:
return Photo.objects.filter(date_taken__lte=self.date_taken)[:1].get()
except:
pass
def get_next_in_photoset(self, photoset):
if not hasattr(self, '_next_in_ps%s' % photoset.flickr_id):
photo = None
try:
if photoset.photos.filter(flickr_id=self.flickr_id).exists():
photo = photoset.photos.visible().filter(date_posted__gte=self.date_posted).exclude(flickr_id=self.flickr_id).order_by('date_posted', 'date_taken')[:1].get()
print photo
except:
pass
setattr(self, '_next_in_ps%s' % photoset.flickr_id, photo)
return getattr(self, '_next_in_ps%s' % photoset.flickr_id)
def get_previous_in_photoset(self, photoset):
if not hasattr(self, '_previous_in_ps%s' % photoset.flickr_id):
photo = None
try:
if photoset.photos.filter(flickr_id=self.flickr_id).exists():
photo = photoset.photos.visible().filter(date_posted__lte=self.date_posted).exclude(flickr_id=self.flickr_id).order_by('-date_posted', '-date_taken')[:1].get()
except:
pass
setattr(self, '_previous_in_ps%s' % photoset.flickr_id, photo)
return getattr(self, '_previous_in_ps%s' % photoset.flickr_id)
class PhotoSizeDataManager(models.Manager):
def _prepare_data(self, size, photo=None, **kwargs):
size_data = bunchify(size)
data = {'size': FLICKR_PHOTO_SIZES[size_data.label]['label'],
'width': size_data.width,
'height': size_data.height,
'source': size_data.source,
'url': unslash(getattr(size_data, 'url', '')),
}
if photo:
data['photo'] = photo
return data
def create_from_json(self, photo, size, **kwargs):
"""Create a record for photo size data"""
photosize_data = self._prepare_data(photo=photo, size=size, **kwargs)
obj = self.create(**dict(photosize_data.items() + kwargs.items()))
return obj
def update_from_json(self, photosize_id, size, **kwargs):
photosize_data = self._prepare_data(size=size, **kwargs)
        result = self.filter(id=photosize_id).update(**dict(photosize_data.items() + kwargs.items()))
return result
class PhotoSizeData(models.Model):
photo = models.ForeignKey(Photo, related_name='sizes')
size = models.CharField(max_length=11, choices=[(v['label'], k) for k, v in FLICKR_PHOTO_SIZES.iteritems()])
width = models.PositiveIntegerField(null=True, blank=True)
height = models.PositiveIntegerField(null=True, blank=True)
source = models.URLField(null=True, blank=True)
url = models.URLField(null=True, blank=True)
objects = PhotoSizeDataManager()
class Meta:
unique_together = (('photo', 'size'),)
""" Dynamic addition of properties to access photo sizes information (stored in photo model fields) """
def attrproperty(getter_function):
class _Object(object):
def __init__(self, obj):
self.obj = obj
def __getattr__(self, attr):
return getter_function(self.obj, attr)
return property(_Object)
class PhotoSize(object):
_source = None
_url = None
label = None
secret_field = None
format_field = None
source_suffix = None
url_suffix = None
source_append = ''
object = None
def __init__(self, photo, **kwargs):
self.photo = photo
for key, value in kwargs.iteritems():
setattr(self, key, value)
self.secret = getattr(self.photo, self.secret_field)
self.format = 'jpg' if not self.format_field else getattr(self.photo, self.format_field)
@classmethod
def as_property(cls, size):
data_dict = {'label': size['label'],
'secret_field': size.get('secret_field', 'secret'),
'format_field': size.get('format_field', None),
'source_suffix': size.get('source_suffix', None),
'url_suffix': size.get('url_suffix', None),
'source_append' : size.get('source_append', ''),
}
def func(self, attr):
obj = getattr(self, '_%s' % data_dict['label'], None)
if not obj:
obj = PhotoSize(self, **data_dict)
setattr(self, '_%s' % data_dict['label'], obj)
return getattr(obj, attr)
return func
def _get_object(self):
if not self.object:
self.object = self.photo.sizes.filter(size=self.label).get()
return self.object
def _get_source(self):
if not self._source:
if self.object:
self._source = self.object.source
if not self._source:
self._source = build_photo_source(self.photo.farm, self.photo.server, self.photo.flickr_id, self.secret, self.source_suffix, self.format, self.source_append)
return self._source
source = property(_get_source)
def _get_url(self):
if not self._url:
if self.object:
self._url = self.object.url
if not self._url:
self._url = '%s%s/sizes/%s/' % (self.photo.user.flickr_page_url, self.photo.flickr_id, self.url_suffix)
return self._url
url = property(_get_url)
@property
def width(self):
if self.object:
return self.object.width
return None
@property
def height(self):
if self.object:
return self.object.height
return None
for key, size in FLICKR_PHOTO_SIZES.items():
label = size.get('label', None)
setattr(Photo, label, attrproperty(PhotoSize.as_property(size=size)))
""" Deprecation warning """
for dato in ['source', 'url', 'width', 'height']:
method_deprecated = 'photo.%s_%s' % (label, dato)
method_suggested = 'photo.%s.%s' % (label, dato)
def get_property(self, label=label, dato=dato):
from warnings import warn
string = "Accessing photo sizes properties through '%s' is deprecated. Use '%s' instead." % (method_deprecated, method_suggested)
warn(string)
return getattr(getattr(self, label), dato)
def set_property(self, value, label=label, dato=dato):
"""
            We cannot do it this way because the photo does not yet have an id
            (it has not been saved), and saving it here raises an exception when
            the create call in PhotoManager.create_from_json finishes
if not self.id:
self.save()
size_data, created = PhotoSizeData.objects.get_or_create(photo = self, size=label)
size_data.dato = value
size_data.save()
"""
raise NotImplementedError
setattr(Photo, '%s_%s' % (label, dato), property(get_property, set_property))
def thumb(self):
return '<img src="%s"/>' % getattr(self, 'square_source')
thumb.allow_tags = True
setattr(Photo, 'thumbnail', thumb)
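# Usage sketch (assuming 'square' is one of the labels defined in FLICKR_PHOTO_SIZES):
# photo.square.source, photo.square.url, photo.square.width and photo.square.height
# resolve through PhotoSize, while flat forms such as photo.square_source still work
# but emit a deprecation warning (see above).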
class PhotoSetManager(models.Manager):
def visible(self, *args, **kwargs):
return self.get_query_set().filter(show=True).filter(*args, **kwargs)
def _add_photos(self, obj, photos):
for photo in photos:
try:
flickr_photo = Photo.objects.get(flickr_id=photo.id)
obj.photos.add(flickr_photo)
except Exception as e:
pass
def _prepare_data(self, info, photos, flickr_user, exif=None, geo=None):
photoset = bunchify(info)
photos = bunchify(photos['photoset']['photo'])
data = {'flickr_id': photoset.id, 'server': photoset.server,
'secret': photoset.secret, 'farm': photoset.farm, 'primary': photoset.primary,
'title': photoset.title._content, 'description': photoset.description._content,
'date_posted': ts_to_dt(photoset.date_create, flickr_user.tzoffset), 'date_updated': ts_to_dt(photoset.date_update, flickr_user.tzoffset),
'photos': photos,
'last_sync': now(),
}
if flickr_user:
data['user'] = flickr_user
return data
def update_from_json(self, flickr_user, flickr_id, info, photos, update_photos=False, **kwargs):
"""Update a record with flickr_id"""
photoset_data = self._prepare_data(info=info, photos=photos, flickr_user=flickr_user, **kwargs)
photos = photoset_data.pop('photos')
result = self.filter(flickr_id=flickr_id).update(**dict(photoset_data.items() + kwargs.items()))
if result == 1 and update_photos:
obj = self.get(flickr_id=flickr_id)
obj.photos.clear()
self._add_photos(obj, photos)
return result
def create_from_json(self, flickr_user, info, photos, **kwargs):
"""Create a record for flickr_user"""
photoset_data = self._prepare_data(flickr_user=flickr_user, info=info, photos=photos, **kwargs)
photos = photoset_data.pop('photos')
obj = self.create(**dict(photoset_data.items() + kwargs.items()))
self._add_photos(obj, photos)
return obj
class PhotoSet(FlickrModel):
"""http://www.flickr.com/services/api/explore/flickr.photosets.getInfo"""
server = models.PositiveSmallIntegerField()
farm = models.PositiveSmallIntegerField()
secret = models.CharField(max_length=10)
title = models.CharField(max_length=200)
description = models.TextField(null=True, blank=True)
primary = models.CharField(max_length=50, null=True, blank=True) # #flickr id of primary photo
date_posted = models.DateTimeField(null=True, blank=True)
date_updated = models.DateTimeField(null=True, blank=True)
photos = models.ManyToManyField(Photo, null=True, blank=True)
objects = PhotoSetManager()
class Meta:
ordering = ('-date_posted', '-id',)
get_latest_by = 'date_posted'
def __unicode__(self):
return u'%s' % self.title
def get_absolute_url(self):
return reverse('flickr_photoset', args=[self.flickr_id, ])
@property
def flickr_page_url(self):
return '%ssets/%s/' % (self.user.flickr_page_url, self.flickr_id)
def cover(self):
try:
return Photo.objects.get(flickr_id=self.primary)
except Photo.DoesNotExist:
try:
return Photo.objects.filter(photoset__id__in=[self.id, ]).latest()
except Photo.DoesNotExist:
pass
def thumbnail(self):
if self.cover():
return '<img src="%s"/>' % self.cover().square_source
thumbnail.allow_tags = True
class CollectionManager(models.Manager):
def _add_sets(self, obj, sets):
"""add sets that are present in our database"""
flickr_sets = PhotoSet.objects.filter(flickr_id__in=[s.id for s in sets])
obj.sets.add(*[s.id for s in flickr_sets])
def _prepare_data(self, info, flickr_user, parent=None):
col = bunchify(info)
data = {'flickr_id': col.id,
'title': col.title, 'description': col.description,
'parent': parent, 'last_sync': now(), 'icon': col.iconlarge,
}
if flickr_user:
data['user'] = flickr_user
if 'date_create' in col.keys():
data['date_created'] = ts_to_dt(col.date_create, flickr_user.tzoffset)
if 'set' in col.keys():
data['sets'] = col.set
if 'collection' in col.keys():
data['collections'] = col.collection
return data
def create_obj(self, info, parent=None, flickr_user=None, **kwargs):
data = self._prepare_data(info, parent=parent, flickr_user=flickr_user)
sets_data = cols_data = None
if 'sets' in data.keys():
sets_data = data.pop('sets')
if 'collections' in data.keys():
cols_data = data.pop('collections')
if kwargs.pop('update', False):
obj = self.filter(flickr_id=data['flickr_id']).update(**dict(data.items() + kwargs.items()))
if obj: # #filter().update() didn't return object
obj = self.get(flickr_id=data['flickr_id'])
else:
obj = self.create(**dict(data.items() + kwargs.items()))
else:
obj = self.create(**dict(data.items() + kwargs.items()))
if sets_data:
self._add_sets(obj, sets_data)
return obj, cols_data
def create_or_update_obj(self, info, parent=None, flickr_user=None, **kwargs):
return self.create_obj(info, parent, flickr_user, update=True, **kwargs)
def create_recursive(self, col, parent=None, flickr_user=None, **kwargs):
update_flag = kwargs.pop('update', False)
if update_flag:
obj, children = self.create_or_update_obj(col, parent, flickr_user)
else:
obj, children = self.create_obj(col, parent, flickr_user)
        if children is not None:
parent = obj
for child in children:
if update_flag:
self.create_or_update_obj(child, parent, flickr_user)
else:
self.create_recursive(child, parent, flickr_user)
return True
def create_from_usertree_json(self, flickr_user, tree, **kwargs):
collections = tree['collections']['collection']
for col in collections:
self.create_recursive(col, parent=None, flickr_user=flickr_user, **kwargs)
return True
def create_or_update_from_usertree_json(self, flickr_user, tree, **kwargs):
return self.create_from_usertree_json(flickr_user, tree, update=True, **kwargs)
class Collection(FlickrModel):
parent = models.ForeignKey('self', null=True)
title = models.CharField(max_length=200)
description = models.TextField(null=True, blank=True)
icon = models.URLField(max_length=255, null=True, blank=True)
sets = models.ManyToManyField(PhotoSet, null=True)
date_created = models.DateTimeField(null=True, blank=True)
objects = CollectionManager()
class Meta:
ordering = ('-date_created',)
get_latest_by = 'date_created'
def __unicode__(self):
return u'%s' % self.title
@property
def flickr_page_url(self):
return '%scollections/%s/' % (self.user.flickr_page_url, (self.flickr_id.split('-')[-1]))
def thumbnail(self):
return '<img src="%s"/>' % self.icon.replace('_l.', '_s.')
thumbnail.allow_tags = True
class JsonCache(models.Model):
flickr_id = models.CharField(max_length=50, null=True, blank=True)
info = models.TextField(null=True, blank=True)
sizes = models.TextField(null=True, blank=True)
exif = models.TextField(null=True, blank=True)
geo = models.TextField(null=True, blank=True)
exception = models.TextField(null=True, blank=True)
added = models.DateTimeField(auto_now=True, auto_now_add=True)
class PhotoDownload(models.Model):
def upload_path(self, filename):
dirbase = getattr(settings, 'FLICKR_DOWNLOAD_DIRBASE', 'flickr')
dirformat = getattr(settings, 'FLICKR_DOWNLOAD_DIRFORMAT', '%Y/%Y-%m')
return '/'.join([dirbase, str(self.photo.date_posted.date().strftime(dirformat)), filename])
photo = models.OneToOneField(Photo)
url = models.URLField(max_length=255, null=True, blank=True)
image_file = models.FileField(upload_to=upload_path, null=True, blank=True)
size = models.CharField(max_length=11, choices=[(v['label'], k) for k, v in FLICKR_PHOTO_SIZES.iteritems()])
errors = models.TextField(null=True, blank=True)
date_downloaded = models.DateTimeField(auto_now=True, auto_now_add=True)
def __unicode__(self):
return u'%s' % str(self.photo)
|
py
|
1a5c06b09f1f1d18e3b0fe87a7244674581be9e5
|
"""Holder for the (test kind, list of tests) pair with additional metadata their execution."""
from __future__ import absolute_import
import itertools
import threading
import time
from . import report as _report
from . import summary as _summary
from .. import config as _config
from .. import selector as _selector
def synchronized(method):
"""Provide decorator to enfore instance lock ownership when calling the method."""
def synced(self, *args, **kwargs):
"""Sync an instance lock."""
lock = getattr(self, "_lock")
with lock:
return method(self, *args, **kwargs)
return synced
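# Illustrative sketch (not part of the original module): a class whose methods
# are wrapped with @synchronized is expected to create `self._lock` (e.g. a
# threading.RLock) before those methods are called, for example:
#
#     class Counter(object):
#         def __init__(self):
#             self._lock = threading.RLock()
#             self.value = 0
#
#         @synchronized
#         def increment(self):
#             self.value += 1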
class Suite(object): # pylint: disable=too-many-instance-attributes
"""A suite of tests of a particular kind (e.g. C++ unit tests, dbtests, jstests)."""
def __init__(self, suite_name, suite_config, suite_options=_config.SuiteOptions.ALL_INHERITED):
"""Initialize the suite with the specified name and configuration."""
self._lock = threading.RLock()
self._suite_name = suite_name
self._suite_config = suite_config
self._suite_options = suite_options
self.test_kind = self.get_test_kind_config()
self.tests, self.excluded = self._get_tests_for_kind(self.test_kind)
self.return_code = None # Set by the executor.
self._suite_start_time = None
self._suite_end_time = None
self._test_start_times = []
self._test_end_times = []
self._reports = []
# We keep a reference to the TestReports from the currently running jobs so that we can
# report intermediate results.
self._partial_reports = None
def _get_tests_for_kind(self, test_kind):
"""Return the tests to run based on the 'test_kind'-specific filtering policy."""
test_info = self.get_selector_config()
        # The mongos_test doesn't have to filter anything; the test_info is just the arguments to
# the mongos program to be used as the test case.
if test_kind == "mongos_test":
mongos_options = test_info # Just for easier reading.
if not isinstance(mongos_options, dict):
raise TypeError("Expected dictionary of arguments to mongos")
return [mongos_options], []
tests, excluded = _selector.filter_tests(test_kind, test_info)
if _config.ORDER_TESTS_BY_NAME:
return sorted(tests, key=str.lower), sorted(excluded, key=str.lower)
return tests, excluded
def get_name(self):
"""Return the name of the test suite."""
return self._suite_name
def get_display_name(self):
"""Return the name of the test suite with a unique identifier for its SuiteOptions."""
if self.options.description is None:
return self.get_name()
return "{} ({})".format(self.get_name(), self.options.description)
def get_selector_config(self):
"""Return the "selector" section of the YAML configuration."""
if "selector" not in self._suite_config:
return {}
selector = self._suite_config["selector"].copy()
if self.options.include_tags is not None:
if "include_tags" in selector:
selector["include_tags"] = {
"$allOf": [
selector["include_tags"],
self.options.include_tags,
]
}
elif "exclude_tags" in selector:
selector["exclude_tags"] = {
"$anyOf": [
selector["exclude_tags"],
{"$not": self.options.include_tags},
]
}
else:
selector["include_tags"] = self.options.include_tags
return selector
def get_executor_config(self):
"""Return the "executor" section of the YAML configuration."""
return self._suite_config["executor"]
def get_test_kind_config(self):
"""Return the "test_kind" section of the YAML configuration."""
return self._suite_config["test_kind"]
@property
def options(self):
"""Get the options."""
return self._suite_options.resolve()
def with_options(self, suite_options):
"""Return a Suite instance with the specified resmokelib.config.SuiteOptions."""
return Suite(self._suite_name, self._suite_config, suite_options)
@synchronized
def record_suite_start(self):
"""Record the start time of the suite."""
self._suite_start_time = time.time()
@synchronized
def record_suite_end(self):
"""Record the end time of the suite."""
self._suite_end_time = time.time()
@synchronized
def record_test_start(self, partial_reports):
"""Record the start time of an execution.
The result is stored in the TestReports for currently running jobs.
"""
self._test_start_times.append(time.time())
self._partial_reports = partial_reports
@synchronized
def record_test_end(self, report):
"""Record the end time of an execution."""
self._test_end_times.append(time.time())
self._reports.append(report)
self._partial_reports = None
@synchronized
def get_active_report(self):
"""Return the partial report of the currently running execution, if there is one."""
if not self._partial_reports:
return None
return _report.TestReport.combine(*self._partial_reports)
@synchronized
def get_reports(self):
"""Return the list of reports.
If there's an execution currently in progress, then a report for the partial results
is included in the returned list.
"""
if self._partial_reports is not None:
return self._reports + [self.get_active_report()]
return self._reports
@synchronized
def summarize(self, sb):
"""Append a summary of the suite onto the string builder 'sb'."""
if not self._reports and not self._partial_reports:
sb.append("No tests ran.")
summary = _summary.Summary(0, 0.0, 0, 0, 0, 0)
elif not self._reports and self._partial_reports:
summary = self.summarize_latest(sb)
elif len(self._reports) == 1 and not self._partial_reports:
summary = self._summarize_execution(0, sb)
else:
summary = self._summarize_repeated(sb)
summarized_group = " %ss: %s" % (self.test_kind, "\n ".join(sb))
if summary.num_run == 0:
sb.append("Suite did not run any tests.")
return
# Override the 'time_taken' attribute of the summary if we have more accurate timing
# information available.
if self._suite_start_time is not None and self._suite_end_time is not None:
time_taken = self._suite_end_time - self._suite_start_time
summary = summary._replace(time_taken=time_taken)
sb.append("%d test(s) ran in %0.2f seconds"
" (%d succeeded, %d were skipped, %d failed, %d errored)" % summary)
sb.append(summarized_group)
@synchronized
def summarize_latest(self, sb):
"""Return a summary of the latest execution of the suite.
Also append a summary of that execution onto the string builder 'sb'.
If there's an execution currently in progress, then the partial
summary of that execution is appended to 'sb'.
"""
if self._partial_reports is None:
return self._summarize_execution(-1, sb)
active_report = _report.TestReport.combine(*self._partial_reports)
# Use the current time as the time that this suite finished running.
end_time = time.time()
return self._summarize_report(active_report, self._test_start_times[-1], end_time, sb)
def _summarize_repeated(self, sb):
"""Return the summary information of all executions.
Also append each execution's summary onto the string builder 'sb' and
information of how many repetitions there were.
"""
reports = self.get_reports() # Also includes the combined partial reports.
num_iterations = len(reports)
start_times = self._test_start_times[:]
end_times = self._test_end_times[:]
if self._partial_reports:
end_times.append(time.time()) # Add an end time in this copy for the partial reports.
total_time_taken = end_times[-1] - start_times[0]
sb.append("Executed %d times in %0.2f seconds:" % (num_iterations, total_time_taken))
combined_summary = _summary.Summary(0, 0.0, 0, 0, 0, 0)
for iteration in xrange(num_iterations):
# Summarize each execution as a bulleted list of results.
bulleter_sb = []
summary = self._summarize_report(reports[iteration], start_times[iteration],
end_times[iteration], bulleter_sb)
combined_summary = _summary.combine(combined_summary, summary)
for (i, line) in enumerate(bulleter_sb):
# Only bullet first line, indent others.
prefix = "* " if i == 0 else " "
sb.append(prefix + line)
return combined_summary
def _summarize_execution(self, iteration, sb):
"""Return the summary information of the execution given by 'iteration'.
Also append a summary of that execution onto the string builder 'sb'.
"""
return self._summarize_report(self._reports[iteration], self._test_start_times[iteration],
self._test_end_times[iteration], sb)
def _summarize_report(self, report, start_time, end_time, sb):
"""Return the summary information of the execution.
The summary is for 'report' that started at 'start_time' and finished at 'end_time'.
Also append a summary of that execution onto the string builder 'sb'.
"""
time_taken = end_time - start_time
# Tests that were interrupted are treated as failures because (1) the test has already been
# started and therefore isn't skipped and (2) the test has yet to finish and therefore
# cannot be said to have succeeded.
num_failed = report.num_failed + report.num_interrupted
num_run = report.num_succeeded + report.num_errored + num_failed
num_skipped = len(self.tests) + report.num_dynamic - num_run
if report.num_succeeded == num_run and num_skipped == 0:
sb.append("All %d test(s) passed in %0.2f seconds." % (num_run, time_taken))
return _summary.Summary(num_run, time_taken, num_run, 0, 0, 0)
summary = _summary.Summary(num_run, time_taken, report.num_succeeded, num_skipped,
num_failed, report.num_errored)
sb.append("%d test(s) ran in %0.2f seconds"
" (%d succeeded, %d were skipped, %d failed, %d errored)" % summary)
if num_failed > 0:
sb.append("The following tests failed (with exit code):")
for test_info in itertools.chain(report.get_failed(), report.get_interrupted()):
sb.append(" %s (%d)" % (test_info.test_id, test_info.return_code))
if report.num_errored > 0:
sb.append("The following tests had errors:")
for test_info in report.get_errored():
sb.append(" %s" % (test_info.test_id))
return summary
@staticmethod
def log_summaries(logger, suites, time_taken):
"""Log summary of all suites."""
sb = []
sb.append("Summary of all suites: %d suites ran in %0.2f seconds" % (len(suites),
time_taken))
for suite in suites:
suite_sb = []
suite.summarize(suite_sb)
sb.append(" %s: %s" % (suite.get_display_name(), "\n ".join(suite_sb)))
logger.info("=" * 80)
logger.info("\n".join(sb))
|
py
|
1a5c0766847eb6abc5817a0bd35ee8a655502699
|
from .Apps import all_apps, App
from .Projects import all_projects
from .Projects import Projects
from .Pods import all_pods
from .utils.oc import oc
import random
import logging
import time
class Task:
def __init__(self,config,task):
self.config = config
self.task = task
self.templates = config["appTemplates"]
self.logger = logging.getLogger('reliability')
random.seed()
def execute(self):
all_apps.init()
all_projects.init()
# all_projects = Projects()
# all_projects.projects = 2
# all_projects.max_projects = 9
# all_projects.projects = {'cakephp-mysql-example-0': {"app": None, "name": 'cakephp-mysql-example-0'}, 'nodejs-mongodb-example-1':{"app": None, "name": 'nodejs-mongodb-example-1'}}
all_pods.init()
resource = self.task["resource"]
action = self.task["action"]
if resource == "projects":
if action == "create":
self.logger.debug("create projects")
quantity = self.task["quantity"]
for i in range(0, quantity):
project_base_name = random.choice(self.templates)["template"]
new_project = all_projects.add(project_base_name)
                    if new_project is not None:
app = App(project_base_name, new_project.name, project_base_name, project_base_name)
new_project.app = app
all_apps.add(app)
elif action == "delete":
self.logger.debug("delete projects")
projects = list(all_projects.projects.keys())
project_to_delete = random.choice(projects)
all_projects.delete(project_to_delete)
elif action == "check":
self.logger.debug("check projects")
all_projects.check_projects()
elif action == "modify":
for project_key in all_projects.projects.keys():
all_projects.projects[project_key].modify()
elif resource == "apps":
if action == "build":
self.logger.debug("Build apps")
if len(all_apps.apps) > 0:
apps = list(all_apps.apps.keys())
app_to_build_key = random.choice(apps)
app_to_build = all_apps.apps[app_to_build_key]
app_to_build.build()
elif action == "scaleUp":
self.logger.debug("ScaleUp apps")
all_apps.init()
for app_key in all_apps.apps.keys():
all_apps.apps[app_key].scale_up()
elif action =="scaleDown":
self.logger.debug("ScaleDown apps")
for app_key in all_apps.apps.keys():
all_apps.apps[app_key].scale_down()
time.sleep(30)
elif action == "visit":
self.logger.debug("Visit Apps")
for app_key in all_apps.apps.keys():
all_apps.apps[app_key].visit()
elif resource == "pods":
if action == "check":
self.logger.debug("Check pods")
all_pods.check()
elif resource == "session" :
if action == "login":
result, rc = oc("login -u " + self.task["user"] + " -p " + self.task["password"])
                if rc != 0:
self.logger.error("Login failed")
|
py
|
1a5c087cd75fb44fc436dc7454dc183c87007d07
|
import contextlib
import sys
import os
import torch
import unittest
from torchvision import io
from torchvision.datasets.video_utils import VideoClips, unfold
from common_utils import get_tmp_dir
@contextlib.contextmanager
def get_list_of_videos(num_videos=5, sizes=None, fps=None):
with get_tmp_dir() as tmp_dir:
names = []
for i in range(num_videos):
if sizes is None:
size = 5 * (i + 1)
else:
size = sizes[i]
if fps is None:
f = 5
else:
f = fps[i]
data = torch.randint(0, 255, (size, 300, 400, 3), dtype=torch.uint8)
name = os.path.join(tmp_dir, "{}.mp4".format(i))
names.append(name)
io.write_video(name, data, fps=f)
yield names
class Tester(unittest.TestCase):
def test_unfold(self):
a = torch.arange(7)
r = unfold(a, 3, 3, 1)
expected = torch.tensor([
[0, 1, 2],
[3, 4, 5],
])
self.assertTrue(r.equal(expected))
r = unfold(a, 3, 2, 1)
expected = torch.tensor([
[0, 1, 2],
[2, 3, 4],
[4, 5, 6]
])
self.assertTrue(r.equal(expected))
r = unfold(a, 3, 2, 2)
expected = torch.tensor([
[0, 2, 4],
[2, 4, 6],
])
self.assertTrue(r.equal(expected))
@unittest.skipIf(not io.video._av_available(), "this test requires av")
def test_video_clips(self):
with get_list_of_videos(num_videos=3) as video_list:
video_clips = VideoClips(video_list, 5, 5, num_workers=2)
self.assertEqual(video_clips.num_clips(), 1 + 2 + 3)
for i, (v_idx, c_idx) in enumerate([(0, 0), (1, 0), (1, 1), (2, 0), (2, 1), (2, 2)]):
video_idx, clip_idx = video_clips.get_clip_location(i)
self.assertEqual(video_idx, v_idx)
self.assertEqual(clip_idx, c_idx)
video_clips = VideoClips(video_list, 6, 6)
self.assertEqual(video_clips.num_clips(), 0 + 1 + 2)
for i, (v_idx, c_idx) in enumerate([(1, 0), (2, 0), (2, 1)]):
video_idx, clip_idx = video_clips.get_clip_location(i)
self.assertEqual(video_idx, v_idx)
self.assertEqual(clip_idx, c_idx)
video_clips = VideoClips(video_list, 6, 1)
self.assertEqual(video_clips.num_clips(), 0 + (10 - 6 + 1) + (15 - 6 + 1))
for i, v_idx, c_idx in [(0, 1, 0), (4, 1, 4), (5, 2, 0), (6, 2, 1)]:
video_idx, clip_idx = video_clips.get_clip_location(i)
self.assertEqual(video_idx, v_idx)
self.assertEqual(clip_idx, c_idx)
@unittest.skipIf(not io.video._av_available(), "this test requires av")
def test_video_clips_custom_fps(self):
with get_list_of_videos(num_videos=3, sizes=[12, 12, 12], fps=[3, 4, 6]) as video_list:
num_frames = 4
for fps in [1, 3, 4, 10]:
video_clips = VideoClips(video_list, num_frames, num_frames, fps, num_workers=2)
for i in range(video_clips.num_clips()):
video, audio, info, video_idx = video_clips.get_clip(i)
self.assertEqual(video.shape[0], num_frames)
self.assertEqual(info["video_fps"], fps)
self.assertEqual(info, {"video_fps": fps})
# TODO add tests checking that the content is right
def test_compute_clips_for_video(self):
video_pts = torch.arange(30)
# case 1: single clip
num_frames = 13
orig_fps = 30
duration = float(len(video_pts)) / orig_fps
new_fps = 13
clips, idxs = VideoClips.compute_clips_for_video(video_pts, num_frames, num_frames,
orig_fps, new_fps)
resampled_idxs = VideoClips._resample_video_idx(int(duration * new_fps), orig_fps, new_fps)
self.assertEqual(len(clips), 1)
self.assertTrue(clips.equal(idxs))
self.assertTrue(idxs[0].equal(resampled_idxs))
# case 2: all frames appear only once
num_frames = 4
orig_fps = 30
duration = float(len(video_pts)) / orig_fps
new_fps = 12
clips, idxs = VideoClips.compute_clips_for_video(video_pts, num_frames, num_frames,
orig_fps, new_fps)
resampled_idxs = VideoClips._resample_video_idx(int(duration * new_fps), orig_fps, new_fps)
self.assertEqual(len(clips), 3)
self.assertTrue(clips.equal(idxs))
self.assertTrue(idxs.flatten().equal(resampled_idxs))
if __name__ == '__main__':
unittest.main()
|
py
|
1a5c0a4998bff8c06589e3535f6f37775d01f119
|
# Copyright 2016 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module to generate sample bazelrc file."""
import os
from string import Template
from util import get_git_root
GIT_ROOT = get_git_root()
BAZELRC_DIR = os.path.join(GIT_ROOT, "bazelrc")
LATEST_BAZELRC_LINK = BAZELRC = os.path.join(BAZELRC_DIR, "latest.bazelrc")
LICENCE_TPL = os.path.join(GIT_ROOT, "release", "license.tpl")
BAZELRC_TPL = os.path.join(GIT_ROOT, "release", "bazelrc.tpl")
def create_bazelrc_and_update_link(bazel_version):
"""Creates new sample .bazelrc file and update latest.bazelrc symlink.
Example bazelrc files can be found in directory bazelrc/.
There is one sample bazelrc file Bazel version. bazelrc/latest.bazelrc should
always be symlinked to the .bazelrc file for the latest version of Bazel.
If the file already exists in this repo, the script will delete it and
generate new one.
Args:
bazel_version: string, the version of Bazel used to generate the configs.
"""
bazelrc_path = os.path.join(
BAZELRC_DIR, "bazel-{version}.bazelrc".format(version=bazel_version))
# Remove old version of this .bazelrc file.
if os.path.exists(bazelrc_path):
os.remove(bazelrc_path)
with open(bazelrc_path, "w") as bazelrc_file:
# Write license header.
with open(LICENCE_TPL, "r") as license_header:
bazelrc_file.write(license_header.read())
# Write sample .bazelrc body.
with open(BAZELRC_TPL, "r") as tpl_file:
tpl = Template(tpl_file.read()).substitute(BAZEL_VERSION=bazel_version)
bazelrc_file.write(tpl)
# Update latest.bazelrc link
if os.path.exists(LATEST_BAZELRC_LINK):
os.remove(LATEST_BAZELRC_LINK)
os.symlink(os.path.basename(bazelrc_path), LATEST_BAZELRC_LINK)
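# Hypothetical usage sketch (the version string below is made up for
# illustration): this writes bazelrc/bazel-<version>.bazelrc and re-points the
# latest.bazelrc symlink at it.
#
#     if __name__ == "__main__":
#         create_bazelrc_and_update_link("4.2.1")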
|
py
|
1a5c0ae070ca22c130d96128445d4731ad5a2d79
|
# -*- coding: utf-8 -*-
"""Tools for inspecting Python objects.
Uses syntax highlighting for presenting the various information elements.
Similar in spirit to the inspect module, but all calls take a name argument to
reference the name under which an object is being read.
"""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
__all__ = ['Inspector','InspectColors']
# stdlib modules
import ast
import inspect
from inspect import signature
import linecache
import warnings
import os
from textwrap import dedent
import types
import io as stdlib_io
from typing import Union
# IPython's own
from IPython.core import page
from IPython.lib.pretty import pretty
from IPython.testing.skipdoctest import skip_doctest
from IPython.utils import PyColorize
from IPython.utils import openpy
from IPython.utils import py3compat
from IPython.utils.dir2 import safe_hasattr
from IPython.utils.path import compress_user
from IPython.utils.text import indent
from IPython.utils.wildcard import list_namespace
from IPython.utils.wildcard import typestr2type
from IPython.utils.coloransi import TermColors, ColorScheme, ColorSchemeTable
from IPython.utils.py3compat import cast_unicode
from IPython.utils.colorable import Colorable
from IPython.utils.decorators import undoc
from pygments import highlight
from pygments.lexers import PythonLexer
from pygments.formatters import HtmlFormatter
def pylight(code):
return highlight(code, PythonLexer(), HtmlFormatter(noclasses=True))
# builtin docstrings to ignore
_func_call_docstring = types.FunctionType.__call__.__doc__
_object_init_docstring = object.__init__.__doc__
_builtin_type_docstrings = {
inspect.getdoc(t) for t in (types.ModuleType, types.MethodType,
types.FunctionType, property)
}
_builtin_func_type = type(all)
_builtin_meth_type = type(str.upper) # Bound methods have the same type as builtin functions
#****************************************************************************
# Builtin color schemes
Colors = TermColors # just a shorthand
InspectColors = PyColorize.ANSICodeColors
#****************************************************************************
# Auxiliary functions and objects
# See the messaging spec for the definition of all these fields. This list
# effectively defines the order of display
info_fields = ['type_name', 'base_class', 'string_form', 'namespace',
'length', 'file', 'definition', 'docstring', 'source',
'init_definition', 'class_docstring', 'init_docstring',
'call_def', 'call_docstring',
# These won't be printed but will be used to determine how to
# format the object
'ismagic', 'isalias', 'isclass', 'found', 'name'
]
def object_info(**kw):
"""Make an object info dict with all fields present."""
infodict = {k:None for k in info_fields}
infodict.update(kw)
return infodict
def get_encoding(obj):
"""Get encoding for python source file defining obj
Returns None if obj is not defined in a sourcefile.
"""
ofile = find_file(obj)
# run contents of file through pager starting at line where the object
# is defined, as long as the file isn't binary and is actually on the
# filesystem.
if ofile is None:
return None
elif ofile.endswith(('.so', '.dll', '.pyd')):
return None
elif not os.path.isfile(ofile):
return None
else:
# Print only text files, not extension binaries. Note that
# getsourcelines returns lineno with 1-offset and page() uses
# 0-offset, so we must adjust.
with stdlib_io.open(ofile, 'rb') as buffer: # Tweaked to use io.open for Python 2
encoding, lines = openpy.detect_encoding(buffer.readline)
return encoding
def getdoc(obj) -> Union[str,None]:
"""Stable wrapper around inspect.getdoc.
This can't crash because of attribute problems.
It also attempts to call a getdoc() method on the given object. This
allows objects which provide their docstrings via non-standard mechanisms
(like Pyro proxies) to still be inspected by ipython's ? system.
"""
# Allow objects to offer customized documentation via a getdoc method:
try:
ds = obj.getdoc()
except Exception:
pass
else:
if isinstance(ds, str):
return inspect.cleandoc(ds)
docstr = inspect.getdoc(obj)
return docstr
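# Illustrative sketch of the getdoc() hook described above (the class name is
# made up for the example): an object can supply its documentation dynamically.
#
#     class RemoteProxy:
#         def getdoc(self):
#             return "Docs fetched from the remote object."
#
#     getdoc(RemoteProxy())  # -> "Docs fetched from the remote object."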
def getsource(obj, oname='') -> Union[str,None]:
"""Wrapper around inspect.getsource.
This can be modified by other projects to provide customized source
extraction.
Parameters
----------
obj : object
an object whose source code we will attempt to extract
oname : str
(optional) a name under which the object is known
Returns
-------
src : unicode or None
"""
if isinstance(obj, property):
sources = []
for attrname in ['fget', 'fset', 'fdel']:
fn = getattr(obj, attrname)
if fn is not None:
encoding = get_encoding(fn)
oname_prefix = ('%s.' % oname) if oname else ''
sources.append(''.join(('# ', oname_prefix, attrname)))
if inspect.isfunction(fn):
sources.append(dedent(getsource(fn)))
else:
# Default str/repr only prints function name,
# pretty.pretty prints module name too.
sources.append(
'%s%s = %s\n' % (oname_prefix, attrname, pretty(fn))
)
if sources:
return '\n'.join(sources)
else:
return None
else:
# Get source for non-property objects.
obj = _get_wrapped(obj)
try:
src = inspect.getsource(obj)
except TypeError:
# The object itself provided no meaningful source, try looking for
# its class definition instead.
try:
src = inspect.getsource(obj.__class__)
except (OSError, TypeError):
return None
except OSError:
return None
return src
def is_simple_callable(obj):
"""True if obj is a function ()"""
return (inspect.isfunction(obj) or inspect.ismethod(obj) or \
isinstance(obj, _builtin_func_type) or isinstance(obj, _builtin_meth_type))
@undoc
def getargspec(obj):
"""Wrapper around :func:`inspect.getfullargspec`
In addition to functions and methods, this can also handle objects with a
``__call__`` attribute.
DEPRECATED: Deprecated since 7.10. Do not use, will be removed.
"""
warnings.warn('`getargspec` function is deprecated as of IPython 7.10'
                  ' and will be removed in future versions.', DeprecationWarning, stacklevel=2)
if safe_hasattr(obj, '__call__') and not is_simple_callable(obj):
obj = obj.__call__
return inspect.getfullargspec(obj)
@undoc
def format_argspec(argspec):
"""Format argspect, convenience wrapper around inspect's.
This takes a dict instead of ordered arguments and calls
inspect.format_argspec with the arguments in the necessary order.
DEPRECATED (since 7.10): Do not use; will be removed in future versions.
"""
warnings.warn('`format_argspec` function is deprecated as of IPython 7.10'
                  ' and will be removed in future versions.', DeprecationWarning, stacklevel=2)
return inspect.formatargspec(argspec['args'], argspec['varargs'],
argspec['varkw'], argspec['defaults'])
@undoc
def call_tip(oinfo, format_call=True):
"""DEPRECATED since 6.0. Extract call tip data from an oinfo dict."""
warnings.warn(
"`call_tip` function is deprecated as of IPython 6.0"
"and will be removed in future versions.",
DeprecationWarning,
stacklevel=2,
)
# Get call definition
argspec = oinfo.get('argspec')
if argspec is None:
call_line = None
else:
# Callable objects will have 'self' as their first argument, prune
# it out if it's there for clarity (since users do *not* pass an
# extra first argument explicitly).
try:
has_self = argspec['args'][0] == 'self'
except (KeyError, IndexError):
pass
else:
if has_self:
argspec['args'] = argspec['args'][1:]
call_line = oinfo['name']+format_argspec(argspec)
# Now get docstring.
# The priority is: call docstring, constructor docstring, main one.
doc = oinfo.get('call_docstring')
if doc is None:
doc = oinfo.get('init_docstring')
if doc is None:
doc = oinfo.get('docstring','')
return call_line, doc
def _get_wrapped(obj):
"""Get the original object if wrapped in one or more @decorators
Some objects automatically construct similar objects on any unrecognised
attribute access (e.g. unittest.mock.call). To protect against infinite loops,
this will arbitrarily cut off after 100 levels of obj.__wrapped__
attribute access. --TK, Jan 2016
"""
orig_obj = obj
i = 0
while safe_hasattr(obj, '__wrapped__'):
obj = obj.__wrapped__
i += 1
if i > 100:
# __wrapped__ is probably a lie, so return the thing we started with
return orig_obj
return obj
def find_file(obj) -> str:
"""Find the absolute path to the file where an object was defined.
This is essentially a robust wrapper around `inspect.getabsfile`.
Returns None if no file can be found.
Parameters
----------
obj : any Python object
Returns
-------
fname : str
The absolute path to the file where the object was defined.
"""
obj = _get_wrapped(obj)
fname = None
try:
fname = inspect.getabsfile(obj)
except TypeError:
# For an instance, the file that matters is where its class was
# declared.
try:
fname = inspect.getabsfile(obj.__class__)
except (OSError, TypeError):
# Can happen for builtins
pass
except OSError:
pass
return cast_unicode(fname)
def find_source_lines(obj):
"""Find the line number in a file where an object was defined.
This is essentially a robust wrapper around `inspect.getsourcelines`.
Returns None if no file can be found.
Parameters
----------
obj : any Python object
Returns
-------
lineno : int
The line number where the object definition starts.
"""
obj = _get_wrapped(obj)
try:
lineno = inspect.getsourcelines(obj)[1]
except TypeError:
# For instances, try the class object like getsource() does
try:
lineno = inspect.getsourcelines(obj.__class__)[1]
except (OSError, TypeError):
return None
except OSError:
return None
return lineno
class Inspector(Colorable):
def __init__(self, color_table=InspectColors,
code_color_table=PyColorize.ANSICodeColors,
scheme=None,
str_detail_level=0,
parent=None, config=None):
super(Inspector, self).__init__(parent=parent, config=config)
self.color_table = color_table
self.parser = PyColorize.Parser(out='str', parent=self, style=scheme)
self.format = self.parser.format
self.str_detail_level = str_detail_level
self.set_active_scheme(scheme)
def _getdef(self,obj,oname='') -> Union[str,None]:
"""Return the call signature for any callable object.
If any exception is generated, None is returned instead and the
exception is suppressed."""
try:
return _render_signature(signature(obj), oname)
except:
return None
def __head(self,h) -> str:
"""Return a header string with proper colors."""
return '%s%s%s' % (self.color_table.active_colors.header,h,
self.color_table.active_colors.normal)
def set_active_scheme(self, scheme):
if scheme is not None:
self.color_table.set_active_scheme(scheme)
self.parser.color_table.set_active_scheme(scheme)
def noinfo(self, msg, oname):
"""Generic message when no information is found."""
print('No %s found' % msg, end=' ')
if oname:
print('for %s' % oname)
else:
print()
def pdef(self, obj, oname=''):
"""Print the call signature for any callable object.
If the object is a class, print the constructor information."""
if not callable(obj):
print('Object is not callable.')
return
header = ''
if inspect.isclass(obj):
header = self.__head('Class constructor information:\n')
output = self._getdef(obj,oname)
if output is None:
self.noinfo('definition header',oname)
else:
print(header,self.format(output), end=' ')
# In Python 3, all classes are new-style, so they all have __init__.
@skip_doctest
def pdoc(self, obj, oname='', formatter=None):
"""Print the docstring for any object.
Optional:
-formatter: a function to run the docstring through for specially
formatted docstrings.
Examples
--------
In [1]: class NoInit:
...: pass
In [2]: class NoDoc:
...: def __init__(self):
...: pass
In [3]: %pdoc NoDoc
No documentation found for NoDoc
In [4]: %pdoc NoInit
No documentation found for NoInit
In [5]: obj = NoInit()
In [6]: %pdoc obj
No documentation found for obj
In [5]: obj2 = NoDoc()
In [6]: %pdoc obj2
No documentation found for obj2
"""
head = self.__head # For convenience
lines = []
ds = getdoc(obj)
if formatter:
ds = formatter(ds).get('plain/text', ds)
if ds:
lines.append(head("Class docstring:"))
lines.append(indent(ds))
if inspect.isclass(obj) and hasattr(obj, '__init__'):
init_ds = getdoc(obj.__init__)
if init_ds is not None:
lines.append(head("Init docstring:"))
lines.append(indent(init_ds))
elif hasattr(obj,'__call__'):
call_ds = getdoc(obj.__call__)
if call_ds:
lines.append(head("Call docstring:"))
lines.append(indent(call_ds))
if not lines:
self.noinfo('documentation',oname)
else:
page.page('\n'.join(lines))
def psource(self, obj, oname=''):
"""Print the source code for an object."""
# Flush the source cache because inspect can return out-of-date source
linecache.checkcache()
try:
src = getsource(obj, oname=oname)
except Exception:
src = None
if src is None:
self.noinfo('source', oname)
else:
page.page(self.format(src))
def pfile(self, obj, oname=''):
"""Show the whole file where an object was defined."""
lineno = find_source_lines(obj)
if lineno is None:
self.noinfo('file', oname)
return
ofile = find_file(obj)
# run contents of file through pager starting at line where the object
# is defined, as long as the file isn't binary and is actually on the
# filesystem.
if ofile.endswith(('.so', '.dll', '.pyd')):
print('File %r is binary, not printing.' % ofile)
elif not os.path.isfile(ofile):
print('File %r does not exist, not printing.' % ofile)
else:
# Print only text files, not extension binaries. Note that
# getsourcelines returns lineno with 1-offset and page() uses
# 0-offset, so we must adjust.
page.page(self.format(openpy.read_py_file(ofile, skip_encoding_cookie=False)), lineno - 1)
def _mime_format(self, text:str, formatter=None) -> dict:
"""Return a mime bundle representation of the input text.
- if `formatter` is None, the returned mime bundle has
a ``text/plain`` field, with the input text.
a ``text/html`` field with a ``<pre>`` tag containing the input text.
- if ``formatter`` is not None, it must be a callable transforming the
input text into a mime bundle. Default values for ``text/plain`` and
``text/html`` representations are the ones described above.
Note:
Formatters returning strings are supported but this behavior is deprecated.
"""
defaults = {
'text/plain': text,
'text/html': '<pre>' + text + '</pre>'
}
if formatter is None:
return defaults
else:
formatted = formatter(text)
if not isinstance(formatted, dict):
# Handle the deprecated behavior of a formatter returning
# a string instead of a mime bundle.
return {
'text/plain': formatted,
'text/html': '<pre>' + formatted + '</pre>'
}
else:
return dict(defaults, **formatted)
def format_mime(self, bundle):
text_plain = bundle['text/plain']
text = ''
heads, bodies = list(zip(*text_plain))
_len = max(len(h) for h in heads)
for head, body in zip(heads, bodies):
body = body.strip('\n')
delim = '\n' if '\n' in body else ' '
text += self.__head(head+':') + (_len - len(head))*' ' +delim + body +'\n'
bundle['text/plain'] = text
return bundle
def _get_info(
self, obj, oname="", formatter=None, info=None, detail_level=0, omit_sections=()
):
"""Retrieve an info dict and format it.
Parameters
----------
obj : any
Object to inspect and return info from
oname : str (default: ''):
Name of the variable pointing to `obj`.
formatter : callable
info
already computed information
detail_level : integer
Granularity of detail level, if set to 1, give more information.
omit_sections : container[str]
Titles or keys to omit from output (can be set, tuple, etc., anything supporting `in`)
"""
info = self.info(obj, oname=oname, info=info, detail_level=detail_level)
_mime = {
'text/plain': [],
'text/html': '',
}
def append_field(bundle, title:str, key:str, formatter=None):
if title in omit_sections or key in omit_sections:
return
field = info[key]
if field is not None:
formatted_field = self._mime_format(field, formatter)
bundle['text/plain'].append((title, formatted_field['text/plain']))
bundle['text/html'] += '<h1>' + title + '</h1>\n' + formatted_field['text/html'] + '\n'
def code_formatter(text):
return {
'text/plain': self.format(text),
'text/html': pylight(text)
}
if info['isalias']:
append_field(_mime, 'Repr', 'string_form')
elif info['ismagic']:
if detail_level > 0:
append_field(_mime, 'Source', 'source', code_formatter)
else:
append_field(_mime, 'Docstring', 'docstring', formatter)
append_field(_mime, 'File', 'file')
elif info['isclass'] or is_simple_callable(obj):
# Functions, methods, classes
append_field(_mime, 'Signature', 'definition', code_formatter)
append_field(_mime, 'Init signature', 'init_definition', code_formatter)
append_field(_mime, 'Docstring', 'docstring', formatter)
if detail_level > 0 and info['source']:
append_field(_mime, 'Source', 'source', code_formatter)
else:
append_field(_mime, 'Init docstring', 'init_docstring', formatter)
append_field(_mime, 'File', 'file')
append_field(_mime, 'Type', 'type_name')
append_field(_mime, 'Subclasses', 'subclasses')
else:
# General Python objects
append_field(_mime, 'Signature', 'definition', code_formatter)
append_field(_mime, 'Call signature', 'call_def', code_formatter)
append_field(_mime, 'Type', 'type_name')
append_field(_mime, 'String form', 'string_form')
# Namespace
if info['namespace'] != 'Interactive':
append_field(_mime, 'Namespace', 'namespace')
append_field(_mime, 'Length', 'length')
append_field(_mime, 'File', 'file')
# Source or docstring, depending on detail level and whether
# source found.
if detail_level > 0 and info['source']:
append_field(_mime, 'Source', 'source', code_formatter)
else:
append_field(_mime, 'Docstring', 'docstring', formatter)
append_field(_mime, 'Class docstring', 'class_docstring', formatter)
append_field(_mime, 'Init docstring', 'init_docstring', formatter)
append_field(_mime, 'Call docstring', 'call_docstring', formatter)
return self.format_mime(_mime)
def pinfo(
self,
obj,
oname="",
formatter=None,
info=None,
detail_level=0,
enable_html_pager=True,
omit_sections=(),
):
"""Show detailed information about an object.
Optional arguments:
- oname: name of the variable pointing to the object.
- formatter: callable (optional)
A special formatter for docstrings.
The formatter is a callable that takes a string as an input
and returns either a formatted string or a mime type bundle
in the form of a dictionary.
Although the support of custom formatter returning a string
instead of a mime type bundle is deprecated.
- info: a structure with some information fields which may have been
precomputed already.
- detail_level: if set to 1, more information is given.
- omit_sections: set of section keys and titles to omit
"""
info = self._get_info(
obj, oname, formatter, info, detail_level, omit_sections=omit_sections
)
if not enable_html_pager:
del info['text/html']
page.page(info)
def _info(self, obj, oname="", info=None, detail_level=0):
"""
Inspector.info() was likely improperly marked as deprecated
while only a parameter was deprecated. We "un-deprecate" it.
"""
warnings.warn(
"The `Inspector.info()` method has been un-deprecated as of 8.0 "
"and the `formatter=` keyword removed. `Inspector._info` is now "
"an alias, and you can just call `.info()` directly.",
DeprecationWarning,
stacklevel=2,
)
return self.info(obj, oname=oname, info=info, detail_level=detail_level)
def info(self, obj, oname="", info=None, detail_level=0) -> dict:
"""Compute a dict with detailed information about an object.
Parameters
----------
obj : any
An object to find information about
oname : str (default: '')
Name of the variable pointing to `obj`.
info : (default: None)
A struct (dict like with attr access) with some information fields
which may have been precomputed already.
detail_level : int (default:0)
If set to 1, more information is given.
Returns
-------
An object info dict with known fields from `info_fields`. Keys are
strings, values are string or None.
"""
if info is None:
ismagic = False
isalias = False
ospace = ''
else:
ismagic = info.ismagic
isalias = info.isalias
ospace = info.namespace
# Get docstring, special-casing aliases:
if isalias:
if not callable(obj):
try:
ds = "Alias to the system command:\n %s" % obj[1]
except:
ds = "Alias: " + str(obj)
else:
ds = "Alias to " + str(obj)
if obj.__doc__:
ds += "\nDocstring:\n" + obj.__doc__
else:
ds = getdoc(obj)
if ds is None:
ds = '<no docstring>'
# store output in a dict, we initialize it here and fill it as we go
out = dict(name=oname, found=True, isalias=isalias, ismagic=ismagic, subclasses=None)
string_max = 200 # max size of strings to show (snipped if longer)
shalf = int((string_max - 5) / 2)
if ismagic:
out['type_name'] = 'Magic function'
elif isalias:
out['type_name'] = 'System alias'
else:
out['type_name'] = type(obj).__name__
try:
bclass = obj.__class__
out['base_class'] = str(bclass)
except:
pass
# String form, but snip if too long in ? form (full in ??)
if detail_level >= self.str_detail_level:
try:
ostr = str(obj)
str_head = 'string_form'
if not detail_level and len(ostr)>string_max:
ostr = ostr[:shalf] + ' <...> ' + ostr[-shalf:]
ostr = ("\n" + " " * len(str_head.expandtabs())).\
join(q.strip() for q in ostr.split("\n"))
out[str_head] = ostr
except:
pass
if ospace:
out['namespace'] = ospace
# Length (for strings and lists)
try:
out['length'] = str(len(obj))
except Exception:
pass
# Filename where object was defined
binary_file = False
fname = find_file(obj)
if fname is None:
# if anything goes wrong, we don't want to show source, so it's as
# if the file was binary
binary_file = True
else:
if fname.endswith(('.so', '.dll', '.pyd')):
binary_file = True
elif fname.endswith('<string>'):
fname = 'Dynamically generated function. No source code available.'
out['file'] = compress_user(fname)
# Original source code for a callable, class or property.
if detail_level:
# Flush the source cache because inspect can return out-of-date
# source
linecache.checkcache()
try:
if isinstance(obj, property) or not binary_file:
src = getsource(obj, oname)
if src is not None:
src = src.rstrip()
out['source'] = src
except Exception:
pass
# Add docstring only if no source is to be shown (avoid repetitions).
if ds and not self._source_contains_docstring(out.get('source'), ds):
out['docstring'] = ds
# Constructor docstring for classes
if inspect.isclass(obj):
out['isclass'] = True
# get the init signature:
try:
init_def = self._getdef(obj, oname)
except AttributeError:
init_def = None
# get the __init__ docstring
try:
obj_init = obj.__init__
except AttributeError:
init_ds = None
else:
if init_def is None:
# Get signature from init if top-level sig failed.
# Can happen for built-in types (list, etc.).
try:
init_def = self._getdef(obj_init, oname)
except AttributeError:
pass
init_ds = getdoc(obj_init)
# Skip Python's auto-generated docstrings
if init_ds == _object_init_docstring:
init_ds = None
if init_def:
out['init_definition'] = init_def
if init_ds:
out['init_docstring'] = init_ds
names = [sub.__name__ for sub in type.__subclasses__(obj)]
if len(names) < 10:
all_names = ', '.join(names)
else:
all_names = ', '.join(names[:10]+['...'])
out['subclasses'] = all_names
# and class docstring for instances:
else:
# reconstruct the function definition and print it:
defln = self._getdef(obj, oname)
if defln:
out['definition'] = defln
# First, check whether the instance docstring is identical to the
# class one, and print it separately if they don't coincide. In
# most cases they will, but it's nice to print all the info for
# objects which use instance-customized docstrings.
if ds:
try:
cls = getattr(obj,'__class__')
except:
class_ds = None
else:
class_ds = getdoc(cls)
# Skip Python's auto-generated docstrings
if class_ds in _builtin_type_docstrings:
class_ds = None
if class_ds and ds != class_ds:
out['class_docstring'] = class_ds
# Next, try to show constructor docstrings
try:
init_ds = getdoc(obj.__init__)
# Skip Python's auto-generated docstrings
if init_ds == _object_init_docstring:
init_ds = None
except AttributeError:
init_ds = None
if init_ds:
out['init_docstring'] = init_ds
# Call form docstring for callable instances
if safe_hasattr(obj, '__call__') and not is_simple_callable(obj):
call_def = self._getdef(obj.__call__, oname)
if call_def and (call_def != out.get('definition')):
# it may never be the case that call def and definition differ,
# but don't include the same signature twice
out['call_def'] = call_def
call_ds = getdoc(obj.__call__)
# Skip Python's auto-generated docstrings
if call_ds == _func_call_docstring:
call_ds = None
if call_ds:
out['call_docstring'] = call_ds
return object_info(**out)
@staticmethod
def _source_contains_docstring(src, doc):
"""
Check whether the source *src* contains the docstring *doc*.
        This is a helper function used to skip displaying the docstring if the
source already contains it, avoiding repetition of information.
"""
try:
def_node, = ast.parse(dedent(src)).body
return ast.get_docstring(def_node) == doc
except Exception:
# The source can become invalid or even non-existent (because it
            # is re-fetched from the source file) so the above code can fail in
# arbitrary ways.
return False
def psearch(self,pattern,ns_table,ns_search=[],
ignore_case=False,show_all=False, *, list_types=False):
"""Search namespaces with wildcards for objects.
Arguments:
- pattern: string containing shell-like wildcards to use in namespace
searches and optionally a type specification to narrow the search to
objects of that type.
- ns_table: dict of name->namespaces for search.
Optional arguments:
- ns_search: list of namespace names to include in search.
- ignore_case(False): make the search case-insensitive.
- show_all(False): show all names, including those starting with
underscores.
- list_types(False): list all available object types for object matching.
"""
#print 'ps pattern:<%r>' % pattern # dbg
# defaults
type_pattern = 'all'
filter = ''
# list all object types
if list_types:
page.page('\n'.join(sorted(typestr2type)))
return
cmds = pattern.split()
len_cmds = len(cmds)
if len_cmds == 1:
# Only filter pattern given
filter = cmds[0]
elif len_cmds == 2:
# Both filter and type specified
filter,type_pattern = cmds
else:
raise ValueError('invalid argument string for psearch: <%s>' %
pattern)
# filter search namespaces
for name in ns_search:
if name not in ns_table:
raise ValueError('invalid namespace <%s>. Valid names: %s' %
(name,ns_table.keys()))
#print 'type_pattern:',type_pattern # dbg
search_result, namespaces_seen = set(), set()
for ns_name in ns_search:
ns = ns_table[ns_name]
# Normally, locals and globals are the same, so we just check one.
if id(ns) in namespaces_seen:
continue
namespaces_seen.add(id(ns))
tmp_res = list_namespace(ns, type_pattern, filter,
ignore_case=ignore_case, show_all=show_all)
search_result.update(tmp_res)
page.page('\n'.join(sorted(search_result)))
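# Rough usage sketch of the pattern syntax described in the docstring above,
# assuming an already-constructed Inspector instance `insp` and a module
# `some_module` (both names are placeholders): list all functions in the
# "user" namespace whose names start with "get".
#
#     insp.psearch('get* function', {'user': vars(some_module)}, ns_search=['user'])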
def _render_signature(obj_signature, obj_name) -> str:
"""
This was mostly taken from inspect.Signature.__str__.
Look there for the comments.
The only change is to add linebreaks when this gets too long.
"""
result = []
pos_only = False
kw_only = True
for param in obj_signature.parameters.values():
if param.kind == inspect._POSITIONAL_ONLY:
pos_only = True
elif pos_only:
result.append('/')
pos_only = False
if param.kind == inspect._VAR_POSITIONAL:
kw_only = False
elif param.kind == inspect._KEYWORD_ONLY and kw_only:
result.append('*')
kw_only = False
result.append(str(param))
if pos_only:
result.append('/')
# add up name, parameters, braces (2), and commas
if len(obj_name) + sum(len(r) + 2 for r in result) > 75:
# This doesn’t fit behind “Signature: ” in an inspect window.
rendered = '{}(\n{})'.format(obj_name, ''.join(
' {},\n'.format(r) for r in result)
)
else:
rendered = '{}({})'.format(obj_name, ', '.join(result))
if obj_signature.return_annotation is not inspect._empty:
anno = inspect.formatannotation(obj_signature.return_annotation)
rendered += ' -> {}'.format(anno)
return rendered
|
py
|
1a5c0b36e3b8ee789ebf927494482b077801d477
|
class Solution:
def minCostToSupplyWater(self, n: int, wells, pipes) -> int:
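        # Treat node 0 as a virtual water source: digging a well in house i
        # costs the same as laying a pipe 0 -> (i + 1). The answer is then the
        # weight of a minimum spanning tree over nodes 0..n, found here with
        # Kruskal's algorithm and a path-compressing union-find.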
q = sorted([[w, u, v] for u, v, w in pipes] + [[w, 0, i+1] for i, w in enumerate(wells)])
uf = [i for i in range(n+1)]
res = count = 0
def find(x):
if (x != uf[x]):
uf[x] = find(uf[x])
return uf[x]
def union(x, y):
uf[x] = y
for w, u, v in q:
rA, rB = find(u), find(v)
if rA == rB:
continue
union(rA, rB)
res += w
count += 1
if count == n:
return res
return res
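# A small worked example (inputs chosen for illustration): three houses,
# wells = [1, 2, 2], pipes = [[1, 2, 1], [2, 3, 1]]. Digging the well in
# house 1 and laying both pipes costs 1 + 1 + 1 = 3.
if __name__ == "__main__":
    print(Solution().minCostToSupplyWater(3, [1, 2, 2], [[1, 2, 1], [2, 3, 1]]))  # expected: 3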
|
py
|
1a5c0bb53b0090b0f954d044c56d617b12eccb14
|
import attr, os
from pprint import PrettyPrinter
from mavetools.client.client import Client
from mavetools.models.experiment import Experiment
pp = PrettyPrinter(indent=2)  # displays results in a readable format
# check environment variables and see if variable named MAVEDB_BASE_URL exists and return value
# if the value does not exist, an empty string is returned instead
base_url = os.getenv("MAVEDB_BASE_URL", "")
experiment_urn = "urn:mavedb:00000001-a" # the urn of the experiment we want to get
# Generate a new auth_token in your profile and post it here
auth_token = "AseyaNLLhqv9jAm0joMkq2oqB0bw3GKxTclkT2NtG340RF6CfdM2UC3j8Fv4RpbQ"
# auth_token =
# if the base url exists, the client object is instantiated with that value
# otherwise the client object is instantiated with default value which points to localhost
client = (
Client(base_url, auth_token=auth_token)
if base_url
else Client(auth_token=auth_token)
)
# using the client object, GET the model instance of an Experiment with a particular urn
# GET retrieves a resource from the server via the appropriate API endpoint
experiment = client.get_model_instance(Experiment, experiment_urn)
# display results
pp.pprint(attr.asdict(experiment))
|
py
|
1a5c0c71a4d25d4365a1946b6e01a78f1695df42
|
from input import parse
from word2vec1 import word2vec, dictionaries
from collections import namedtuple,OrderedDict
import numpy as np
import json
import gensim
import copy
import logging
def training(fn, wordvecpath):
if not wordvecpath:
word2vec(fn)
wordvecpath = './tmpdata/vecs.bin'
ndeprel = dictionaries(fn)
X_lengths = np.array([])
Arcs = namedtuple('Arcs', ['headid', 'headform', 'tailid', 'tailform', 'deprel'])
Transition = namedtuple('Transition', ['transition', 'label'])
with open('./tmpdata/deprel.json', 'r') as fp:
dictionary2 = json.load(fp)
f = open(fn, 'r')
data = f.read()
mode = gensim.models.Word2Vec.load(wordvecpath)
model = mode.wv
vecdims = mode.layer1_size
vecdims = vecdims+11+2+2
del mode
Y2 = np.zeros([1, 4+ndeprel])
X2 = np.zeros([1, vecdims*5+4])
sid=0
buffer1 = []
stack = []
arcs = []
listofTransitions = []
for sent in parse(data):
del buffer1[:]
del stack[:]
del arcs[:]
buffer1 = copy.deepcopy(sent)
buffer1.append(OrderedDict(
[("id", 0), ("form", 'root'), ("lemma", 'root'), ("upostag", 'root'), ("xpostag", 'root'), ("feats", 'root'), ("head", -1),
("deprel", 'root'), ("deps", 'root'), ("misc", 'root'), ]))
flag=True
for word in sent:
if not pcheck(word['id'],word['head'],sent):
del buffer1[:]
flag=False
break
i=0
while buffer1:
transi, label = oracle(stack, buffer1, arcs)
trans = Transition(transi, label)
i+=1
X,t = nn(stack, buffer1, trans, dictionary2, model, sent, arcs, vecdims, ndeprel)
X2 = np.vstack((X2,X))
Y2 = np.vstack((Y2,t))
if trans.transition == 0: # SHIFT
stack.insert(0, buffer1[0])
del buffer1[0]
listofTransitions.append(trans.transition)
elif trans.transition == 1: # REDUCE
del stack[0]
listofTransitions.append(trans.transition)
            elif trans.transition == 2: # LEFT ARC
arcs.append(Arcs(buffer1[0]['id'], buffer1[0]['form'], stack[0]['id'], stack[0]['form'], trans.label))
del stack[0]
listofTransitions.append(trans.transition)
elif trans.transition == 3: # RIGHT ARC
arcs.append(Arcs(stack[0]['id'], stack[0]['form'], buffer1[0]['id'], buffer1[0]['form'], trans.label))
stack.insert(0, buffer1[0])
del buffer1[0]
listofTransitions.append(trans.transition)
if flag : X_lengths = np.append(X_lengths, i)
sid+=1
logging.info ('vectorising sentence : '+str(sid))
X2 = np.delete(X2, 0, axis=0)
Y2 = np.delete(Y2, 0, axis=0)
return X2,Y2,X_lengths
def oracle(stack, buffer1, arcs):
global i
if not stack:
return 0, ""
if not buffer1[0] :
del buffer1[:]
i-=1
return 1, ""
s0id = stack[0]['id']
s0head = stack[0]['head']
b0id = buffer1[0]['id']
b0head = buffer1[0]['head']
if b0id == s0head:
return 2, stack[0]['deprel']
elif s0id == b0head:
return 3, buffer1[0]['deprel']
elif head(stack[0], arcs) != -1 and b0head<s0head :
return 1, ""
return 0, ""
def head(stackc, arcs):
for a in arcs:
if a.headid == stackc['head']:
return a.headid
return -1
def nn(stack, buffer1, trans, dictionary2, model, sent, arcs, vecdims, ndeprel):
mones = [-1] * vecdims
ones = [1] * (vecdims-4)
zeros = [0] * (vecdims-15)
dep = [-1]*4
sentenc = np.array([])
words=["_","_","_","_","_"]
if stack:
words.pop(0)
words.insert(0,stack[0])
dep[0] = iofdeprel(rightchild(stack[0], arcs))
dep[1] = iofdeprel(leftchild(stack[0], arcs))
if len(stack) > 1:
words.pop(1)
words.insert(1,stack[1])
if buffer1:
words.pop(2)
words.insert(2,buffer1[0])
dep[2] = iofdeprel(rightchild(buffer1[0], arcs))
dep[3] = iofdeprel(leftchild(buffer1[0], arcs))
if len(buffer1) > 1:
words.pop(3)
words.insert(3,buffer1[1])
if len(buffer1) > 2:
words.pop(4)
words.insert(4, buffer1[2])
for w in words:
if w == '_':
sentenc = np.hstack((sentenc, mones))
elif w['form'] == 'root':
sentenc = np.hstack((sentenc, ones, D(w['upostag'], dictionary2), D(w['xpostag'], dictionary2), w['id'], len(sent)))
elif w['form'] in model.vocab:
sentenc = np.hstack((sentenc, model[w['form']], featureids(w['feats'], dictionary2),D(w['upostag'], dictionary2), D(w['xpostag'], dictionary2), w['id'], len(sent)))
elif w['form'] is not None:
sentenc = np.hstack((sentenc, zeros, featureids(w['feats'], dictionary2), D(w['upostag'], dictionary2), D(w['xpostag'], dictionary2), w['id'], len(sent)))
else:
sentenc = np.hstack((sentenc, mones))
sentenc = np.hstack((sentenc,dep))
t = trans.transition
if t > 1:
t = np.hstack((np.eye(4)[t], np.eye(ndeprel)[iofdeprel(trans.label)-1]))
else:
t = np.hstack((np.eye(4)[t], np.zeros(ndeprel)))
return sentenc, t
def D(key, dic):
if dic.get(key): return dic[key]
    return -1
def featureids(feats1, dic):
f=[-1]*11
if feats1['cat'] in dic: f[0] = dic[feats1['cat']]
if feats1['gen'] in dic: f[1] = dic[feats1['gen']]
if feats1['num'] in dic: f[2] = dic[feats1['num']]
if feats1['pers'] in dic: f[3] = dic[feats1['pers']]
if feats1['case'] in dic: f[4] = dic[feats1['case']]
if feats1['vib'] in dic: f[5] = dic[feats1['vib']]
if feats1['tam'] in dic: f[6] = dic[feats1['tam']]
if feats1['chunkId'] in dic: f[7] = dic[feats1['chunkId']]
if feats1['chunkType'] in dic: f[8] = dic[feats1['chunkType']]
if feats1['stype'] in dic: f[9] = dic[feats1['stype']]
    if feats1['voicetype'] in dic: f[10] = dic[feats1['voicetype']]  # last slot; f[0] already holds 'cat'
return f
def rightchild(stackc, arcs):
id=-1
deprel=""
for a in arcs :
if a.headid == stackc['id'] and a.tailid > stackc['id']:
if id==-1 :
id=a.tailid
deprel=a.deprel
else :
if id < a.tailid :
id = a.tailid
deprel = a.deprel
return deprel
def leftchild(stackc, arcs):
id=-1
deprel=""
for a in arcs :
if a.headid == stackc['id'] and a.tailid < stackc['id'] :
            if id == -1 :
id = a.tailid
deprel = a.deprel
else :
if id > a.tailid :
id = a.tailid
deprel = a.deprel
return deprel
def iofdeprel(ele):
with open('./tmpdata/deprel.json', 'r') as fp:
dict = json.load(fp)
if ele in dict: return dict[ele]
return -1
def pcheck(id1,id2,sentence):
flag=True
if id2>id1:
for words in sentence[id1:id2-1]:
if words['head'] > id2 or words['head'] < id1:
flag=False
break
if id1>id2:
for words in sentence[id2:id1-1]:
if words['head'] > id1 or words['head'] < id2 :
flag=False
break
return flag
|
py
|
1a5c0ec56d1b0962a2df1fbad75c0030e5c0a1c4
|
import os
import time
DEBUG = False
API_URL_PREFIX = "/api/v0"
HOST = '0.0.0.0'
PORT = 5001
ENABLE_CORS = False
#folders and file path
download_folder = 'upload'
TASK_STAT = 'FILE-CONVERTER'
CONSUMER_GROUP = 'anuvaad-etl-fc-consumer-group'
#mongo
MONGO_IP = 'MONGO_IP'
DEFAULT_VALUE = 'localhost'
MONGO_DB_IDENTIFIER = 'MONGO_DB'
DEFAULT_MONGO_DB_IDENTIFIER = 'preprocessing'
MONGO_SERVER_URL = os.environ.get(MONGO_IP, DEFAULT_VALUE)
MONGO_DB = os.environ.get(MONGO_DB_IDENTIFIER, DEFAULT_MONGO_DB_IDENTIFIER)
# kafka
consumer_grp_default = 'anuvaad-etl-fc-consumer-group'
consumer_grp_identifier = 'KAFKA_ANUVAAD_ETL_FC_CONSUMER_GRP'
CONSUMER_GROUP = os.environ.get(consumer_grp_identifier, consumer_grp_default)
tok_input_topic_default = 'anuvaad-dp-tools-fc-input-v1'
tok_input_topic_identifier = 'KAFKA_ANUVAAD_DP_TOOLS_FC_INPUT'
tok_input_topic = os.environ.get(tok_input_topic_identifier, tok_input_topic_default)
tok_output_topic_default = 'anuvaad-dp-tools-fc-output-v1'
tok_output_topic_identifier = 'KAFKA_ANUVAAD_DP_TOOLS_FC_OUTPUT'
tok_output_topic = os.environ.get(tok_output_topic_identifier, tok_output_topic_default)
kf_local_server = 'localhost:9092'
kafka_ip_host = 'KAFKA_CLUSTER_DETAILS'
bootstrap_server = os.environ.get(kafka_ip_host, kf_local_server)
|
py
|
1a5c0feb40363c37d267f58ece23d2fd2353befb
|
####################
from Code.image_preprocessing.quality_check_image_similarity import quality_check_image_similarity
## run the pipeline
# for test
##
caps_directory= '/teams/ARAMIS/PROJECTS/CLINICA/CLINICA_datasets/temp/CAPS_ADNI_DL'
tsv= '/teams/ARAMIS/PROJECTS/junhao.wen/PhD/ADNI_classification/gitlabs/AD-DL/tsv_files/ADNI_MCI_T1_rest.tsv'
working_dir = '/teams/ARAMIS/PROJECTS/junhao.wen/PhD/ADNI_classification/gitlabs/AD-DL/Results/working_dir'
ref_template = '/teams/ARAMIS/PROJECTS/junhao.wen/PhD/ADNI_classification/gitlabs/AD-DL/Data/mni_icbm152_nlin_sym_09c_nifti/mni_icbm152_nlin_sym_09c/mni_icbm152_t1_tal_nlin_sym_09c.nii'
wf = quality_check_image_similarity(caps_directory, tsv, ref_template, working_directory=working_dir)
wf.run(plugin='MultiProc', plugin_args={'n_procs': 8})
|
py
|
1a5c109a8b724f0fc1f458714dfb44686d3f41b6
|
#!/usr/bin/env python3
# This script downloads videos from the pornhd site, fetching only the 720p resolution.
import requests
import sys
import re
import json
import os
import time
page_url = "https://www.pornhd.com/videos"
video_url_info = "https://api.pornhd.com/videos/get-download-url?videoId=%d&resolution=720"
post_headers = {
# ":authority" : "api.pornhd.com",
# ":method": "POST",
# ":scheme": "https",
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9",
"content-type": "application/x-www-form-urlencoded; charset=UTF-8",
"origin": "https://www.pornhd.com",
# "referer": "https://www.pornhd.com/videos/44756/peta-jensen-is-a-fucking-perfect-bombshell-hd-porn-video"
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.75 Safari/537.36"
}
download_path = "/var/video/pornhd"
def formatFloat(num):
return '{:.2f}'.format(num)
def download_video(fpath, furl):
print("begin to download this video---------------------------------->")
print("vedio file save path: %s" % fpath)
print("vedio download url: %s" % furl)
request_video = requests.get(furl, stream=True)
length = float(request_video.headers['content-length'])
with open(fpath, 'wb') as f:
count = 0
count_tmp = 0
time1 = time.time()
for chunk in request_video.iter_content(chunk_size = 512):
if chunk:
f.write(chunk)
f.flush()
count += len(chunk)
if time.time() - time1 > 2:
p = count / length * 100
speed = (count - count_tmp) / 1024 / 1024 / 2
count_tmp = count
print(fpath + ': ' + formatFloat(p) + '%' + ' Speed: ' + formatFloat(speed) + 'M/S')
time1 = time.time()
print("------------------------------------->video download finished!")
def main():
if not os.path.isdir(download_path):
os.makedirs(download_path)
vid = sys.argv[1] or 0
if vid:
try:
vid = int(vid)
except Exception as e:
print(e)
sys.exit(1)
v_url = "%s/%d" % (page_url, vid)
        try:
            html_res = requests.get(v_url)
        except Exception as e:
            print('[Error] requests send get request error!')
            print(e)
            sys.exit(1)
        html_content = html_res.text
        csrf_match = re.search(r'\w*==', html_content)
        meta_match = re.search(r'<meta name="og:url"(.*?)">', html_content)
        if not csrf_match or not meta_match:
            print("[Error] parse html goes error! Please Check!")
            sys.exit(1)
        csrf_token = csrf_match.group()
        meta_name = meta_match.group()
meta_list = meta_name.split('"')
if len(meta_list) < 3:
print(meta_name)
print("[Error] meta info parse error! Please check!")
sys.exit(1)
video_url = meta_list[3] or None
if not video_url:
print(meta_list)
print("[Error] video_url cant be parsed from meta_list! Please check!")
sys.exit(1)
post_headers["referer"] = video_url
video_name_list = video_url.split('/')
video_name = video_name_list[-1]
post_url = video_url_info % vid
r = requests.post(post_url, data={'_csrf-frontend':csrf_token, 'domain': 'www.pornhd.com', '_jwt':'' }, headers=post_headers)
# print("----------------------------------------------------------------------")
# print(r.status_code)
# print(r.headers)
# print(r.text.encode('unicode_escape').decode('utf-8'))
# print("----------------------------------------------------------------------")
if r.status_code == 200:
res_dict = json.loads(r.text)
if res_dict.get('status') == 'success':
video_download_url = res_dict.get('result') or None
if video_download_url:
f_name = "%s/%s.mp4" % (download_path, video_name)
download_video(f_name, video_download_url)
else:
print("[Error] result field is empty!")
sys.exit(1)
else:
print('[Error] status is not success!')
sys.exit(1)
else:
print('[Error] status code is not 200!')
sys.exit(1)
else:
print('[Error] vid is None!')
sys.exit(1)
if __name__ == '__main__':
if len(sys.argv) != 2:
        print('[Error] exactly one video id argument is required!')
sys.exit(1)
main()
|
py
|
1a5c110c6fd65c2edc2b3617ec08ca96d112137a
|
from .main import *
__version__ = "0.0.1"
|
py
|
1a5c11f3f9b01d911f964a3643bc6c185ada95fc
|
import vtk
from heartFEM.lcleeHeart import vtk_py as vtk_py
import dolfin
import numpy as np
def extractFeNiCsBiVFacet(ugrid,savePath='', geometry="BiV", tol=1e-2):
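    # Extracts a FEniCS (dolfin) mesh plus facet and edge MeshFunctions from a VTK unstructured grid
    # of a heart geometry. Judging from the bc_pts_map assignments below, facets are marked as
    # 1 = epicardium, 2 = LV endocardium, 3 = RV endocardium (BiV only), 4 = base.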
#tol = 1e-2
#ugrid = vtk_py.readUGrid(meshfilename)
# Extract surface
geom = vtk.vtkGeometryFilter()
if(vtk.vtkVersion().GetVTKMajorVersion() < 6):
geom.SetInput(ugrid)
else:
geom.SetInputData(ugrid)
geom.Update()
surf = geom.GetOutput()
bc_pts_locator = []
bc_pts = []
bc_pts_range = []
bc_pts_map = []
# Extract Surface Normal
normal = vtk.vtkPolyDataNormals()
if(vtk.vtkVersion().GetVTKMajorVersion() < 6):
normal.SetInput(surf)
else:
normal.SetInputData(surf)
normal.ComputeCellNormalsOn()
normal.Update()
surf_w_norm = normal.GetOutput()
#vtk_py.writePData(normal.GetOutput(), "normal.vtk")
zmax = surf_w_norm.GetBounds()[5]
surf_w_norm.BuildLinks()
idlist = vtk.vtkIdList()
basecellidlist = vtk.vtkIdTypeArray()
basesurf = vtk.vtkPolyData()
for p in range(0, surf_w_norm.GetNumberOfCells()):
zvec = surf_w_norm.GetCellData().GetNormals().GetTuple3(p)[2]
surf_w_norm.GetCellPoints(p, idlist)
zpos = surf_w_norm.GetPoints().GetPoint(idlist.GetId(0))[2]
if((abs(zvec - 1.0) < tol or abs(zvec + 1.0) < tol) and (abs(zmax - zpos) < tol)):
surf_w_norm.DeleteCell(p)
basecellidlist.InsertNextValue(p)
basesurf = vtk_py.extractCellFromPData(basecellidlist, surf)
baseptlocator = vtk.vtkPointLocator()
baseptlocator.SetDataSet(basesurf)
baseptlocator.BuildLocator()
#######################################################################
surf_w_norm.RemoveDeletedCells()
cleanpdata = vtk.vtkCleanPolyData()
if(vtk.vtkVersion().GetVTKMajorVersion() < 6):
cleanpdata.SetInput(surf_w_norm)
else:
cleanpdata.SetInputData(surf_w_norm)
cleanpdata.Update()
connfilter = vtk.vtkPolyDataConnectivityFilter()
if(vtk.vtkVersion().GetVTKMajorVersion() < 6):
connfilter.SetInput(cleanpdata.GetOutput())
else:
connfilter.SetInputData(cleanpdata.GetOutput())
connfilter.Update()
print ("Total_num_points = ", cleanpdata.GetOutput().GetNumberOfPoints())
tpt = 0
if(geometry=="BiV"):
nsurf = 3
else:
nsurf = 2
for p in range(0,nsurf):
pts = vtk.vtkPolyData()
connfilter.SetExtractionModeToSpecifiedRegions()
[connfilter.DeleteSpecifiedRegion(k) for k in range(0,nsurf)]
connfilter.AddSpecifiedRegion(p)
connfilter.ScalarConnectivityOff()
connfilter.FullScalarConnectivityOff()
connfilter.Update()
cleanpdata2 = vtk.vtkCleanPolyData()
if(vtk.vtkVersion().GetVTKMajorVersion() < 6):
cleanpdata2.SetInput(connfilter.GetOutput())
else:
cleanpdata2.SetInputData(connfilter.GetOutput())
cleanpdata2.Update()
pts.DeepCopy(cleanpdata2.GetOutput())
tpt = tpt + cleanpdata2.GetOutput().GetNumberOfPoints()
ptlocator = vtk.vtkPointLocator()
ptlocator.SetDataSet(pts)
ptlocator.BuildLocator()
bc_pts_locator.append(ptlocator)
bc_pts.append(pts)
bc_pts_range.append([abs(pts.GetBounds()[k+1] - pts.GetBounds()[k]) for k in range(0, 6, 2)])
#vtk_py.writePData(connfilter.GetOutput(), "/home/likchuan/Research/fenicsheartmesh/ellipsoidal/Geometry/test.vtk")
print ("Total_num_points = ", tpt)
Epiid = np.argmax(np.array([max(pts) for pts in bc_pts_range]))
maxzrank = np.array([pts[2] for pts in bc_pts_range]).argsort()
if(geometry=="BiV"):
LVid = maxzrank[1]
RVid = 3 - (LVid + Epiid)
bc_pts_map = [4, 4, 4, 4]
bc_pts_map[Epiid] = 1; bc_pts_map[LVid] = 2; bc_pts_map[RVid] = 3
        baseid = 3
else:
LVid = maxzrank[0]
bc_pts_map = [4, 4, 4]
bc_pts_map[Epiid] = 1; bc_pts_map[LVid] = 2
        baseid = 2
bc_pts_locator.append(baseptlocator)
bc_pts.append(basesurf)
dolfin_mesh = vtk_py.convertUGridToXMLMesh(ugrid)
#dolfin_facets = dolfin.FacetFunction('size_t', dolfin_mesh)
dolfin_facets = dolfin.MeshFunction('size_t', dolfin_mesh,dolfin_mesh.topology().dim()-1, dolfin_mesh.domains())
dolfin_facets.set_all(0)
for facet in dolfin.SubsetIterator(dolfin_facets, 0):
for locator in range(0,nsurf+1):
cnt = 0
for p in range(0,3):
v0 = dolfin.Vertex(dolfin_mesh, facet.entities(0)[p]).x(0)
v1 = dolfin.Vertex(dolfin_mesh, facet.entities(0)[p]).x(1)
v2 = dolfin.Vertex(dolfin_mesh, facet.entities(0)[p]).x(2)
ptid = bc_pts_locator[locator].FindClosestPoint(v0, v1, v2)
x0 = bc_pts[locator].GetPoints().GetPoint(ptid)
dist = vtk.vtkMath.Distance2BetweenPoints([v0,v1,v2], x0)
if(dist < 1e-5*tol):
cnt = cnt + 1
if(cnt == 3):
dolfin_facets[facet] = bc_pts_map[locator]
#dolfin_edges = dolfin.EdgeFunction('size_t', dolfin_mesh)
dolfin_edges = dolfin.MeshFunction('size_t', dolfin_mesh,1, dolfin_mesh.domains())
dolfin_edges.set_all(0)
epilocator = Epiid
lvendolocator = LVid
for edge in dolfin.SubsetIterator(dolfin_edges, 0):
cnt_epi = 0; cnt_lvendo = 0;
for p in range(0,2):
v0 = dolfin.Vertex(dolfin_mesh, edge.entities(0)[p]).x(0)
v1 = dolfin.Vertex(dolfin_mesh, edge.entities(0)[p]).x(1)
v2 = dolfin.Vertex(dolfin_mesh, edge.entities(0)[p]).x(2)
epiptid = bc_pts_locator[epilocator].FindClosestPoint(v0, v1, v2)
epix0 = bc_pts[epilocator].GetPoints().GetPoint(epiptid)
epidist = vtk.vtkMath.Distance2BetweenPoints([v0,v1,v2], epix0)
topptid = bc_pts_locator[baseid].FindClosestPoint(v0, v1, v2)
topx0 = bc_pts[baseid].GetPoints().GetPoint(topptid)
topdist = vtk.vtkMath.Distance2BetweenPoints([v0,v1,v2], topx0)
lvendoptid = bc_pts_locator[lvendolocator].FindClosestPoint(v0, v1, v2)
lvendox0 = bc_pts[lvendolocator].GetPoints().GetPoint(lvendoptid)
lvendodist = vtk.vtkMath.Distance2BetweenPoints([v0,v1,v2], lvendox0)
if(topdist < 1e-5*tol and epidist < 1e-5*tol):
cnt_epi = cnt_epi + 1
if(topdist < 1e-5*tol and lvendodist < 1e-5*tol):
cnt_lvendo = cnt_lvendo + 1
if(cnt_epi == 2):
dolfin_edges[edge] = 1
if(cnt_lvendo == 2):
dolfin_edges[edge] = 2
dolfin.File(savePath+"temp.pvd") << dolfin_facets
return dolfin_mesh, dolfin_facets, dolfin_edges
|
py
|
1a5c1229142619daf2fb48e17a71a86c74381809
|
# generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "/home/steffy/mobile_robots/catkin_ws/devel/include".split(';') if "/home/steffy/mobile_robots/catkin_ws/devel/include" != "" else []
PROJECT_CATKIN_DEPENDS = "message_runtime;rospy;std_msgs".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "beginner_tutorials"
PROJECT_SPACE_DIR = "/home/steffy/mobile_robots/catkin_ws/devel"
PROJECT_VERSION = "0.0.0"
|
py
|
1a5c134e3a645b8b530c52ae1b4706b6e1e5c064
|
import pytest
from http import HTTPStatus
from django.test import Client
from django.urls import reverse
from luz.base.django_assertions import assertion_contains
@pytest.fixture
def url():
return reverse('graph', args=('chartjs',))
@pytest.fixture
def response(client: Client, url, db):
return client.get(url)
def test_page_status_code_200(response):
assert response.status_code == HTTPStatus.OK
def test_reverse(url):
assert '/graph/chartjs' == url
def test_content(response):
assertion_contains(response, 'Chartjs')
def test_graph_navbar(response):
assertion_contains(response, '<a class="nav-link active" href="/graph/chartjs">Chartjs</a>')
|
py
|
1a5c139940a8ea43e65cdeeb823c6c25ea73274d
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
# vim: set et sw=4 ts=4 sts=4 ff=unix fenc=utf8:
# Author: Binux<[email protected]>
# http://binux.me
# Created on 2014-08-09 11:39:25
import json
import time
import datetime
from tornado import gen
import re
import os
import config
from base import *
import sqlite3
from backup import DBnew
import codecs
import requests
import traceback
from funcs import pusher
def tostr(s):
if isinstance(s, bytearray):
return str(s)
return s
class UserRegPush(BaseHandler):
@tornado.web.authenticated
def get(self, userid):
self.render('user_register_pusher.html', userid=userid)
@tornado.web.authenticated
def post(self, userid):
env = json.loads(self.request.body_arguments['env'][0])
token = env["wxpusher_token"]
uid = env["wxpusher_uid"]
skey = env["skey"]
barkurl = env["barkurl"]
qywx_token = env["qywx_token"]
log = ""
if ("reg" == self.request.body_arguments['func'][0]):
try:
if (token != "") and (uid != ""):
temp = token + ";" + uid
self.db.user.mod(userid, wxpusher = temp)
if (self.db.user.get(userid, fields=("wxpusher"))["wxpusher"] == temp):
log = u"注册 wxpusher 成功\r\n"
else:
log = u"注册 wxpusher 失败\r\n"
else:
log = u"wxpusher 未填写完整\r\n"
if (skey != ""):
self.db.user.mod(userid, skey = skey)
if (self.db.user.get(userid, fields=("skey"))["skey"] == skey):
log = log+u"注册 S酱 成功\r\n"
else:
log = log+u"注册 S酱 失败\r\n"
else:
log = log+u"skey 未填写完整\r\n"
if (barkurl != ""):
if (barkurl[-1] != '/'):
barkurl=barkurl+'/'
self.db.user.mod(userid, barkurl = barkurl)
if (self.db.user.get(userid, fields=("barkurl"))["barkurl"] == barkurl):
log = log+u"注册 Bark 成功\r\n"
else:
log = log+u"注册 Bark 失败\r\n"
else:
log = log+u"Bark 未填写完整\r\n"
if (qywx_token != ""):
self.db.user.mod(userid, qywx_token = qywx_token)
if (self.db.user.get(userid, fields=("qywx_token"))["qywx_token"] == qywx_token):
log = log+u"注册 企业微信 成功\r\n"
else:
log = log+u"注册 企业微信 失败\r\n"
else:
log = log+u"企业微信 未填写完整\r\n"
except Exception as e:
self.render('tpl_run_failed.html', log=e)
return
self.render('utils_run_result.html', log=log, title=u'设置成功', flg='success')
return
else:
try:
f = pusher()
t = datetime.datetime.now().strftime('%y-%m-%d %H:%M:%S')
if (token != "") and (uid != ""):
f.send2wxpusher("{0};{1}".format(token, uid),u"{t} 发送测试".format(t=t))
log = u"wxpusher 已推送,请检查是否收到\r\n"
else:
log = u"wxpusher 未填写完整\r\n"
if (skey != ""):
f.send2s(skey, u"正在测试S酱", u"{t} 发送测试".format(t=t))
log = log+u"S酱 已推送,请检查是否收到\r\n"
else:
log = log+u"skey 未填写完整\r\n"
if (barkurl != ""):
f.send2bark(barkurl, u"正在测试Bark", u"{t} 发送测试".format(t=t))
log = log+u"Bark 已推送,请检查是否收到\r\n"
else:
log = log+u"Bark 未填写完整\r\n"
if (qywx_token != ""):
f.qywx_pusher_send(qywx_token, "正在测试企业微信", u"{t} 发送测试".format(t=t))
log = log+u"企业微信 已推送,请检查是否收到\r\n"
else:
log = log+u"企业微信 未填写完整\r\n"
except Exception as e:
self.render('tpl_run_failed.html', log=e)
return
self.render('utils_run_result.html', log=log, title=u'设置成功', flg='success')
return
class UserRegPushSw(BaseHandler):
@tornado.web.authenticated
def get(self, userid):
tasks = []
for task in self.db.task.list(userid, fields=('id', 'tplid', 'note', 'disabled', 'ctime', 'pushsw'), limit=None):
tpl = self.db.tpl.get(task['tplid'], fields=('id', 'userid', 'sitename', 'siteurl', 'banner', 'note') )
task['tpl'] = tpl
task['pushsw'] = json.loads(task['pushsw'])
tasks.append(task)
temp = self.db.user.get(userid, fields=('noticeflg'))
temp = temp['noticeflg']
flg = {}
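        # noticeflg is a bitmask; per the masks below: bit0 autopush_fail, bit1 autopush_succ,
        # bit2 handpush_fail, bit3 handpush_succ, bit4 wxpusher, bit5 Server-chan (skey), bit6 Bark,
        # bit7 mail pusher, bit8 custom pusher, bit9 enterprise WeChat (qywx).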
flg['barksw'] = False if ((temp & 0x040) == 0) else True
flg['schansw'] = False if ((temp & 0x020) == 0) else True
flg['wxpushersw'] = False if ((temp & 0x010) == 0) else True
flg['mailpushersw'] = False if ((temp & 0x080) == 0) else True
flg['cuspushersw'] = False if ((temp & 0x100) == 0) else True
flg['qywxpushersw'] = False if ((temp & 0x200) == 0) else True
flg['handpush_succ'] = False if ((temp & 0x008) == 0) else True
flg['handpush_fail'] = False if ((temp & 0x004) == 0) else True
flg['autopush_succ'] = False if ((temp & 0x002) == 0) else True
flg['autopush_fail'] = False if ((temp & 0x001) == 0) else True
logtime = json.loads(self.db.user.get(userid, fields=('logtime'))['logtime'])
if 'schanEN' not in logtime:logtime['schanEN'] = False
if 'WXPEn' not in logtime:logtime['WXPEn'] = False
if 'ErrTolerateCnt' not in logtime:logtime['ErrTolerateCnt'] = 0
self.render('user_register_pushsw.html', userid=userid, flg=flg, tasks=tasks, logtime=logtime)
@tornado.web.authenticated
def post(self, userid):
try:
tasks = []
for task in self.db.task.list(userid, fields=('id', 'tplid', 'note', 'disabled', 'ctime', 'pushsw'), limit=None):
tpl = self.db.tpl.get(task['tplid'], fields=('id', 'userid', 'sitename', 'siteurl', 'banner', 'note') )
task['tpl'] = tpl
task['pushsw'] = json.loads(task['pushsw'])
task['pushsw']["logen"] = False
task['pushsw']["pushen"] = False
tasks.append(task)
temp = self.db.user.get(userid, fields=('noticeflg'))
env = json.loads(self.request.body_arguments['env'][0])
logtime = json.loads(self.db.user.get(userid, fields=('logtime'))['logtime'])
if 'ErrTolerateCnt' not in logtime:logtime['ErrTolerateCnt'] = 0
if (logtime['ErrTolerateCnt'] != int(env['ErrTolerateCnt'])):
logtime['ErrTolerateCnt'] = int(env['ErrTolerateCnt'])
self.db.user.mod(userid, logtime=json.dumps(logtime))
barksw_flg = 1 if ("barksw" in env) else 0
schansw_flg = 1 if ("schansw" in env) else 0
wxpushersw_flg = 1 if ("wxpushersw" in env) else 0
mailpushersw_flg = 1 if ("mailpushersw" in env) else 0
cuspushersw_flg = 1 if ("cuspushersw" in env) else 0
qywxpushersw_flg = 1 if ("qywxpushersw" in env) else 0
handpush_succ_flg = 1 if ("handpush_succ" in env) else 0
handpush_fail_flg = 1 if ("handpush_fail" in env) else 0
autopush_succ_flg = 1 if ("autopush_succ" in env) else 0
autopush_fail_flg = 1 if ("autopush_fail" in env) else 0
flg = (qywxpushersw_flg << 9) \
| (cuspushersw_flg << 8) \
| (mailpushersw_flg << 7) \
| (barksw_flg << 6) \
| (schansw_flg << 5) \
| (wxpushersw_flg << 4) \
| (handpush_succ_flg << 3) \
| (handpush_fail_flg << 2) \
| (autopush_succ_flg << 1) \
| (autopush_fail_flg)
for e in env:
temp = re.findall(r"(.+?)pushen", e)
if len(temp) > 0:
taskid = int(temp[0])
for task in tasks:
if (taskid == task["id"]):
task['pushsw']["pushen"] = True
self.db.user.mod(userid, noticeflg=flg)
for task in tasks:
self.db.task.mod(task["id"], pushsw=json.dumps(task['pushsw']))
except Exception as e:
self.render('tpl_run_failed.html', log=e)
return
self.render('utils_run_result.html', log=u"设置完成", title=u'设置成功', flg='success')
return
class UserManagerHandler(BaseHandler):
@tornado.web.authenticated
def get(self, userid):
adminflg = False
users = []
user = self.db.user.get(userid, fields=('role'))
if user and user['role'] == "admin":
adminflg = True
users = []
for user in self.db.user.list(fields=('id','status', 'role', 'ctime', 'email', 'atime', 'email_verified')):
if (user['email_verified'] == 0):
user['email_verified'] = False
else:
user['email_verified'] = True
users.append(user)
self.render("user_manage.html", users=users, userid=userid, adminflg=adminflg)
return
@tornado.web.authenticated
def post(self, userid):
try:
user = self.db.user.get(userid, fields=('role'))
if user and user['role'] == "admin":
envs = self.request.body_arguments
mail = envs['adminmail'][0]
pwd = u"{0}".format(envs['adminpwd'][0])
if self.db.user.challenge(mail, pwd):
Target_users = []
for key, value in envs.items():
if value[0] == "on":
Target_users.append(key)
for sub_user in Target_users:
                        if (self.db.user.get(sub_user, fields=('role'))['role'] != 'admin'):  # compare the 'role' field, matching the admin check above
if 'banbtn' in envs:
self.db.user.mod(sub_user, status='Disable')
for task in self.db.task.list(sub_user, fields=('id'), limit=None):
self.db.task.mod(task['id'], disabled=True)
if 'activatebtn' in envs:
self.db.user.mod(sub_user, status='Enable')
for task in self.db.task.list(sub_user, fields=('id'), limit=None):
self.db.task.mod(task['id'], disabled=False)
if 'delbtn' in envs:
for task in self.db.task.list(sub_user, fields=('id'), limit=None):
self.db.task.delete(task['id'])
logs = self.db.tasklog.list(taskid = task['id'], fields=('id'))
for log in logs:
self.db.tasklog.delete(log['id'])
for tpl in self.db.tpl.list(fields=('id', 'userid'), limit=None):
if tpl['userid'] == int(sub_user):
self.db.tpl.delete(tpl['id'])
self.db.user.delete(sub_user)
else:
raise Exception(u"账号/密码错误")
else:
raise Exception(u"非管理员,不可操作")
except Exception as e:
if (str(e).find('get user need id or email') > -1):
e = u'请输入用户名/密码'
self.render('tpl_run_failed.html', log=e)
return
self.redirect('/my/')
return
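# Database backup/restore handler: an admin can download the raw sqlite file; a user can export
# their templates and tasks as decrypted JSON and later re-import them (re-encrypted on restore).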
class UserDBHandler(BaseHandler):
@tornado.web.authenticated
def get(self, userid):
adminflg = False
user = self.db.user.get(userid, fields=('role'))
if user and user['role'] == "admin":
adminflg = True
self.render("DB_manage.html", userid=userid, adminflg=adminflg)
return
@tornado.web.authenticated
def post(self, userid):
try:
user = self.db.user.get(userid, fields=('role', 'email'))
envs = self.request.body_arguments
mail = envs['adminmail'][0]
pwd = u"{0}".format(envs['adminpwd'][0])
now=datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%S')
if self.db.user.challenge(mail, pwd) and (user['email'] == mail):
if ('backupbtn' in envs):
if user and user['role'] == "admin":
filename = config.sqlite3.path
savename = "database_{now}.db".format(now=now)
self.set_header ('Content-Type', 'application/octet-stream')
self.set_header ('Content-Disposition', 'attachment; filename='+savename)
with open(filename, 'rb') as f:
while True:
data = f.read(1024)
if not data:
break
self.write(data)
self.finish()
return
else:
raise Exception(u"管理员才能备份数据库")
if ('backuptplsbtn' in envs):
tpls = []
for tpl in self.db.tpl.list(userid=userid, fields=('id', 'siteurl', 'sitename', 'banner', 'note','fork', 'groups', 'har', 'tpl', 'variables'), limit=None):
tpl['tpl'] = self.db.user.decrypt(userid, tpl['tpl'])
tpl['har'] = self.db.user.decrypt(userid, tpl['har'])
tpls.append(tpl)
tasks = []
for task in self.db.task.list(userid, fields=('id', 'tplid', 'note', 'disabled', 'groups', 'init_env', 'env', 'ontimeflg', 'ontime', 'pushsw', 'newontime'), limit=None):
task['init_env'] = self.db.user.decrypt(userid, task['init_env'])
task['env'] = self.db.user.decrypt(userid, task['env']) if task['env'] else None
tasks.append(task)
backupdata = {}
backupdata['tpls'] = tpls
backupdata['tasks'] = tasks
savename = "{mail}_{now}.json".format(mail = user['email'], now=now)
fp = codecs.open(savename, 'w', 'utf-8')
fp.write(json.dumps(backupdata, ensure_ascii=False, indent=4 ))
fp.close()
self.set_header ('Content-Type', 'application/octet-stream')
self.set_header ('Content-Disposition', 'attachment; filename='+savename)
with open(savename, 'rb') as f:
while True:
data = f.read(1024)
if not data:
break
self.write(data)
os.remove(savename)
self.finish()
return
if ('recoverytplsbtn' in envs):
if ('recfile' in envs):
tpls = json.loads(envs['recfile'][0])['tpls']
tasks = json.loads(envs['recfile'][0])['tasks']
ids = []
for newtpl in tpls:
userid2 = int(userid)
har = self.db.user.encrypt(userid2, newtpl['har'])
tpl = self.db.user.encrypt(userid2, newtpl['tpl'])
variables = newtpl['variables']
newid = self.db.tpl.add(userid2, har, tpl, variables)
self.db.tpl.mod(newid, fork = newtpl['fork'],
siteurl = newtpl['siteurl'],
sitename = newtpl['sitename'],
note = newtpl['note'],
groups = u'备份还原',
banner = newtpl['banner']
)
for task in tasks:
if (task['tplid'] == newtpl['id']):
task['tplid'] = newid
for newtask in tasks:
userid2 = int(userid)
newtask['init_env'] = self.db.user.encrypt(userid2, newtask['init_env'])
newtask['env'] = self.db.user.encrypt(userid2, newtask['env'])
taskid = self.db.task.add(newtask['tplid'], userid, newtask['env'])
self.db.task.mod(taskid, disabled = newtask['disabled'],
init_env = newtask['init_env'],
session = None,
note = newtask['note'],
groups = u'备份还原',
ontimeflg = newtask['ontimeflg'],
ontime = newtask['ontime'],
pushsw = newtask['pushsw'],
newontime = newtask['newontime']
)
self.render('utils_run_result.html', log=u"设置完成", title=u'设置成功', flg='success')
return
else:
raise Exception(u"请上传文件")
else:
raise Exception(u"账号/密码错误")
except Exception as e:
if (str(e).find('get user need id or email') > -1):
e = u'请输入用户名/密码'
self.render('tpl_run_failed.html', log=e)
return
return
class toolbox_notpad_Handler(BaseHandler):
@tornado.web.authenticated
def get(self,userid):
user = self.current_user
text_data = self.db.user.get(userid, fields=('notepad'))['notepad']
self.render('toolbox-notepad.html', text_data = text_data, userid=userid)
return
@tornado.web.authenticated
def post(self,userid):
try:
user = self.db.user.get(userid, fields=('role', 'email'))
envs = self.request.body_arguments
mail = envs['adminmail'][0]
pwd = u"{0}".format(envs['adminpwd'][0])
if self.db.user.challenge(mail, pwd) and (user['email'] == mail):
if ('mode' in envs) and ('content' in envs):
if (envs['mode'][0] == 'write'):
new_data = envs['content'][0]
else:
data = self.db.user.get(userid, fields=('notepad'))['notepad']
new_data = data + "\r\n" +envs['content'][0]
self.db.user.mod(userid, notepad=new_data)
else:
raise Exception(u"参数错误")
else:
raise Exception(u"账号/密码错误")
except Exception as e:
if (str(e).find('get user need id or email') > -1):
e = u'请输入用户名/密码'
self.render('tpl_run_failed.html', log=e)
return
return
class UserPushShowPvar(BaseHandler):
@tornado.web.authenticated
def post(self,userid):
try:
user = self.db.user.get(userid, fields=('role', 'email'))
envs = self.request.body_arguments
mail = envs['adminmail'][0]
pwd = u"{0}".format(envs['adminpwd'][0])
if self.db.user.challenge(mail, pwd) and (user['email'] == mail):
key = self.db.user.get(userid, fields=("barkurl", 'skey', 'wxpusher', 'qywx_token'))
log = u"""barkurl 前值:{bark}\r\nskey 前值:{skey}\r\nwxpusher 前值:{wxpusher}\r\n企业微信 前值:{qywx_token}""".format(
bark = key['barkurl'],
skey = key['skey'],
wxpusher = key['wxpusher'],
qywx_token = key['qywx_token'])
self.render('utils_run_result.html', log=log, title=u'设置成功', flg='success')
return log
else:
raise Exception(u"账号/密码错误")
except Exception as e:
if (str(e).find('get user need id or email') > -1):
e = u'请输入用户名/密码'
self.render('tpl_run_failed.html', log=e)
return
return
class custom_pusher_Handler(BaseHandler):
@tornado.web.authenticated
def get(self,userid):
diypusher = self.db.user.get(userid, fields=('diypusher'))['diypusher']
diypusher = json.loads(diypusher) if (diypusher != '') else {'mode':'GET'}
self.render('user_register_cus_pusher.html', userid=userid, diypusher=diypusher)
return
@tornado.web.authenticated
def post(self,userid):
try:
envs = self.request.body_arguments
for env in envs.keys():
envs[env] = envs[env][0]
req = pusher()
log = ''
now = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
tmp = req.cus_pusher_send(envs ,u'推送测试', now)
if ('True' == tmp):
if (envs['btn'] == 'regbtn'):
self.db.user.mod(userid, diypusher=json.dumps(envs))
else:
raise Exception(tmp)
log = u'运行成功,请检查是否收到推送'
except Exception as e:
if (str(e).find('get user need id or email') > -1):
e = u'请输入用户名/密码'
traceback.print_exc()
self.render('utils_run_result.html', log=traceback.format_exc(), title=u'设置失败', flg='danger')
return
self.render('utils_run_result.html', log=log, title=u'设置成功', flg='success')
return
class UserSetNewPWDHandler(BaseHandler):
@tornado.web.authenticated
def get(self,userid):
email = self.db.user.get(userid, fields=('email'))['email']
self.render('user_setnewpwd.html', userid=userid, usermail=email)
return
@tornado.web.authenticated
def post(self,userid):
try:
log = u'设置成功'
envs = self.request.body_arguments
for env in envs.keys():
envs[env] = u'{0}'.format(envs[env][0])
adminuser = self.db.user.get(email=envs['管理员邮箱'], fields=('role', 'email'))
newPWD = envs['新密码']
if self.db.user.challenge(envs['管理员邮箱'], envs['管理员密码']) and (adminuser['role'] == 'admin'):
if (len(newPWD) >= 6):
self.db.user.mod(userid, password=newPWD)
if not (self.db.user.challenge(envs['用户名'], newPWD)):
raise Exception(u'修改失败')
else:
raise Exception(u'密码长度要大于6位')
else:
raise Exception(u'管理员用户名/密码错误')
except Exception as e:
if (str(e).find('get user need id or email') > -1):
e = u'请输入用户名/密码'
traceback.print_exc()
self.render('utils_run_result.html', log=traceback.format_exc(), title=u'设置失败', flg='danger')
return
self.render('utils_run_result.html', log=log, title=u'设置成功', flg='success')
return
handlers = [
('/user/(\d+)/pushsw', UserRegPushSw),
('/user/(\d+)/regpush', UserRegPush),
('/user/(\d+)/UserPushShowPvar', UserPushShowPvar),
('/user/(\d+)/manage', UserManagerHandler),
('/user/(\d+)/database', UserDBHandler),
('/util/toolbox/(\d+)/notepad', toolbox_notpad_Handler),
('/util/custom/(\d+)/pusher', custom_pusher_Handler),
('/user/(\d+)/setnewpwd', UserSetNewPWDHandler),
]
|
py
|
1a5c13a98561bf55849b36eecb3c0906175a5333
|
from nornir import InitNornir
from nornir.core.filter import F
import ipdb
ipdb.set_trace()
nr = InitNornir(config_file="config.yaml")
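# Each filter() call returns a new, independently filtered inventory view; F() objects additionally
# support compound boolean expressions (|, &, ~) over host attributes and group membership.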
tmp_nr = nr.filter(name="sros1")
tmp_nr = nr.filter(platform="nokia_sros")
tmp_nr = nr.filter(hostname="vmx1.lasthop.io")
sros = nr.filter(F(groups__contains="sros"))
all_devices = nr.filter(F(groups__contains="sros") | F(groups__contains="junos"))
|
py
|
1a5c13ebf160702b18d0ad717491135a7a829d53
|
import itertools
from collections import defaultdict, namedtuple
from copy import copy
from .block import Block
from .common import Tagged, fail
from .errors import Errors
from .env import env
from .graph import Graph
from .loop import LoopNestTree
from .symbol import Symbol
from .synth import make_synth_params
from .type import Type
from .irvisitor import IRVisitor
from .ir import CONST, JUMP, CJUMP, MCJUMP, PHIBase
from .signal import Signal
from logging import getLogger
logger = getLogger(__name__)
FunctionParam = namedtuple('FunctionParam', ('sym', 'copy', 'defval'))
class Scope(Tagged):
ordered_scopes = []
TAGS = {
'global', 'function', 'class', 'method', 'ctor',
'callable', 'returnable', 'mutable', 'inherited', 'predicate',
'testbench', 'pure',
'module', 'worker', 'instantiated',
'lib', 'namespace', 'builtin', 'decorator',
'port', 'typeclass',
'function_module',
'inlinelib',
'package', 'directory'
}
scope_id = 0
@classmethod
def create(cls, parent, name, tags, lineno=0, origin=None):
if name is None:
name = "unnamed_scope" + str(cls.scope_id)
s = Scope(parent, name, tags, lineno, cls.scope_id)
if s.name in env.scopes:
env.append_scope(s)
fail((env.scope_file_map[s], lineno), Errors.REDEFINED_NAME, {name})
env.append_scope(s)
if origin:
s.origin = origin
env.scope_file_map[s] = env.scope_file_map[origin]
cls.scope_id += 1
return s
@classmethod
def create_namespace(cls, parent, name, tags, path=None):
tags |= {'namespace'}
namespace = Scope.create(parent, name, tags, lineno=1)
namesym = namespace.add_sym('__name__', typ=Type.str_t)
if namespace.is_global():
namespace.constants[namesym] = CONST('__main__')
else:
namespace.constants[namesym] = CONST(namespace.name)
if path:
filesym = namespace.add_sym('__file__', typ=Type.str_t)
namespace.constants[filesym] = CONST(path)
return namespace
@classmethod
def destroy(cls, scope):
assert scope.name in env.scopes
env.remove_scope(scope)
@classmethod
def get_scopes(cls, bottom_up=True, with_global=False, with_class=False, with_lib=False):
def ret_helper():
scopes = cls.ordered_scopes[:]
scopes = [s for s in scopes if not s.is_pure()]
# Exclude an no code scope
scopes = [s for s in scopes
if not (s.is_lib() and s.is_function())
and not (s.is_lib() and s.is_method())
and not s.is_builtin()
and not s.is_decorator()
and not s.is_typeclass()
and not s.is_directory()]
if not with_global:
scopes.remove(Scope.global_scope())
if not with_class:
scopes = [s for s in scopes if not s.is_class()]
if not with_lib:
scopes = [s for s in scopes if not s.is_lib()]
if bottom_up:
scopes.reverse()
return scopes
cls.reorder_scopes()
cls.ordered_scopes = sorted(env.scopes.values())
return ret_helper()
@classmethod
def reorder_scopes(cls):
# hierarchical order
def set_h_order(scope, order):
if order > scope.order[0]:
scope.order = (order, -1)
else:
return
order += 1
for s in scope.children:
set_h_order(s, order)
for s in env.scopes.values():
if s.is_namespace():
s.order = (0, 0)
for f in s.children:
set_h_order(f, 1)
if env.depend_graph:
nodes = env.depend_graph.bfs_ordered_nodes()
for s in nodes:
d_order = nodes.index(s)
preds = env.depend_graph.preds(s)
if preds:
preds_max_order = max([nodes.index(p) for p in preds])
else:
preds_max_order = 0
if d_order < preds_max_order:
s.order = (s.order[0], d_order)
else:
s.order = (s.order[0], preds_max_order + 1)
@classmethod
def get_class_scopes(cls, bottom_up=True):
return [s for s in cls.get_scopes(bottom_up=bottom_up, with_class=True) if s.is_class()]
@classmethod
def global_scope(cls):
return env.scopes[env.global_scope_name]
@classmethod
def is_unremovable(cls, s):
return s.is_instantiated() or (s.parent and s.parent.is_instantiated())
def __init__(self, parent, name, tags, lineno, scope_id):
super().__init__(tags)
self.name = name
self.orig_name = name
self.parent = parent
if parent:
self.name = parent.name + "." + name
parent.append_child(self)
self.lineno = lineno
self.scope_id = scope_id
self.symbols = {}
self.params = []
self.return_type = None
self.entry_block = None
self.exit_block = None
self.children = []
self.bases = []
self.origin = None
self.subs = []
self.usedef = None
self.loop_tree = LoopNestTree()
self.callee_instances = defaultdict(set)
#self.stgs = []
self.order = (-1, -1)
self.block_count = 0
self.workers = []
self.worker_owner = None
self.asap_latency = -1
self.type_args = []
self.synth_params = make_synth_params()
self.constants = {}
self.branch_graph = Graph()
def __str__(self):
s = '\n================================\n'
tags = ", ".join([att for att in self.tags])
if self.parent:
s += "Scope: {}, parent={} ({})\n".format(self.orig_name, self.parent.name, tags)
else:
s += "Scope: {} ({})\n".format(self.orig_name, tags)
s += ", ".join([str(sym) for sym in self.symbols])
s += "\n"
s += '================================\n'
s += 'Parameters\n'
for p, _, val in self.params:
if val:
s += '{}:{} = {}\n'.format(p, repr(p.typ), val)
else:
s += '{}:{}\n'.format(p, repr(p.typ))
s += "\n"
s += 'Return\n'
if self.return_type:
s += '{}\n'.format(repr(self.return_type))
else:
s += 'None\n'
s += 'Synthesis\n{}\n'.format(self.synth_params)
s += '================================\n'
for blk in self.traverse_blocks():
s += str(blk)
s += '================================\n'
for r in self.loop_tree.traverse():
s += str(r)
s += '================================\n'
return s
def __repr__(self):
return self.name
def __lt__(self, other):
if self.order < other.order:
return True
elif self.order > other.order:
return False
elif self.order == other.order:
return self.lineno < other.lineno
def clone_symbols(self, scope, postfix=''):
symbol_map = {}
for orig_sym in self.symbols.values():
new_sym = orig_sym.clone(scope, postfix)
assert new_sym.name not in scope.symbols
scope.symbols[new_sym.name] = new_sym
symbol_map[orig_sym] = new_sym
return symbol_map
def clone_blocks(self, scope):
block_map = {}
stm_map = {}
for b in self.traverse_blocks():
block_map[b] = b.clone(scope, stm_map)
for b in self.traverse_blocks():
b_clone = block_map[b]
b_clone.reconnect(block_map)
# jump target
for stm in stm_map.values():
if stm.is_a(JUMP):
stm.target = block_map[stm.target]
elif stm.is_a(CJUMP):
stm.true = block_map[stm.true]
stm.false = block_map[stm.false]
elif stm.is_a(MCJUMP):
stm.targets = [block_map[t] for t in stm.targets]
return block_map, stm_map
def clone(self, prefix, postfix, parent=None):
#if self.is_lib():
# return
name = prefix + '_' if prefix else ''
name += self.orig_name
name = name + '_' + postfix if postfix else name
parent = self.parent if parent is None else parent
s = Scope.create(parent, name, set(self.tags), self.lineno, origin=self)
logger.debug('CLONE {} {}'.format(self.name, s.name))
s.children = list(self.children)
# TODO: should be reconsidered the owned policy
#for child in s.children:
# child.parent = s
s.bases = list(self.bases)
s.subs = list(self.subs)
s.type_args = list(self.type_args)
symbol_map = self.clone_symbols(s)
s.params = []
for p, cp, defval in self.params:
param = FunctionParam(symbol_map[p],
symbol_map[cp],
defval.clone() if defval else None)
s.params.append(param)
s.return_type = self.return_type
block_map, stm_map = self.clone_blocks(s)
s.entry_block = block_map[self.entry_block]
s.exit_block = block_map[self.exit_block]
s.usedef = None
for n in self.branch_graph.nodes:
if n in stm_map:
new_n = stm_map[n]
s.branch_graph.add_node(new_n)
for n0, n1, _ in self.branch_graph.edges:
if n0 in stm_map and n1 in stm_map:
new_n0 = stm_map[n0]
new_n1 = stm_map[n1]
if new_n0 < new_n1:
s.branch_graph.add_edge(new_n0, new_n1)
else:
s.branch_graph.add_edge(new_n1, new_n0)
if self.is_function_module():
new_callee_instances = defaultdict(set)
for func_sym, inst_names in self.callee_instances.items():
new_func_sym = symbol_map[func_sym]
new_callee_instances[new_func_sym] = copy(inst_names)
s.callee_instances = new_callee_instances
s.order = self.order
sym_replacer = SymbolReplacer(symbol_map)
sym_replacer.process(s)
#s.parent.append_child(s)
#env.append_scope(s)
s.cloned_symbols = symbol_map
s.cloned_blocks = block_map
s.cloned_stms = stm_map
s.synth_params = self.synth_params.copy()
# TODO:
#s.loop_tree = None
#s.constants
return s
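    # Creates a derived (subclass) scope: it shares the base scope's symbols/workers/children, then
    # re-clones each overridden method so that its 'self' parameter is retyped to the new subclass.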
def inherit(self, name, overrides):
sub = Scope.create(self.parent, name, set(self.tags), self.lineno, origin=self)
sub.bases.append(self)
sub.symbols = copy(self.symbols)
sub.workers = copy(self.workers)
sub.children = copy(self.children)
sub.exit_block = sub.entry_block = Block(sub)
sub.add_tag('inherited')
#env.append_scope(sub)
self.subs.append(sub)
for method in overrides:
sub.children.remove(method)
sub_method = method.clone('', '', sub)
_in_self_sym, self_sym, _ = sub_method.params[0]
assert self_sym.name == 'self'
assert self_sym.typ.get_scope() is self
self_typ = Type.object(sub)
_in_self_sym.set_type(self_typ)
self_sym.set_type(self_typ)
method_sym = sub.symbols[sub_method.orig_name]
sub_method_sym = method_sym.clone(sub)
sub_method_sym.typ.set_scope(sub_method)
sub.symbols[sub_method.orig_name] = sub_method_sym
return sub
def find_child(self, name):
for child in self.children:
if child.orig_name == name:
return child
return None
def find_parent_scope(self, name):
if self.find_child(name):
return self
elif self.parent:
return self.parent.find_parent_scope(name)
else:
return None
def find_scope(self, name):
if self.orig_name == name:
return self
child = self.find_child(name)
if child:
return child
if self.parent:
return self.parent.find_scope(name)
return None
def add_sym(self, name, tags=None, typ=Type.undef_t):
if name in self.symbols:
raise RuntimeError("symbol '{}' is already registered ".format(name))
sym = Symbol(name, self, tags, typ)
self.symbols[name] = sym
return sym
def add_temp(self, temp_name=None, tags=None, typ=Type.undef_t):
name = Symbol.unique_name(temp_name)
if tags:
tags.add('temp')
else:
tags = {'temp'}
return self.add_sym(name, tags, typ)
def add_condition_sym(self):
return self.add_temp(Symbol.condition_prefix, {'condition'}, typ=Type.bool_t)
def add_param_sym(self, param_name, typ=Type.undef_t):
name = '{}_{}'.format(Symbol.param_prefix, param_name)
return self.add_sym(name, {'param'}, typ)
def find_param_sym(self, param_name):
name = '{}_{}'.format(Symbol.param_prefix, param_name)
return self.find_sym(name)
def add_return_sym(self):
return self.add_sym(Symbol.return_prefix, ['return'])
def del_sym(self, name):
if name in self.symbols:
del self.symbols[name]
def import_sym(self, sym):
if sym.name in self.symbols and sym is not self.symbols[sym.name]:
raise RuntimeError("symbol '{}' is already registered ".format(sym.name))
self.symbols[sym.name] = sym
def find_sym(self, name):
names = name.split('.')
if len(names) > 1:
return self.find_sym_r(names)
if name in self.symbols:
return self.symbols[name]
elif self.parent:
if self.parent.is_class():
# look-up from bases
for base in self.bases:
found = base.find_sym(name)
if found:
break
else:
# otherwise, look-up from global
found = env.outermost_scope().find_sym(name)
if not found:
found = self.global_scope().find_sym(name)
else:
found = self.parent.find_sym(name)
return found
return None
def find_sym_r(self, names):
name = names[0]
sym = self.find_sym(name)
if sym and len(names) > 1:
if sym.typ.is_containable():
return sym.typ.get_scope().find_sym_r(names[1:])
else:
return None
return sym
def has_sym(self, name):
return name in self.symbols
def gen_sym(self, name):
if self.has_sym(name):
sym = self.symbols[name]
else:
sym = self.add_sym(name)
return sym
def rename_sym(self, old, new):
assert old in self.symbols
sym = self.symbols[old]
del self.symbols[old]
sym.name = new
self.symbols[new] = sym
return sym
def inherit_sym(self, orig_sym, new_name):
#assert orig_sym.scope is self
if self.has_sym(new_name):
new_sym = self.symbols[new_name]
else:
new_sym = self.add_sym(new_name, set(orig_sym.tags), typ=orig_sym.typ.clone())
if orig_sym.ancestor:
new_sym.ancestor = orig_sym.ancestor
else:
new_sym.ancestor = orig_sym
return new_sym
def qualified_name(self):
if self.name.startswith(env.global_scope_name):
name = self.name[len(env.global_scope_name) + 1:]
else:
name = self.name
return name.replace('.', '_')
def set_entry_block(self, blk):
assert self.entry_block is None
self.entry_block = blk
def set_exit_block(self, blk):
self.exit_block = blk
def traverse_blocks(self):
assert len(self.entry_block.preds) == 0
yield from self.entry_block.traverse()
def replace_block(self, old, new):
new.preds = old.preds[:]
new.preds_loop = old.preds_loop[:]
new.succs = old.succs[:]
new.succs_loop = old.succs_loop[:]
for blk in self.traverse_blocks():
if blk is old:
for pred in old.preds:
pred.replace_succ(old, new)
pred.replace_succ_loop(old, new)
for succ in old.succs:
succ.replace_pred(old, new)
succ.replace_pred_loop(old, new)
def append_child(self, child_scope):
if child_scope not in self.children:
self.children.append(child_scope)
def add_param(self, sym, copy, defval):
self.params.append(FunctionParam(sym, copy, defval))
def has_param(self, sym):
name = sym.name.split('#')[0]
for p, _, _ in self.params:
if p.name == name:
return True
return False
def get_param_index(self, sym):
name = sym.name.split('#')[0]
for i, (p, _, _) in enumerate(self.params):
if p.name == name:
return i
return -1
def append_callee_instance(self, callee_scope, inst_name):
self.callee_instances[callee_scope].add(inst_name)
def dfgs(self, bottom_up=False):
def collect_dfg(dfg, ds):
ds.append(dfg)
for c in dfg.children:
collect_dfg(c, ds)
ds = []
collect_dfg(self.top_dfg, ds)
return ds
def find_ctor(self):
assert self.is_class()
for child in self.children:
if child.is_ctor():
return child
return None
def is_global(self):
return self.name == env.global_scope_name
def is_containable(self):
return self.is_namespace() or self.is_class()
def is_subclassof(self, clazz):
if self is clazz:
return True
for base in self.bases:
if base is clazz:
return True
if base.is_subclassof(clazz):
return True
return False
def class_fields(self):
assert self.is_class()
class_fields = {}
if self.bases:
for base in self.bases:
fields = base.class_fields()
class_fields.update(fields)
class_fields.update(self.symbols)
return class_fields
def register_worker(self, worker_scope, worker_args):
for i, (w, _) in enumerate(self.workers[:]):
if w is worker_scope:
self.workers.pop(i)
self.workers.append((worker_scope, worker_args))
assert worker_scope.worker_owner is None or worker_scope.worker_owner is self
worker_scope.worker_owner = self
def reset_loop_tree(self):
self.loop_tree = LoopNestTree()
def top_region(self):
return self.loop_tree.root
def parent_region(self, r):
return self.loop_tree.get_parent_of(r)
def child_regions(self, r):
return self.loop_tree.get_children_of(r)
def set_top_region(self, r):
self.loop_tree.root = r
self.loop_tree.add_node(r)
def append_child_regions(self, parent, children):
for child in children:
self.loop_tree.add_edge(parent, child)
def append_sibling_region(self, r, new_r):
parent = self.loop_tree.get_parent_of(r)
self.loop_tree.add_edge(parent, new_r)
def remove_region(self, r):
parent = self.loop_tree.get_parent_of(r)
self.loop_tree.del_edge(parent, r, auto_del_node=False)
self.loop_tree.del_node(r)
def find_region(self, blk):
for r in self.loop_tree.traverse():
if blk in r.blocks():
return r
return None
def remove_block_from_region(self, blk):
if not self.loop_tree.root:
return
r = self.find_region(blk)
r.remove_body(blk)
def is_leaf_region(self, r):
return self.loop_tree.is_leaf(r)
def traverse_regions(self, reverse=False):
return self.loop_tree.traverse(reverse)
def add_branch_graph_edge(self, k, vs):
assert isinstance(vs, list)
self.branch_graph.add_node(k)
for v in itertools.chain(*vs):
if k < v:
self.branch_graph.add_edge(k, v)
else:
self.branch_graph.add_edge(v, k)
def has_branch_edge(self, stm0, stm1):
if stm0 < stm1:
return self.branch_graph.find_edge(stm0, stm1) is not None
else:
return self.branch_graph.find_edge(stm1, stm0) is not None
class SymbolReplacer(IRVisitor):
def __init__(self, sym_map):
super().__init__()
self.sym_map = sym_map
def visit_TEMP(self, ir):
if ir.sym in self.sym_map:
ir.sym = self.sym_map[ir.sym]
else:
logger.debug('WARNING: not found {}'.format(ir.sym))
def visit_ATTR(self, ir):
self.visit(ir.exp)
if ir.attr in self.sym_map:
ir.attr = self.sym_map[ir.attr]
else:
logger.debug('WARNING: not found {}'.format(ir.attr))
def visit_ARRAY(self, ir):
if ir.sym in self.sym_map:
ir.sym = self.sym_map[ir.sym]
for item in ir.items:
self.visit(item)
self.visit(ir.repeat)
def write_dot(scope, tag):
try:
import pydot
except ImportError:
raise
# force disable debug mode to simplify the caption
debug_mode = env.dev_debug_mode
env.dev_debug_mode = False
name = scope.orig_name + '_' + str(tag)
g = pydot.Dot(name, graph_type='digraph')
def get_text(blk):
s = blk.name + '\n'
for stm in blk.stms:
s += str(stm).replace('\n', '\l') + '\l'
s = s.replace(':', '_')
return s
blk_map = {blk: pydot.Node(get_text(blk), shape='box') for blk in scope.traverse_blocks()}
for n in blk_map.values():
g.add_node(n)
for blk in blk_map.keys():
from_node = blk_map[blk]
for succ in blk.succs:
to_node = blk_map[succ]
if succ in blk.succs_loop:
g.add_edge(pydot.Edge(from_node, to_node, color='red'))
else:
g.add_edge(pydot.Edge(from_node, to_node))
#for pred in blk.preds:
# to_node = blk_map[pred]
# if pred in blk.preds_loop:
# g.add_edge(pydot.Edge(from_node, to_node, style='dashed', color='red'))
# else:
# g.add_edge(pydot.Edge(from_node, to_node, style='dashed'))
g.write_png('{}/{}.png'.format(env.debug_output_dir, name))
env.dev_debug_mode = debug_mode
|
py
|
1a5c14259e3a46a80030f190f719a621a92d6b10
|
'''
A tool that generates FS API calls to generate a filesystem, and packages the files
to work with that.
This is called by emcc. You can also call it yourself.
You can split your files into "asset bundles", and create each bundle separately
with this tool. Then just include the generated js for each and they will load
the data and prepare it accordingly. This allows you to share assets and reduce
data downloads.
Usage:
file_packager.py TARGET [--preload A [B..]] [--embed C [D..]] [--exclude E [F..]] [--compress COMPRESSION_DATA] [--crunch[=X]] [--js-output=OUTPUT.js] [--no-force] [--use-preload-cache] [--no-heap-copy]
  --preload, --embed   See emcc --help for more details on those options.
--crunch=X Will compress dxt files to crn with quality level X. The crunch commandline tool must be present
and CRUNCH should be defined in ~/.emscripten that points to it. JS crunch decompressing code will
be added to convert the crn to dds in the browser.
crunch-worker.js will be generated in the current directory. You should include that file when
packaging your site.
DDS files will not be crunched if the .crn is more recent than the .dds. This prevents a lot of
unneeded computation.
--js-output=FILE Writes output in FILE, if not specified, standard output is used.
--no-force Don't create output if no valid input file is specified.
--use-preload-cache Stores package in IndexedDB so that subsequent loads don't need to do XHR. Checks package version.
--no-heap-copy If specified, the preloaded filesystem is not copied inside the Emscripten HEAP, but kept in a separate typed array outside it.
The default, if this is not specified, is to embed the VFS inside the HEAP, so that mmap()ing files in it is a no-op.
Passing this flag optimizes for fread() usage, omitting it optimizes for mmap() usage.
Notes:
* The file packager generates unix-style file paths. So if you are on windows and a file is accessed at
subdir\file, in JS it will be subdir/file. For simplicity we treat the web platform as a *NIX.
TODO: You can also provide .crn files yourself, pre-crunched. With this option, they will be decompressed
to dds files in the browser, exactly the same as if this tool compressed them.
'''
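# Illustrative invocation (file names are hypothetical):
#   python file_packager.py assets.data --preload images/ sounds/music.ogg --js-output=assets.js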
import os, sys, shutil, random, uuid, ctypes
import posixpath
import shared
from shared import Compression, execute, suffix, unsuffixed
from subprocess import Popen, PIPE, STDOUT
import fnmatch
if len(sys.argv) == 1:
print '''Usage: file_packager.py TARGET [--preload A...] [--embed B...] [--exclude C...] [--compress COMPRESSION_DATA] [--crunch[=X]] [--js-output=OUTPUT.js] [--no-force] [--use-preload-cache] [--no-heap-copy]
See the source for more details.'''
sys.exit(0)
DEBUG = os.environ.get('EMCC_DEBUG')
data_target = sys.argv[1]
IMAGE_SUFFIXES = ('.jpg', '.png', '.bmp')
AUDIO_SUFFIXES = ('.ogg', '.wav', '.mp3')
AUDIO_MIMETYPES = { 'ogg': 'audio/ogg', 'wav': 'audio/wav', 'mp3': 'audio/mpeg' }
CRUNCH_INPUT_SUFFIX = '.dds'
CRUNCH_OUTPUT_SUFFIX = '.crn'
DDS_HEADER_SIZE = 128
AV_WORKAROUND = 0 # Set to 1 to randomize file order and add some padding, to work around silly av false positives
data_files = []
excluded_patterns = []
leading = ''
has_preloaded = False
compress_cnt = 0
crunch = 0
plugins = []
jsoutput = None
force = True
# If set to True, IndexedDB (IDBFS in library_idbfs.js) is used to locally cache VFS XHR so that subsequent
# page loads can read the data from the offline cache instead.
use_preload_cache = False
# If set to True, the blob received from XHR is moved to the Emscripten HEAP, optimizing for mmap() performance.
# If set to False, the XHR blob is kept intact, and fread()s etc. are performed directly to that data. This optimizes for minimal memory usage and fread() performance.
no_heap_copy = True
for arg in sys.argv[2:]:
if arg == '--preload':
has_preloaded = True
leading = 'preload'
elif arg == '--embed':
leading = 'embed'
elif arg == '--exclude':
leading = 'exclude'
elif arg == '--compress':
compress_cnt = 1
Compression.on = True
leading = 'compress'
elif arg == '--no-force':
force = False
leading = ''
elif arg == '--use-preload-cache':
use_preload_cache = True
leading = ''
elif arg == '--no-heap-copy':
no_heap_copy = False
leading = ''
elif arg.startswith('--js-output'):
jsoutput = arg.split('=')[1] if '=' in arg else None
leading = ''
elif arg.startswith('--crunch'):
try:
from shared import CRUNCH
except Exception, e:
print >> sys.stderr, 'could not import CRUNCH (make sure it is defined properly in ~/.emscripten)'
raise e
crunch = arg.split('=')[1] if '=' in arg else '128'
leading = ''
elif arg.startswith('--plugin'):
plugin = open(arg.split('=')[1], 'r').read()
eval(plugin) # should append itself to plugins
leading = ''
elif leading == 'preload' or leading == 'embed':
mode = leading
uses_at_notation = '@' in arg
if uses_at_notation:
srcpath, dstpath = arg.split('@') # User is specifying destination filename explicitly.
else:
srcpath = dstpath = arg # Use source path as destination path.
if os.path.isfile(srcpath) or os.path.isdir(srcpath):
data_files.append({ 'srcpath': srcpath, 'dstpath': dstpath, 'mode': mode, 'explicit_dst_path': uses_at_notation })
else:
print >> sys.stderr, 'Warning: ' + arg + ' does not exist, ignoring.'
elif leading == 'exclude':
excluded_patterns.append(arg)
elif leading == 'compress':
if compress_cnt == 1:
Compression.encoder = arg
compress_cnt = 2
elif compress_cnt == 2:
Compression.decoder = arg
compress_cnt = 3
elif compress_cnt == 3:
Compression.js_name = arg
compress_cnt = 0
else:
print >> sys.stderr, 'Unknown parameter:', arg
sys.exit(1)
if (not force) and len(data_files) == 0:
has_preloaded = False
ret = '''
var Module;
if (typeof Module === 'undefined') Module = eval('(function() { try { return Module || {} } catch(e) { return {} } })()');
if (!Module.expectedDataFileDownloads) {
Module.expectedDataFileDownloads = 0;
Module.finishedDataFileDownloads = 0;
}
Module.expectedDataFileDownloads++;
(function() {
'''
code = '''
function assert(check, msg) {
if (!check) throw msg + new Error().stack;
}
'''
# Win32 code to test whether the given file has the hidden property set.
def has_hidden_attribute(filepath):
if sys.platform != 'win32':
return False
try:
attrs = ctypes.windll.kernel32.GetFileAttributesW(unicode(filepath))
assert attrs != -1
result = bool(attrs & 2)
except (AttributeError, AssertionError):
result = False
return result
# The packager should never preload/embed files if the file is hidden (Win32).
# or it matches any pattern specified in --exclude
def should_ignore(fullname):
if has_hidden_attribute(fullname):
return True
for p in excluded_patterns:
if fnmatch.fnmatch(fullname, p):
return True
return False
# Expand directories into individual files
def add(arg, dirname, names):
# rootpathsrc: The path name of the root directory on the local FS we are adding to emscripten virtual FS.
# rootpathdst: The name we want to make the source path available on the emscripten virtual FS.
mode, rootpathsrc, rootpathdst = arg
new_names = []
for name in names:
fullname = os.path.join(dirname, name)
if should_ignore(fullname):
if DEBUG:
print >> sys.stderr, 'Skipping file "' + fullname + '" from inclusion in the emscripten virtual file system.'
else:
new_names.append(name)
if not os.path.isdir(fullname):
dstpath = os.path.join(rootpathdst, os.path.relpath(fullname, rootpathsrc)) # Convert source filename relative to root directory of target FS.
new_data_files.append({ 'srcpath': fullname, 'dstpath': dstpath, 'mode': mode, 'explicit_dst_path': True })
del names[:]
names.extend(new_names)
new_data_files = []
for file_ in data_files:
if not should_ignore(file_['srcpath']):
if os.path.isdir(file_['srcpath']):
os.path.walk(file_['srcpath'], add, [file_['mode'], file_['srcpath'], file_['dstpath']])
else:
new_data_files.append(file_)
data_files = filter(lambda file_: not os.path.isdir(file_['srcpath']), new_data_files)
if len(data_files) == 0:
print >> sys.stderr, 'Nothing to do!'
sys.exit(1)
# Absolutize paths, and check that they make sense
curr_abspath = os.path.abspath(os.getcwd()) # os.getcwd() always returns the hard path with any symbolic links resolved, even if we cd'd into a symbolic link.
for file_ in data_files:
if not file_['explicit_dst_path']:
# This file was not defined with src@dst, so we inferred the destination from the source. In that case,
# we require that the destination not be under the current location
path = file_['dstpath']
abspath = os.path.realpath(os.path.abspath(path)) # Use os.path.realpath to resolve any symbolic links to hard paths, to match the structure in curr_abspath.
if DEBUG: print >> sys.stderr, path, abspath, curr_abspath
if not abspath.startswith(curr_abspath):
print >> sys.stderr, 'Error: Embedding "%s" which is below the current directory "%s". This is invalid since the current directory becomes the root that the generated code will see' % (path, curr_abspath)
sys.exit(1)
file_['dstpath'] = abspath[len(curr_abspath)+1:]
if os.path.isabs(path):
print >> sys.stderr, 'Warning: Embedding an absolute file/directory name "' + path + '" to the virtual filesystem. The file will be made available in the relative path "' + file_['dstpath'] + '". You can use the explicit syntax --preload-file srcpath@dstpath to explicitly specify the target location the absolute source path should be directed to.'
for file_ in data_files:
file_['dstpath'] = file_['dstpath'].replace(os.path.sep, '/') # name in the filesystem, native and emulated
if file_['dstpath'].endswith('/'): # If user has submitted a directory name as the destination but omitted the destination filename, use the filename from source file
file_['dstpath'] = file_['dstpath'] + os.path.basename(file_['srcpath'])
# make destination path always relative to the root
file_['dstpath'] = posixpath.normpath(os.path.join('/', file_['dstpath']))
if DEBUG:
print >> sys.stderr, 'Packaging file "' + file_['srcpath'] + '" to VFS in path "' + file_['dstpath'] + '".'
# Remove duplicates (can occur naively, for example preload dir/, preload dir/subdir/)
seen = {}
def was_seen(name):
if seen.get(name): return True
seen[name] = 1
return False
data_files = filter(lambda file_: not was_seen(file_['dstpath']), data_files)
if AV_WORKAROUND:
random.shuffle(data_files)
# Apply plugins
for file_ in data_files:
for plugin in plugins:
plugin(file_)
# Crunch files
if crunch:
shutil.copyfile(shared.path_from_root('tools', 'crunch-worker.js'), 'crunch-worker.js')
ret += '''
var decrunchWorker = new Worker('crunch-worker.js');
var decrunchCallbacks = [];
decrunchWorker.onmessage = function(msg) {
decrunchCallbacks[msg.data.callbackID](msg.data.data);
console.log('decrunched ' + msg.data.filename + ' in ' + msg.data.time + ' ms, ' + msg.data.data.length + ' bytes');
decrunchCallbacks[msg.data.callbackID] = null;
};
function requestDecrunch(filename, data, callback) {
decrunchWorker.postMessage({
filename: filename,
data: new Uint8Array(data),
callbackID: decrunchCallbacks.length
});
decrunchCallbacks.push(callback);
}
'''
for file_ in data_files:
if file_['dstpath'].endswith(CRUNCH_INPUT_SUFFIX):
src_dds_name = file_['srcpath']
src_crunch_name = unsuffixed(src_dds_name) + CRUNCH_OUTPUT_SUFFIX
# Preload/embed the .crn version instead of the .dds version, but use the .dds suffix for the target file in the virtual FS.
file_['srcpath'] = src_crunch_name
try:
# Do not crunch if crunched version exists and is more recent than dds source
crunch_time = os.stat(src_crunch_name).st_mtime
dds_time = os.stat(src_dds_name).st_mtime
if dds_time < crunch_time: continue
except:
pass # if one of them does not exist, continue on
# guess at format. this lets us tell crunch to not try to be clever and use odd formats like DXT5_AGBR
try:
format = Popen(['file', file_['srcpath']], stdout=PIPE).communicate()[0]
if 'DXT5' in format:
format = ['-dxt5']
elif 'DXT1' in format:
format = ['-dxt1']
else:
raise Exception('unknown format')
except:
format = []
Popen([CRUNCH, '-outsamedir', '-file', src_dds_name, '-quality', crunch] + format, stdout=sys.stderr).communicate()
#if not os.path.exists(os.path.basename(crunch_name)):
# print >> sys.stderr, 'Failed to crunch, perhaps a weird dxt format? Looking for a source PNG for the DDS'
# Popen([CRUNCH, '-file', unsuffixed(file_['srcpath']) + '.png', '-quality', crunch] + format, stdout=sys.stderr).communicate()
assert os.path.exists(src_crunch_name), 'crunch failed to generate output'
# prepend the dds header
crunched = open(src_crunch_name, 'rb').read()
c = open(src_crunch_name, 'wb')
c.write(open(src_dds_name, 'rb').read()[:DDS_HEADER_SIZE])
c.write(crunched)
c.close()
# Set up folders
partial_dirs = []
for file_ in data_files:
dirname = os.path.dirname(file_['dstpath'])
dirname = dirname.lstrip('/') # absolute paths start with '/', remove that
if dirname != '':
parts = dirname.split('/')
for i in range(len(parts)):
partial = '/'.join(parts[:i+1])
if partial not in partial_dirs:
code += '''Module['FS_createPath']('/%s', '%s', true, true);\n''' % ('/'.join(parts[:i]), parts[i])
partial_dirs.append(partial)
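# Illustrative example (made-up path): for a file whose dstpath is '/assets/img/hero.png',
# the loop above emits, in order:
#   Module['FS_createPath']('/', 'assets', true, true);
#   Module['FS_createPath']('/assets', 'img', true, true);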
if has_preloaded:
# Bundle all datafiles into one archive. Avoids doing lots of simultaneous XHRs which has overhead.
data = open(data_target, 'wb')
start = 0
for file_ in data_files:
file_['data_start'] = start
curr = open(file_['srcpath'], 'rb').read()
file_['data_end'] = start + len(curr)
if AV_WORKAROUND: curr += '\x00'
#print >> sys.stderr, 'bundling', file_['srcpath'], file_['dstpath'], file_['data_start'], file_['data_end']
start += len(curr)
data.write(curr)
data.close()
# TODO: sha256sum on data_target
if Compression.on:
Compression.compress(data_target)
# Data requests - for getting a block of data out of the big archive - have a similar API to XHRs
code += '''
function DataRequest(start, end, crunched, audio) {
this.start = start;
this.end = end;
this.crunched = crunched;
this.audio = audio;
}
DataRequest.prototype = {
requests: {},
open: function(mode, name) {
this.name = name;
this.requests[name] = this;
Module['addRunDependency']('fp ' + this.name);
},
send: function() {},
onload: function() {
var byteArray = this.byteArray.subarray(this.start, this.end);
%s
this.finish(byteArray);
%s
},
finish: function(byteArray) {
var that = this;
Module['FS_createPreloadedFile'](this.name, null, byteArray, true, true, function() {
Module['removeRunDependency']('fp ' + that.name);
}, function() {
if (that.audio) {
Module['removeRunDependency']('fp ' + that.name); // workaround for chromium bug 124926 (still no audio with this, but at least we don't hang)
} else {
Module.printErr('Preloading file ' + that.name + ' failed');
}
}, false, true); // canOwn this data in the filesystem, it is a slide into the heap that will never change
this.requests[this.name] = null;
},
};
''' % ('' if not crunch else '''
if (this.crunched) {
var ddsHeader = byteArray.subarray(0, 128);
var that = this;
requestDecrunch(this.name, byteArray.subarray(128), function(ddsData) {
byteArray = new Uint8Array(ddsHeader.length + ddsData.length);
byteArray.set(ddsHeader, 0);
byteArray.set(ddsData, 128);
that.finish(byteArray);
});
} else {
''', '' if not crunch else '''
}
''')
counter = 0
for file_ in data_files:
filename = file_['dstpath']
dirname = os.path.dirname(filename)
basename = os.path.basename(filename)
if file_['mode'] == 'embed':
# Embed
data = map(ord, open(file_['srcpath'], 'rb').read())
code += '''fileData%d = [];\n''' % counter
if data:
parts = []
chunk_size = 10240
start = 0
while start < len(data):
parts.append('''fileData%d.push.apply(fileData%d, %s);\n''' % (counter, counter, str(data[start:start+chunk_size])))
start += chunk_size
code += ''.join(parts)
code += '''Module['FS_createDataFile']('%s', '%s', fileData%d, true, true);\n''' % (dirname, basename, counter)
counter += 1
elif file_['mode'] == 'preload':
# Preload
varname = 'filePreload%d' % counter
counter += 1
code += ''' new DataRequest(%(start)d, %(end)d, %(crunched)s, %(audio)s).open('GET', '%(filename)s');
''' % {
'filename': file_['dstpath'],
'start': file_['data_start'],
'end': file_['data_end'],
'crunched': '1' if crunch and filename.endswith(CRUNCH_INPUT_SUFFIX) else '0',
'audio': '1' if filename[-4:] in AUDIO_SUFFIXES else '0',
}
else:
assert 0
if has_preloaded:
# Get the big archive and split it up
if no_heap_copy:
use_data = '''
// copy the entire loaded file into a spot in the heap. Files will refer to slices in that. They cannot be freed though.
var ptr = Module['_malloc'](byteArray.length);
Module['HEAPU8'].set(byteArray, ptr);
DataRequest.prototype.byteArray = Module['HEAPU8'].subarray(ptr, ptr+byteArray.length);
'''
else:
use_data = '''
// Reuse the bytearray from the XHR as the source for file reads.
DataRequest.prototype.byteArray = byteArray;
'''
for file_ in data_files:
if file_['mode'] == 'preload':
use_data += ' DataRequest.prototype.requests["%s"].onload();\n' % (file_['dstpath'])
use_data += " Module['removeRunDependency']('datafile_%s');\n" % data_target
if Compression.on:
use_data = '''
Module["decompress"](byteArray, function(decompressed) {
byteArray = new Uint8Array(decompressed);
%s
});
''' % use_data
package_uuid = uuid.uuid4();
package_name = Compression.compressed_name(data_target) if Compression.on else data_target
statinfo = os.stat(package_name)
remote_package_size = statinfo.st_size
remote_package_name = os.path.basename(package_name)
ret += r'''
var PACKAGE_PATH;
if (typeof window === 'object') {
PACKAGE_PATH = window['encodeURIComponent'](window.location.pathname.toString().substring(0, window.location.pathname.toString().lastIndexOf('/')) + '/');
} else {
// worker
PACKAGE_PATH = encodeURIComponent(location.pathname.toString().substring(0, location.pathname.toString().lastIndexOf('/')) + '/');
}
var PACKAGE_NAME = '%s';
var REMOTE_PACKAGE_NAME = (Module['filePackagePrefixURL'] || '') + '%s';
var REMOTE_PACKAGE_SIZE = %d;
var PACKAGE_UUID = '%s';
''' % (data_target, remote_package_name, remote_package_size, package_uuid)
if use_preload_cache:
code += r'''
var indexedDB = window.indexedDB || window.mozIndexedDB || window.webkitIndexedDB || window.msIndexedDB;
var IDB_RO = "readonly";
var IDB_RW = "readwrite";
var DB_NAME = 'EM_PRELOAD_CACHE';
var DB_VERSION = 1;
var METADATA_STORE_NAME = 'METADATA';
var PACKAGE_STORE_NAME = 'PACKAGES';
function openDatabase(callback, errback) {
try {
var openRequest = indexedDB.open(DB_NAME, DB_VERSION);
} catch (e) {
return errback(e);
}
openRequest.onupgradeneeded = function(event) {
var db = event.target.result;
if(db.objectStoreNames.contains(PACKAGE_STORE_NAME)) {
db.deleteObjectStore(PACKAGE_STORE_NAME);
}
var packages = db.createObjectStore(PACKAGE_STORE_NAME);
if(db.objectStoreNames.contains(METADATA_STORE_NAME)) {
db.deleteObjectStore(METADATA_STORE_NAME);
}
var metadata = db.createObjectStore(METADATA_STORE_NAME);
};
openRequest.onsuccess = function(event) {
var db = event.target.result;
callback(db);
};
openRequest.onerror = function(error) {
errback(error);
};
};
/* Check if there's a cached package, and if so whether it's the latest available */
function checkCachedPackage(db, packageName, callback, errback) {
var transaction = db.transaction([METADATA_STORE_NAME], IDB_RO);
var metadata = transaction.objectStore(METADATA_STORE_NAME);
var getRequest = metadata.get(packageName);
getRequest.onsuccess = function(event) {
var result = event.target.result;
if (!result) {
return callback(false);
} else {
return callback(PACKAGE_UUID === result.uuid);
}
};
getRequest.onerror = function(error) {
errback(error);
};
};
function fetchCachedPackage(db, packageName, callback, errback) {
var transaction = db.transaction([PACKAGE_STORE_NAME], IDB_RO);
var packages = transaction.objectStore(PACKAGE_STORE_NAME);
var getRequest = packages.get(packageName);
getRequest.onsuccess = function(event) {
var result = event.target.result;
callback(result);
};
getRequest.onerror = function(error) {
errback(error);
};
};
function cacheRemotePackage(db, packageName, packageData, packageMeta, callback, errback) {
var transaction = db.transaction([PACKAGE_STORE_NAME, METADATA_STORE_NAME], IDB_RW);
var packages = transaction.objectStore(PACKAGE_STORE_NAME);
var metadata = transaction.objectStore(METADATA_STORE_NAME);
var putPackageRequest = packages.put(packageData, packageName);
putPackageRequest.onsuccess = function(event) {
var putMetadataRequest = metadata.put(packageMeta, packageName);
putMetadataRequest.onsuccess = function(event) {
callback(packageData);
};
putMetadataRequest.onerror = function(error) {
errback(error);
};
};
putPackageRequest.onerror = function(error) {
errback(error);
};
};
'''
ret += r'''
function fetchRemotePackage(packageName, packageSize, callback, errback) {
var xhr = new XMLHttpRequest();
xhr.open('GET', packageName, true);
xhr.responseType = 'arraybuffer';
xhr.onprogress = function(event) {
var url = packageName;
var size = packageSize;
if (event.total) size = event.total;
if (event.loaded) {
if (!xhr.addedTotal) {
xhr.addedTotal = true;
if (!Module.dataFileDownloads) Module.dataFileDownloads = {};
Module.dataFileDownloads[url] = {
loaded: event.loaded,
total: size
};
} else {
Module.dataFileDownloads[url].loaded = event.loaded;
}
var total = 0;
var loaded = 0;
var num = 0;
for (var download in Module.dataFileDownloads) {
var data = Module.dataFileDownloads[download];
total += data.total;
loaded += data.loaded;
num++;
}
total = Math.ceil(total * Module.expectedDataFileDownloads/num);
if (Module['setStatus']) Module['setStatus']('Downloading data... (' + loaded + '/' + total + ')');
} else if (!Module.dataFileDownloads) {
if (Module['setStatus']) Module['setStatus']('Downloading data...');
}
};
xhr.onload = function(event) {
var packageData = xhr.response;
callback(packageData);
};
xhr.send(null);
};
function handleError(error) {
console.error('package error:', error);
};
'''
code += r'''
function processPackageData(arrayBuffer) {
Module.finishedDataFileDownloads++;
assert(arrayBuffer, 'Loading data file failed.');
var byteArray = new Uint8Array(arrayBuffer);
var curr;
%s
};
Module['addRunDependency']('datafile_%s');
''' % (use_data, data_target) # use basename because from the browser's point of view, we need to find the datafile in the same dir as the html file
code += r'''
if (!Module.preloadResults) Module.preloadResults = {};
'''
if use_preload_cache:
code += r'''
function preloadFallback(error) {
console.error(error);
console.error('falling back to default preload behavior');
fetchRemotePackage(REMOTE_PACKAGE_NAME, REMOTE_PACKAGE_SIZE, processPackageData, handleError);
};
openDatabase(
function(db) {
checkCachedPackage(db, PACKAGE_PATH + PACKAGE_NAME,
function(useCached) {
Module.preloadResults[PACKAGE_NAME] = {fromCache: useCached};
if (useCached) {
console.info('loading ' + PACKAGE_NAME + ' from cache');
fetchCachedPackage(db, PACKAGE_PATH + PACKAGE_NAME, processPackageData, preloadFallback);
} else {
console.info('loading ' + PACKAGE_NAME + ' from remote');
fetchRemotePackage(REMOTE_PACKAGE_NAME, REMOTE_PACKAGE_SIZE,
function(packageData) {
cacheRemotePackage(db, PACKAGE_PATH + PACKAGE_NAME, packageData, {uuid:PACKAGE_UUID}, processPackageData,
function(error) {
console.error(error);
processPackageData(packageData);
});
}
, preloadFallback);
}
}
, preloadFallback);
}
, preloadFallback);
if (Module['setStatus']) Module['setStatus']('Downloading...');
'''
else:
# Not using preload cache, so we might as well start the xhr ASAP, potentially before JS parsing of the main codebase if it's after us.
# Only tricky bit is the fetch is async, but also when runWithFS is called is async, so we handle both orderings.
ret += r'''
var fetched = null, fetchedCallback = null;
fetchRemotePackage(REMOTE_PACKAGE_NAME, REMOTE_PACKAGE_SIZE, function(data) {
if (fetchedCallback) {
fetchedCallback(data);
fetchedCallback = null;
} else {
fetched = data;
}
}, handleError);
'''
code += r'''
Module.preloadResults[PACKAGE_NAME] = {fromCache: false};
if (fetched) {
processPackageData(fetched);
fetched = null;
} else {
fetchedCallback = processPackageData;
}
'''
ret += '''
function runWithFS() {
'''
ret += code
ret += '''
}
if (Module['calledRun']) {
runWithFS();
} else {
if (!Module['preRun']) Module['preRun'] = [];
Module["preRun"].push(runWithFS); // FS is not initialized yet, wait for it
}
'''
if crunch:
ret += '''
if (!Module['postRun']) Module['postRun'] = [];
Module["postRun"].push(function() {
decrunchWorker.terminate();
});
'''
ret += '''
})();
'''
if force or len(data_files) > 0:
if jsoutput is None:
print ret
else:
f = open(jsoutput, 'w')
f.write(ret)
f.close()
|
py
|
1a5c144399578ed656fd9a319bf5c7a3b2665f7f
|
"""
Modified from OpenAI Baselines code to work with multi-agent envs
reference: https://github.com/openai/baselines/blob/master/baselines/common/vec_env/vec_env.py
"""
import os
import contextlib
import numpy as np
from abc import ABC, abstractmethod
#####################################################################################
### funcs
####################################################################################
def tile_images(img_nhwc):
"""
Tile N images into one big PxQ image
(P,Q) are chosen to be as close as possible, and if N
is square, then P=Q.
input: img_nhwc, list or array of images, ndim=4 once turned into array
n = batch index, h = height, w = width, c = channel
returns:
bigim_HWc, ndarray with ndim=3
"""
img_nhwc = np.asarray(img_nhwc)
N, h, w, c = img_nhwc.shape
H = int(np.ceil(np.sqrt(N)))
W = int(np.ceil(float(N)/H))
img_nhwc = np.array(list(img_nhwc) + [img_nhwc[0]*0 for _ in range(N, H*W)])
img_HWhwc = img_nhwc.reshape(H, W, h, w, c)
img_HhWwc = img_HWhwc.transpose(0, 2, 1, 3, 4)
img_Hh_Ww_c = img_HhWwc.reshape(H*h, W*w, c)
return img_Hh_Ww_c
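# Minimal usage sketch (illustrative sizes): tiling 7 RGB frames of 48x64 pads the
# batch with blank images to fill a 3x3 grid, producing a single 144x192x3 image.
#   imgs = np.zeros((7, 48, 64, 3), dtype=np.uint8)
#   big = tile_images(imgs)   # shape (3*48, 3*64, 3) == (144, 192, 3)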
#####################################################################################
### vec env
####################################################################################
class VecEnv(ABC):
"""
An abstract asynchronous, vectorized environment.
Used to batch data from multiple copies of an environment, so that
each observation becomes a batch of observations, and the expected action is a batch of actions to
be applied per-environment.
"""
closed = False
viewer = None
metadata = {
'render.modes': ['human', 'rgb_array']
}
def __init__(self, num_envs, observation_space, action_space):
self.num_envs = num_envs
self.observation_space = observation_space
self.action_space = action_space
@abstractmethod
def reset(self):
"""
Reset all the environments and return an array of
observations, or a dict of observation arrays.
If step_async is still doing work, that work will
be cancelled and step_wait() should not be called
until step_async() is invoked again.
"""
pass
@abstractmethod
def step_async(self, actions):
"""
Tell all the environments to start taking a step
with the given actions.
Call step_wait() to get the results of the step.
You should not call this if a step_async run is
already pending.
"""
pass
@abstractmethod
def step_wait(self):
"""
Wait for the step taken with step_async().
Returns (obs, rews, dones, infos):
- obs: an array of observations, or a dict of
arrays of observations.
- rews: an array of rewards
- dones: an array of "episode done" booleans
- infos: a sequence of info objects
"""
pass
def close_extras(self):
"""
Clean up the extra resources, beyond what's in this base class.
Only runs when not self.closed.
"""
pass
def close(self):
if self.closed:
return
if self.viewer is not None:
self.viewer.close()
self.close_extras()
self.closed = True
def step(self, actions):
"""
Step the environments synchronously.
This is available for backwards compatibility.
"""
self.step_async(actions)
return self.step_wait()
def render(self, mode='human'):
imgs = self.get_images()
bigimg = tile_images(imgs)
if mode == 'human':
self.get_viewer().imshow(bigimg)
return self.get_viewer().isopen
elif mode == 'rgb_array':
return bigimg
else:
raise NotImplementedError
def get_images(self):
"""
Return RGB images from each environment
"""
raise NotImplementedError
@property
def unwrapped(self):
if isinstance(self, VecEnvWrapper):
return self.venv.unwrapped
else:
return self
def get_viewer(self):
if self.viewer is None:
from gym.envs.classic_control import rendering
self.viewer = rendering.SimpleImageViewer()
return self.viewer
#####################################################################################
### example wrapper
####################################################################################
class VecEnvWrapper(VecEnv):
"""
An environment wrapper that applies to an entire batch
of environments at once.
"""
def __init__(self, venv, observation_space=None, action_space=None):
self.venv = venv
super().__init__(num_envs=venv.num_envs,
observation_space=observation_space or venv.observation_space,
action_space=action_space or venv.action_space)
def step_async(self, actions):
self.venv.step_async(actions)
@abstractmethod
def reset(self):
pass
@abstractmethod
def step_wait(self):
pass
def close(self):
return self.venv.close()
def render(self, mode='human'):
return self.venv.render(mode=mode)
def get_images(self):
return self.venv.get_images()
def __getattr__(self, name):
if name.startswith('_'):
raise AttributeError("attempted to get missing private attribute '{}'".format(name))
return getattr(self.venv, name)
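# Minimal usage sketch (hypothetical subclass): a wrapper only has to implement
# reset() and step_wait(); everything else is forwarded to the wrapped venv by
# __getattr__ above.
#
#   class ClipObs(VecEnvWrapper):
#       def reset(self):
#           return np.clip(self.venv.reset(), -10.0, 10.0)
#       def step_wait(self):
#           obs, rews, dones, infos = self.venv.step_wait()
#           return np.clip(obs, -10.0, 10.0), rews, dones, infos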
|
py
|
1a5c156b0648c8561211130ba65c04077adaecf3
|
from django.core.urlresolvers import reverse
from tastypie import authorization
from tastypie.authentication import MultiAuthentication
from tastypie.exceptions import BadRequest
from crits.services.handlers import add_result, add_log, finish_task
from crits.services.service import CRITsService
from crits.core.api import CRITsApiKeyAuthentication, CRITsSessionAuthentication
from crits.core.api import CRITsSerializer, CRITsAPIResource
class ServiceResource(CRITsAPIResource):
"""
Class to handle everything related to the Services API.
Currently supports POST.
"""
class Meta:
object_class = CRITsService
allowed_methods = ('post',)
resource_name = "services"
authentication = MultiAuthentication(CRITsApiKeyAuthentication(),
CRITsSessionAuthentication())
authorization = authorization.Authorization()
serializer = CRITsSerializer()
def get_object_list(self, request):
"""
Use the CRITsAPIResource to get our objects but provide the class to get
the objects from.
:param request: The incoming request.
:type request: :class:`django.http.HttpRequest`
:returns: Resulting objects in the specified format (JSON by default).
"""
return super(ServiceResource, self).get_object_list(request,
CRITsService,
False)
def obj_create(self, bundle, **kwargs):
"""
Handles creating service result entries through the API.
:param bundle: Bundle containing the service results to add.
:type bundle: Tastypie Bundle object.
:returns: HttpResponse.
"""
analyst = bundle.request.user.username
object_type = bundle.data.get('object_type', None)
object_id = bundle.data.get('object_id', None)
analysis_id = bundle.data.get('analysis_id', None)
result = bundle.data.get('result', None)
result_type = bundle.data.get('result_type', None)
result_subtype = bundle.data.get('result_subtype', None)
log_message = bundle.data.get('log_message', None)
log_level = bundle.data.get('log_level', 'info')
status = bundle.data.get('status', None)
finish = bundle.data.get('finish', False)
success = True
message = ""
content = {'return_code': 1,
'type': object_type}
if not object_type or not object_id or not analysis_id:
content['message'] = 'Need an object type, object id, and analysis id.'
self.crits_response(content)
if result:
if not result_type or not result_subtype:
content['message'] = 'When adding a result, also need type and subtype'
self.crits_response(content)
result = add_result(object_type, object_id, analysis_id,
result, result_type, result_subtype, analyst)
if not result['success']:
message += ", %s" % result['message']
success = False
if log_message:
result = add_log(object_type, object_id, analysis_id,
log_message, log_level, analyst)
if not result['success']:
message += ", %s" % result['message']
success = False
if finish:
result = finish_task(object_type, object_id, analysis_id,
status, analyst)
if not result['success']:
message += ", %s" % result['message']
success = False
content['message'] = message
content['id'] = object_id
rname = self.resource_name_from_type(object_type)
url = reverse('api_dispatch_detail',
kwargs={'resource_name': rname,
'api_name': 'v1',
'pk': object_id})
content['url'] = url
if success:
content['return_code'] = 0
self.crits_response(content)
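# Illustrative request body for obj_create above (field names taken from the handler;
# all values are placeholders). A single POST can add a result, append a log line,
# and mark the analysis task finished:
#
#   {
#     "object_type": "Sample",
#     "object_id": "<object id>",
#     "analysis_id": "<analysis id>",
#     "result": "matched rule X",
#     "result_type": "yara",
#     "result_subtype": "rule",
#     "log_message": "scan complete",
#     "log_level": "info",
#     "status": "completed",
#     "finish": true
#   }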
|
py
|
1a5c1708766ae19f2c77ef5d65f3e0decb993d52
|
import torch.nn as nn
from torch.autograd import Variable
class RNNModel(nn.Module):
"""Container module with an encoder, a recurrent module, and a decoder."""
def __init__(self, rnn_type, ntoken, ninp, nhid, nlayers):
super(RNNModel, self).__init__()
self.encoder = nn.Embedding(ntoken, ninp)
if rnn_type in ['LSTM', 'GRU']:
self.rnn = getattr(nn, rnn_type)(ninp, nhid, nlayers, bias=False)
else:
try:
nonlinearity = {'RNN_TANH': 'tanh', 'RNN_RELU': 'relu'}[rnn_type]
except KeyError:
raise ValueError( """An invalid option for `--model` was supplied,
options are ['LSTM', 'GRU', 'RNN_TANH' or 'RNN_RELU']""")
self.rnn = nn.RNN(ninp, nhid, nlayers, nonlinearity=nonlinearity, bias=False)
self.decoder = nn.Linear(nhid, ntoken)
self.init_weights()
self.rnn_type = rnn_type
self.nhid = nhid
self.nlayers = nlayers
def init_weights(self):
initrange = 0.1
self.encoder.weight.data.uniform_(-initrange, initrange)
self.decoder.bias.data.fill_(0)
self.decoder.weight.data.uniform_(-initrange, initrange)
def forward(self, input, hidden):
emb = self.encoder(input)
output, hidden = self.rnn(emb, hidden)
decoded = self.decoder(output.view(output.size(0)*output.size(1), output.size(2)))
return decoded.view(output.size(0), output.size(1), decoded.size(1)), hidden
def init_hidden(self, bsz):
weight = next(self.parameters()).data
if self.rnn_type == 'LSTM':
return (Variable(weight.new(self.nlayers, bsz, self.nhid).zero_()),
Variable(weight.new(self.nlayers, bsz, self.nhid).zero_()))
else:
return Variable(weight.new(self.nlayers, bsz, self.nhid).zero_())
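# Minimal usage sketch (illustrative sizes, assumes `import torch`): a 2-layer LSTM
# language model over a 10k-token vocabulary, applied to 20 sequences of length 35.
#
#   model = RNNModel('LSTM', ntoken=10000, ninp=200, nhid=200, nlayers=2)
#   hidden = model.init_hidden(bsz=20)
#   data = Variable(torch.zeros(35, 20).long())   # (seq_len, batch) token ids
#   output, hidden = model(data, hidden)          # output: (35, 20, 10000)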
|
tac
|
1a5c17609ff049cfdc07c6515fdc7743f63b5758
|
VTABLE(_Main) {
<empty>
Main
}
FUNCTION(_Main_New) {
memo ''
_Main_New:
_T0 = 4
parm _T0
_T1 = call _Alloc
_T2 = VTBL <_Main>
*(_T1 + 0) = _T2
return _T1
}
FUNCTION(main) {
memo ''
main:
_T6 = 2
_T4 = _T6
_T7 = 2
_T8 = 3
_T9 = (_T7 - _T8)
_T5 = _T9
_T10 = 3
_T11 = 0
_T12 = (_T5 < _T11)
if (_T12 == 0) branch _L10
_T13 = "Decaf runtime error: The length of the created array should not be less than 0.\n"
parm _T13
call _PrintString
call _Halt
_L10:
_T14 = 4
_T15 = (_T14 * _T5)
_T16 = (_T14 + _T15)
parm _T16
_T17 = call _Alloc
*(_T17 + 0) = _T5
_T17 = (_T17 + _T16)
_L11:
_T16 = (_T16 - _T14)
if (_T16 == 0) branch _L12
_T17 = (_T17 - _T14)
*(_T17 + 0) = _T10
branch _L11
_L12:
_T3 = _T17
_T18 = 1
_T19 = *(_T3 - 4)
_T20 = (_T18 < _T19)
if (_T20 == 0) branch _L13
_T21 = 0
_T22 = (_T18 < _T21)
if (_T22 == 0) branch _L14
_L13:
_T23 = "Decaf runtime error: Array subscript out of bounds\n"
parm _T23
call _PrintString
call _Halt
_L14:
_T24 = 4
_T25 = (_T18 * _T24)
_T26 = (_T3 + _T25)
_T27 = *(_T26 + 0)
_T28 = 1
_T29 = *(_T3 - 4)
_T30 = (_T28 < _T29)
if (_T30 == 0) branch _L15
_T31 = 0
_T32 = (_T28 < _T31)
if (_T32 == 0) branch _L16
_L15:
_T33 = "Decaf runtime error: Array subscript out of bounds\n"
parm _T33
call _PrintString
call _Halt
_L16:
_T34 = 4
_T35 = (_T28 * _T34)
_T36 = (_T3 + _T35)
_T37 = *(_T36 + 0)
_T38 = 1
_T39 = (_T37 + _T38)
_T40 = 4
_T41 = (_T18 * _T40)
_T42 = (_T3 + _T41)
*(_T42 + 0) = _T39
_T43 = 0
_T44 = *(_T3 - 4)
_T45 = (_T43 < _T44)
if (_T45 == 0) branch _L17
_T46 = 0
_T47 = (_T43 < _T46)
if (_T47 == 0) branch _L18
_L17:
_T48 = "Decaf runtime error: Array subscript out of bounds\n"
parm _T48
call _PrintString
call _Halt
_L18:
_T49 = 4
_T50 = (_T43 * _T49)
_T51 = (_T3 + _T50)
_T52 = *(_T51 + 0)
parm _T52
call _PrintInt
_T53 = "\n"
parm _T53
call _PrintString
_T54 = 1
_T55 = *(_T3 - 4)
_T56 = (_T54 < _T55)
if (_T56 == 0) branch _L19
_T57 = 0
_T58 = (_T54 < _T57)
if (_T58 == 0) branch _L20
_L19:
_T59 = "Decaf runtime error: Array subscript out of bounds\n"
parm _T59
call _PrintString
call _Halt
_L20:
_T60 = 4
_T61 = (_T54 * _T60)
_T62 = (_T3 + _T61)
_T63 = *(_T62 + 0)
parm _T63
call _PrintInt
_T64 = "\n"
parm _T64
call _PrintString
}
|
py
|
1a5c17af02c2fa2534678b1bdacfe4de86c682c3
|
# coding=utf-8
# Copyright 2018 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Classes to support Encoder-Decoder architectures"""
import warnings
from typing import Optional
import torch
from torch import nn
from torch.nn import CrossEntropyLoss
from ...configuration_utils import PretrainedConfig
from ...modeling_outputs import Seq2SeqLMOutput
from ...modeling_utils import PreTrainedModel
from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings
from ..auto.configuration_auto import AutoConfig
from ..auto.modeling_auto import AutoModel, AutoModelForCausalLM
from .configuration_encoder_decoder import EncoderDecoderConfig
logger = logging.get_logger(__name__)
_CONFIG_FOR_DOC = "EncoderDecoderConfig"
DEPRECATION_WARNING = (
"Version v4.12.0 introduces a better way to train encoder-decoder models by computing the loss inside the "
"encoder-decoder framework rather than in the decoder itself. You may observe training discrepancies if fine-tuning "
"a model trained with versions anterior to 4.12.0. The decoder_input_ids are now created based on the labels, no "
"need to pass them yourself anymore."
)
ENCODER_DECODER_START_DOCSTRING = r"""
This class can be used to initialize a sequence-to-sequence model with any pretrained autoencoding model as the
encoder and any pretrained autoregressive model as the decoder. The encoder is loaded via
[`~AutoModel.from_pretrained`] function and the decoder is loaded via [`~AutoModelForCausalLM.from_pretrained`]
function. Cross-attention layers are automatically added to the decoder and should be fine-tuned on a downstream
generative task, like summarization.
The effectiveness of initializing sequence-to-sequence models with pretrained checkpoints for sequence generation
tasks was shown in [Leveraging Pre-trained Checkpoints for Sequence Generation
Tasks](https://arxiv.org/abs/1907.12461) by Sascha Rothe, Shashi Narayan, and Aliaksei Severyn.
After such an Encoder Decoder model has been trained/fine-tuned, it can be saved/loaded just like any other models
(see the examples for more information).
This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads
etc.)
This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
and behavior.
Parameters:
config ([`EncoderDecoderConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""
ENCODER_DECODER_INPUTS_DOCSTRING = r"""
Args:
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
Indices of decoder input sequence tokens in the vocabulary.
Indices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see
`past_key_values`).
For training, `decoder_input_ids` are automatically created by the model by shifting the `labels` to the
right, replacing -100 by the `pad_token_id` and prepending them with the `decoder_start_token_id`.
decoder_attention_mask (`torch.BoolTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also
be used by default.
encoder_outputs (`tuple(torch.FloatTensor)`, *optional*):
This tuple must consist of (`last_hidden_state`, *optional*: `hidden_states`, *optional*: `attentions`)
`last_hidden_state` (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`) is a tensor
of hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the
decoder.
past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
`decoder_input_ids` of shape `(batch_size, sequence_length)`.
inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
model's internal embedding lookup matrix.
decoder_inputs_embeds (`torch.FloatTensor` of shape `(batch_size, target_sequence_length, hidden_size)`, *optional*):
Optionally, instead of passing `decoder_input_ids` you can choose to directly pass an embedded
representation. This is useful if you want more control over how to convert `decoder_input_ids` indices
into associated vectors than the model's internal embedding lookup matrix.
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss for the decoder. Indices should be in `[-100, 0,
..., config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored
(masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
use_cache (`bool`, *optional*):
If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
`past_key_values`).
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
If set to `True`, the model will return a [`~file_utils.Seq2SeqLMOutput`] instead of a plain tuple.
kwargs: (*optional*) Remaining dictionary of keyword arguments. Keyword arguments come in two flavors:
- Without a prefix which will be input as `**encoder_kwargs` for the encoder forward function.
- With a *decoder_* prefix which will be input as `**decoder_kwargs` for the decoder forward function.
"""
def shift_tokens_right(input_ids: torch.Tensor, pad_token_id: int, decoder_start_token_id: int):
"""
Shift input ids one token to the right.
"""
shifted_input_ids = input_ids.new_zeros(input_ids.shape)
shifted_input_ids[:, 1:] = input_ids[:, :-1].clone()
if decoder_start_token_id is None:
raise ValueError("Make sure to set the decoder_start_token_id attribute of the model's configuration.")
shifted_input_ids[:, 0] = decoder_start_token_id
if pad_token_id is None:
raise ValueError("Make sure to set the pad_token_id attribute of the model's configuration.")
# replace possible -100 values in labels by `pad_token_id`
shifted_input_ids.masked_fill_(shifted_input_ids == -100, pad_token_id)
return shifted_input_ids
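# Worked example (illustrative ids): with decoder_start_token_id=101 and pad_token_id=0,
# labels [[5, 6, -100]] become decoder_input_ids [[101, 5, 6]]; any -100 that remains after
# the shift is replaced by the pad id (e.g. [[5, -100, -100]] -> [[101, 5, 0]]).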
@add_start_docstrings(ENCODER_DECODER_START_DOCSTRING)
class EncoderDecoderModel(PreTrainedModel):
r"""
[`EncoderDecoderModel`] is a generic model class that will be instantiated as a transformer architecture with one
of the base model classes of the library as encoder and another one as decoder when created with the
[`~AutoModel.from_pretrained`] class method for the encoder and
[`~AutoModelForCausalLM.from_pretrained`] class method for the decoder.
"""
config_class = EncoderDecoderConfig
base_model_prefix = "encoder_decoder"
def __init__(
self,
config: Optional[PretrainedConfig] = None,
encoder: Optional[PreTrainedModel] = None,
decoder: Optional[PreTrainedModel] = None,
):
if config is None and (encoder is None or decoder is None):
raise ValueError("Either a configuration or an encoder and a decoder has to be provided.")
if config is None:
config = EncoderDecoderConfig.from_encoder_decoder_configs(encoder.config, decoder.config)
else:
if not isinstance(config, self.config_class):
raise ValueError(f"Config: {config} has to be of type {self.config_class}")
if config.decoder.cross_attention_hidden_size is not None:
if config.decoder.cross_attention_hidden_size != config.encoder.hidden_size:
raise ValueError(
"If `cross_attention_hidden_size` is specified in the decoder's configuration, "
"it has to be equal to the encoder's `hidden_size`. "
f"Got {config.decoder.cross_attention_hidden_size} for `config.decoder.cross_attention_hidden_size` "
f"and {config.encoder.hidden_size} for `config.encoder.hidden_size`."
)
# initialize with config
super().__init__(config)
if encoder is None:
from ..auto.modeling_auto import AutoModel
encoder = AutoModel.from_config(config.encoder)
if decoder is None:
from ..auto.modeling_auto import AutoModelForCausalLM
decoder = AutoModelForCausalLM.from_config(config.decoder)
self.encoder = encoder
self.decoder = decoder
if self.encoder.config.to_dict() != self.config.encoder.to_dict():
logger.warning(
f"Config of the encoder: {self.encoder.__class__} is overwritten by shared encoder config: {self.config.encoder}"
)
if self.decoder.config.to_dict() != self.config.decoder.to_dict():
logger.warning(
f"Config of the decoder: {self.decoder.__class__} is overwritten by shared decoder config: {self.config.decoder}"
)
# make sure that the individual model's config refers to the shared config
# so that the updates to the config will be synced
self.encoder.config = self.config.encoder
self.decoder.config = self.config.decoder
# encoder outputs might need to be projected to different dimension for decoder
if (
self.encoder.config.hidden_size != self.decoder.config.hidden_size
and self.decoder.config.cross_attention_hidden_size is None
):
self.enc_to_dec_proj = nn.Linear(self.encoder.config.hidden_size, self.decoder.config.hidden_size)
if self.encoder.get_output_embeddings() is not None:
raise ValueError(
f"The encoder {self.encoder} should not have a LM Head. Please use a model without LM Head"
)
# tie encoder, decoder weights if config set accordingly
self.tie_weights()
def tie_weights(self):
# tie encoder & decoder if needed
if self.config.tie_encoder_decoder:
# tie encoder and decoder base model
decoder_base_model_prefix = self.decoder.base_model_prefix
self._tie_encoder_decoder_weights(
self.encoder, self.decoder._modules[decoder_base_model_prefix], self.decoder.base_model_prefix
)
def get_encoder(self):
return self.encoder
def get_decoder(self):
return self.decoder
def get_input_embeddings(self):
return self.encoder.get_input_embeddings()
def get_output_embeddings(self):
return self.decoder.get_output_embeddings()
def set_output_embeddings(self, new_embeddings):
return self.decoder.set_output_embeddings(new_embeddings)
@classmethod
def from_pretrained(cls, *args, **kwargs):
# At the moment fast initialization is not supported for composite models
if kwargs.get("_fast_init", False):
logger.warning(
"Fast initialization is currently not supported for EncoderDecoderModel. "
"Falling back to slow initialization..."
)
kwargs["_fast_init"] = False
return super().from_pretrained(*args, **kwargs)
@classmethod
def from_encoder_decoder_pretrained(
cls,
encoder_pretrained_model_name_or_path: str = None,
decoder_pretrained_model_name_or_path: str = None,
*model_args,
**kwargs
) -> PreTrainedModel:
r"""
Instantiate an encoder and a decoder from one or two base classes of the library from pretrained model
checkpoints.
The model is set in evaluation mode by default using `model.eval()` (Dropout modules are deactivated). To train
the model, you need to first set it back in training mode with `model.train()`.
Params:
encoder_pretrained_model_name_or_path (`str`, *optional*):
Information necessary to initiate the encoder. Can be either:
- A string, the *model id* of a pretrained model hosted inside a model repo on huggingface.co.
Valid model ids can be located at the root-level, like `bert-base-uncased`, or namespaced under a
user or organization name, like `dbmdz/bert-base-german-cased`.
- A path to a *directory* containing model weights saved using
[`~PreTrainedModel.save_pretrained`], e.g., `./my_model_directory/`.
- A path or url to a *tensorflow index checkpoint file* (e.g, `./tf_model/model.ckpt.index`). In
this case, `from_tf` should be set to `True` and a configuration object should be provided as
`config` argument. This loading path is slower than converting the TensorFlow checkpoint in a
PyTorch model using the provided conversion scripts and loading the PyTorch model afterwards.
decoder_pretrained_model_name_or_path (`str`, *optional*, defaults to `None`):
Information necessary to initiate the decoder. Can be either:
- A string, the *model id* of a pretrained model hosted inside a model repo on huggingface.co.
Valid model ids can be located at the root-level, like `bert-base-uncased`, or namespaced under a
user or organization name, like `dbmdz/bert-base-german-cased`.
- A path to a *directory* containing model weights saved using
[`~PreTrainedModel.save_pretrained`], e.g., `./my_model_directory/`.
- A path or url to a *tensorflow index checkpoint file* (e.g, `./tf_model/model.ckpt.index`). In
this case, `from_tf` should be set to `True` and a configuration object should be provided as
`config` argument. This loading path is slower than converting the TensorFlow checkpoint in a
PyTorch model using the provided conversion scripts and loading the PyTorch model afterwards.
model_args (remaining positional arguments, *optional*):
All remaining positional arguments will be passed to the underlying model's `__init__` method.
kwargs (remaining dictionary of keyword arguments, *optional*):
Can be used to update the configuration object (after it being loaded) and initiate the model (e.g.,
`output_attentions=True`).
- To update the encoder configuration, use the prefix *encoder_* for each configuration parameter.
- To update the decoder configuration, use the prefix *decoder_* for each configuration parameter.
- To update the parent model configuration, do not use a prefix for each configuration parameter.
Behaves differently depending on whether a `config` is provided or automatically loaded.
Example:
```python
>>> from transformers import EncoderDecoderModel
>>> # initialize a bert2bert from two pretrained BERT models. Note that the cross-attention layers will be randomly initialized
>>> model = EncoderDecoderModel.from_encoder_decoder_pretrained("bert-base-uncased", "bert-base-uncased")
>>> # saving model after fine-tuning
>>> model.save_pretrained("./bert2bert")
>>> # load fine-tuned model
>>> model = EncoderDecoderModel.from_pretrained("./bert2bert")
```"""
kwargs_encoder = {
argument[len("encoder_") :]: value for argument, value in kwargs.items() if argument.startswith("encoder_")
}
kwargs_decoder = {
argument[len("decoder_") :]: value for argument, value in kwargs.items() if argument.startswith("decoder_")
}
# remove encoder, decoder kwargs from kwargs
for key in kwargs_encoder.keys():
del kwargs["encoder_" + key]
for key in kwargs_decoder.keys():
del kwargs["decoder_" + key]
# Load and initialize the encoder and decoder
# The distinction between encoder and decoder at the model level is made
# by the value of the flag `is_decoder` that we need to set correctly.
encoder = kwargs_encoder.pop("model", None)
if encoder is None:
if encoder_pretrained_model_name_or_path is None:
raise ValueError(
"If `encoder_model` is not defined as an argument, a `encoder_pretrained_model_name_or_path` has "
"to be defined."
)
if "config" not in kwargs_encoder:
encoder_config, kwargs_encoder = AutoConfig.from_pretrained(
encoder_pretrained_model_name_or_path, **kwargs_encoder, return_unused_kwargs=True
)
if encoder_config.is_decoder is True or encoder_config.add_cross_attention is True:
logger.info(
f"Initializing {encoder_pretrained_model_name_or_path} as a encoder model "
"from a decoder model. Cross-attention and casual mask are disabled."
)
encoder_config.is_decoder = False
encoder_config.add_cross_attention = False
kwargs_encoder["config"] = encoder_config
encoder = AutoModel.from_pretrained(encoder_pretrained_model_name_or_path, *model_args, **kwargs_encoder)
decoder = kwargs_decoder.pop("model", None)
if decoder is None:
if decoder_pretrained_model_name_or_path is None:
raise ValueError(
"If `decoder_model` is not defined as an argument, a `decoder_pretrained_model_name_or_path` has "
"to be defined."
)
if "config" not in kwargs_decoder:
decoder_config, kwargs_decoder = AutoConfig.from_pretrained(
decoder_pretrained_model_name_or_path, **kwargs_decoder, return_unused_kwargs=True
)
if decoder_config.is_decoder is False or decoder_config.add_cross_attention is False:
logger.info(
f"Initializing {decoder_pretrained_model_name_or_path} as a decoder model. "
f"Cross attention layers are added to {decoder_pretrained_model_name_or_path} "
f"and randomly initialized if {decoder_pretrained_model_name_or_path}'s architecture allows for "
"cross attention layers."
)
decoder_config.is_decoder = True
decoder_config.add_cross_attention = True
kwargs_decoder["config"] = decoder_config
if kwargs_decoder["config"].is_decoder is False or kwargs_decoder["config"].add_cross_attention is False:
logger.warning(
f"Decoder model {decoder_pretrained_model_name_or_path} is not initialized as a decoder. "
f"In order to initialize {decoder_pretrained_model_name_or_path} as a decoder, "
"make sure that the attributes `is_decoder` and `add_cross_attention` of `decoder_config` "
"passed to `.from_encoder_decoder_pretrained(...)` are set to `True` or do not pass a "
"`decoder_config` to `.from_encoder_decoder_pretrained(...)`"
)
decoder = AutoModelForCausalLM.from_pretrained(decoder_pretrained_model_name_or_path, **kwargs_decoder)
# instantiate config with corresponding kwargs
config = EncoderDecoderConfig.from_encoder_decoder_configs(encoder.config, decoder.config, **kwargs)
return cls(encoder=encoder, decoder=decoder, config=config)
@add_start_docstrings_to_model_forward(ENCODER_DECODER_INPUTS_DOCSTRING)
@replace_return_docstrings(output_type=Seq2SeqLMOutput, config_class=_CONFIG_FOR_DOC)
def forward(
self,
input_ids=None,
attention_mask=None,
decoder_input_ids=None,
decoder_attention_mask=None,
encoder_outputs=None,
past_key_values=None,
inputs_embeds=None,
decoder_inputs_embeds=None,
labels=None,
use_cache=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
**kwargs,
):
r"""
Returns:
Examples:
```python
>>> from transformers import EncoderDecoderModel, BertTokenizer
>>> import torch
>>> tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
>>> model = EncoderDecoderModel.from_encoder_decoder_pretrained(
... "bert-base-uncased", "bert-base-uncased"
... )  # initialize Bert2Bert from pre-trained checkpoints
>>> # training
>>> model.config.decoder_start_token_id = tokenizer.cls_token_id
>>> model.config.pad_token_id = tokenizer.pad_token_id
>>> model.config.vocab_size = model.config.decoder.vocab_size
>>> input_ids = tokenizer("This is a really long text", return_tensors="pt").input_ids
>>> labels = tokenizer("This is the corresponding summary", return_tensors="pt").input_ids
>>> outputs = model(input_ids=input_ids, labels=labels)
>>> loss, logits = outputs.loss, outputs.logits
>>> # save and load from pretrained
>>> model.save_pretrained("bert2bert")
>>> model = EncoderDecoderModel.from_pretrained("bert2bert")
>>> # generation
>>> generated = model.generate(input_ids)
```"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
kwargs_encoder = {argument: value for argument, value in kwargs.items() if not argument.startswith("decoder_")}
kwargs_decoder = {
argument[len("decoder_") :]: value for argument, value in kwargs.items() if argument.startswith("decoder_")
}
if encoder_outputs is None:
encoder_outputs = self.encoder(
input_ids=input_ids,
attention_mask=attention_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
**kwargs_encoder,
)
encoder_hidden_states = encoder_outputs[0]
# optionally project encoder_hidden_states
if (
self.encoder.config.hidden_size != self.decoder.config.hidden_size
and self.decoder.config.cross_attention_hidden_size is None
):
encoder_hidden_states = self.enc_to_dec_proj(encoder_hidden_states)
if (labels is not None) and (decoder_input_ids is None and decoder_inputs_embeds is None):
decoder_input_ids = shift_tokens_right(
labels, self.config.pad_token_id, self.config.decoder_start_token_id
)
# Decode
decoder_outputs = self.decoder(
input_ids=decoder_input_ids,
attention_mask=decoder_attention_mask,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=attention_mask,
inputs_embeds=decoder_inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
use_cache=use_cache,
past_key_values=past_key_values,
return_dict=return_dict,
**kwargs_decoder,
)
# Compute loss independent from decoder (as some shift the logits inside them)
loss = None
if labels is not None:
warnings.warn(DEPRECATION_WARNING, FutureWarning)
logits = decoder_outputs.logits if return_dict else decoder_outputs[0]
loss_fct = CrossEntropyLoss()
loss = loss_fct(logits.reshape(-1, self.decoder.config.vocab_size), labels.view(-1))
if not return_dict:
if loss is not None:
return (loss,) + decoder_outputs + encoder_outputs
else:
return decoder_outputs + encoder_outputs
return Seq2SeqLMOutput(
loss=loss,
logits=decoder_outputs.logits,
past_key_values=decoder_outputs.past_key_values,
decoder_hidden_states=decoder_outputs.hidden_states,
decoder_attentions=decoder_outputs.attentions,
cross_attentions=decoder_outputs.cross_attentions,
encoder_last_hidden_state=encoder_outputs.last_hidden_state,
encoder_hidden_states=encoder_outputs.hidden_states,
encoder_attentions=encoder_outputs.attentions,
)
def prepare_decoder_input_ids_from_labels(self, labels: torch.Tensor):
return shift_tokens_right(labels, self.config.pad_token_id, self.config.decoder_start_token_id)
def prepare_inputs_for_generation(
self, input_ids, past=None, attention_mask=None, use_cache=None, encoder_outputs=None, **kwargs
):
decoder_inputs = self.decoder.prepare_inputs_for_generation(input_ids, past=past)
decoder_attention_mask = decoder_inputs["attention_mask"] if "attention_mask" in decoder_inputs else None
input_dict = {
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"decoder_input_ids": decoder_inputs["input_ids"],
"encoder_outputs": encoder_outputs,
"past_key_values": decoder_inputs["past_key_values"],
"use_cache": use_cache,
}
return input_dict
def resize_token_embeddings(self, *args, **kwargs):
raise NotImplementedError(
"Resizing the embedding layers via the EncoderDecoderModel directly is not supported. "
"Please use the respective methods of the wrapped objects (model.encoder.resize_token_embeddings(...) or model.decoder.resize_token_embeddings(...))"
)
def _reorder_cache(self, past, beam_idx):
# apply decoder cache reordering here
return self.decoder._reorder_cache(past, beam_idx)
|
py
|
1a5c17c3fc181e3af943e3983844807bede8dec3
|
import _sk_fail; _sk_fail._("imghdr")
|
py
|
1a5c192c7970ef37bee485ece23b51e7743d0685
|
def qmBreak(debugger, command, result, internal_dict):
# the following commands show how to set breakpoints from this script, e.g.:
# debugger.HandleCommand('br s -r \'\[BluetoothVC babyDelegate\]$\'')
# debugger.HandleCommand('br s -n \'[NSData(AESAdditions) AES256EncryptWithKey:iv:]\'')
pass
|
py
|
1a5c1942e0446f417a573bf295aabdb896ff7ebf
|
# coding: utf-8
# Copyright 2020 IBM All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Test the ibm_security_advisor_findings_api_sdk service API operations
"""
import pytest
import unittest
import datetime
# import json
# import os
from ibm_cloud_security_advisor.findings_api_v1 import ValueType
from ibm_cloud_sdk_core import BaseService
from unittest.mock import patch
from unittest import mock
m = mock.Mock()
class TestFindingsApiValueTypeClass(unittest.TestCase):
app = {}
@classmethod
def setup_class(cls):
print("\nrunning setup preparation...")
TestFindingsApiValueTypeClass.app = ValueType(kind="abc", text="abc",
)
# read env vars
#envvars = read_credentials()
@classmethod
def teardown_class(cls):
print("\nrunning teardown, cleaning up the env...")
#print("teardown:delete note")
"""_from_dict test cases """
def test_from_dict_success(self):
ValueType._from_dict({
"kind": "abc",
"text":"abc"
})
def test_from_dict_kind_error(self):
self.assertRaises(
ValueError, ValueType._from_dict, {"text": "abc"})
def test_from_dict_text_error(self):
self.assertRaises(
ValueError, ValueType._from_dict, {"kind": "abc"})
"""_from_dict test cases """
def test_from_dict_bad_key_neg(self):
self.assertRaises(
ValueError, ValueType._from_dict, {"bad_key": "abc"})
"""_to_dict test cases """
def test_to_dict_success(self):
TestFindingsApiValueTypeClass.app.to_dict()
"""__str__ test cases """
def test__str__success(self):
TestFindingsApiValueTypeClass.app.__str__()
"""__eq__ test cases """
def test__eq__isinstance(self):
TestFindingsApiValueTypeClass.app.__eq__(TestFindingsApiValueTypeClass.app)
def test__eq__not_isinstance(self):
TestFindingsApiValueTypeClass.app.__eq__({})
"""__ne__ test cases """
def test__ne__isinstance(self):
TestFindingsApiValueTypeClass.app.__ne__(TestFindingsApiValueTypeClass.app)
|
bzl
|
1a5c1977d2276f0f2b26b7b863f5c3db8fef42a6
|
# Copyright 2018 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
load(
"//kotlin/internal/utils:utils.bzl",
_utils = "utils",
)
KtJvmPluginInfo = provider(
doc = "This provider contains the plugin info for the JVM aspect",
fields = {
"annotation_processors": "a serializeable list of structs containing annotation processor definitions",
"transitive_runtime_jars": "set of jars required during annotation processor execution",
},
)
_EMPTY_PLUGIN_INFO = [KtJvmPluginInfo(annotation_processors = [], transitive_runtime_jars = depset())]
def merge_plugin_infos(attrs):
"""Merge all of the plugin infos found in the provided sequence of attributes.
Returns:
A KtJvmPluginInfo provider; each of the entries is serializable."""
tally = {}
annotation_processors = []
runtime_jars = depset()
for info in [a[KtJvmPluginInfo] for a in attrs]:
for p in info.annotation_processors:
if p.label not in tally:
tally[p.label] = True
annotation_processors.append(p)
runtime_jars += info.transitive_runtime_jars
return KtJvmPluginInfo(
annotation_processors = annotation_processors,
transitive_runtime_jars = runtime_jars,
)
def _kt_jvm_plugin_aspect_impl(target, ctx):
if ctx.rule.kind == "java_plugin":
processor = ctx.rule.attr
merged_deps = java_common.merge([j[JavaInfo] for j in processor.deps])
return [KtJvmPluginInfo(
annotation_processors = [
struct(
label = _utils.restore_label(ctx.label),
processor_class = processor.processor_class,
classpath = [cp.path for cp in merged_deps.transitive_runtime_jars],
generates_api = processor.generates_api,
),
],
transitive_runtime_jars = merged_deps.transitive_runtime_jars,
)]
elif ctx.rule.kind == "java_library":
return [merge_plugin_infos(ctx.rule.attr.exported_plugins)]
else:
return _EMPTY_PLUGIN_INFO
kt_jvm_plugin_aspect = aspect(
doc = """This aspect collects Java Plugins info and other Kotlin compiler plugin configurations from the graph.""",
attr_aspects = [
"plugins",
"exported_plugins",
],
provides = [KtJvmPluginInfo],
implementation = _kt_jvm_plugin_aspect_impl,
)
|
py
|
1a5c1a8e4080602ab7b3d1bd7d0647f1e08de09c
|
# Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
import sys
class NetlibScalapack(CMakePackage):
"""ScaLAPACK is a library of high-performance linear algebra routines for
parallel distributed memory machines
"""
homepage = "http://www.netlib.org/scalapack/"
url = "http://www.netlib.org/scalapack/scalapack-2.0.2.tgz"
version('2.0.2', '2f75e600a2ba155ed9ce974a1c4b536f')
version('2.0.1', '17b8cde589ea0423afe1ec43e7499161')
version('2.0.0', '9e76ae7b291be27faaad47cfc256cbfe')
    # versions before 2.0.0 do not use CMake and require BLACS as
    # a separate package
variant(
'shared',
default=True,
description='Build the shared library version'
)
variant(
'pic',
default=False,
description='Build position independent code'
)
provides('scalapack')
depends_on('mpi')
depends_on('lapack')
depends_on('blas')
depends_on('cmake', when='@2.0.0:', type='build')
# See: https://github.com/Reference-ScaLAPACK/scalapack/issues/9
patch("cmake_fortran_mangle.patch", when='@2.0.2:')
@property
def libs(self):
# Note that the default will be to search
# for 'libnetlib-scalapack.<suffix>'
        shared = '+shared' in self.spec
return find_libraries(
'libscalapack', root=self.prefix, shared=shared, recursive=True
)
def cmake_args(self):
spec = self.spec
options = [
"-DBUILD_SHARED_LIBS:BOOL=%s" % ('ON' if '+shared' in spec else
'OFF'),
"-DBUILD_STATIC_LIBS:BOOL=%s" % ('OFF' if '+shared' in spec else
'ON')
]
# Make sure we use Spack's Lapack:
blas = spec['blas'].libs
lapack = spec['lapack'].libs
options.extend([
'-DLAPACK_FOUND=true',
'-DLAPACK_INCLUDE_DIRS=%s' % spec['lapack'].prefix.include,
'-DLAPACK_LIBRARIES=%s' % (lapack.joined(';')),
'-DBLAS_LIBRARIES=%s' % (blas.joined(';'))
])
if '+pic' in spec:
options.extend([
"-DCMAKE_C_FLAGS=%s" % self.compiler.pic_flag,
"-DCMAKE_Fortran_FLAGS=%s" % self.compiler.pic_flag
])
return options
@run_after('install')
def fix_darwin_install(self):
# The shared libraries are not installed correctly on Darwin:
if (sys.platform == 'darwin') and ('+shared' in self.spec):
fix_darwin_install_name(self.spec.prefix.lib)
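# Illustrative sketch (standalone, outside of Spack): the flag selection in
# cmake_args() above boils down to a small mapping from the '+shared' and
# '+pic' variants to CMake cache options. The helper below is hypothetical and
# reproduces only that mapping for a set of enabled variant names; the real
# package also takes the PIC flag from self.compiler and wires in Spack's
# blas/lapack libraries.
def _scalapack_flag_sketch(enabled_variants, pic_flag="-fPIC"):
    shared = "shared" in enabled_variants
    options = [
        "-DBUILD_SHARED_LIBS:BOOL=%s" % ("ON" if shared else "OFF"),
        "-DBUILD_STATIC_LIBS:BOOL=%s" % ("OFF" if shared else "ON"),
    ]
    if "pic" in enabled_variants:
        options.extend([
            "-DCMAKE_C_FLAGS=%s" % pic_flag,
            "-DCMAKE_Fortran_FLAGS=%s" % pic_flag,
        ])
    return options
# Example: _scalapack_flag_sketch({"shared", "pic"}) yields the ON/OFF pair
# plus the two PIC compiler-flag options.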
|
py
|
1a5c1b56fbb49e02e2fa7e20308bd9567e544e42
|
"""
Tests for Term.
"""
from collections import Counter
from itertools import product
from unittest import TestCase
from toolz import assoc
import pandas as pd
from zipline.assets import Asset, ExchangeInfo
from zipline.errors import (
DTypeNotSpecified,
InvalidOutputName,
NonWindowSafeInput,
NotDType,
TermInputsNotSpecified,
NonPipelineInputs,
TermOutputsEmpty,
UnsupportedDType,
WindowLengthNotSpecified,
)
from zipline.pipeline import (
Classifier,
CustomClassifier,
CustomFactor,
Factor,
Filter,
ExecutionPlan,
)
from zipline.pipeline.data import Column, DataSet
from zipline.pipeline.data.testing import TestingDataSet
from zipline.pipeline.expression import NUMEXPR_MATH_FUNCS
from zipline.pipeline.factors import RecarrayField
from zipline.pipeline.sentinels import NotSpecified
from zipline.pipeline.term import AssetExists, Slice
from zipline.testing import parameter_space
from zipline.testing.fixtures import WithTradingSessions, ZiplineTestCase
from zipline.testing.predicates import (
assert_equal,
assert_raises,
assert_raises_regex,
assert_regex,
)
from zipline.utils.numpy_utils import (
bool_dtype,
categorical_dtype,
complex128_dtype,
datetime64ns_dtype,
float64_dtype,
int64_dtype,
NoDefaultMissingValue,
)
class SomeDataSet(DataSet):
foo = Column(float64_dtype)
bar = Column(float64_dtype)
buzz = Column(float64_dtype)
class SubDataSet(SomeDataSet):
pass
class SubDataSetNewCol(SomeDataSet):
qux = Column(float64_dtype)
class SomeFactor(Factor):
dtype = float64_dtype
window_length = 5
inputs = [SomeDataSet.foo, SomeDataSet.bar]
SomeFactorAlias = SomeFactor
class SomeOtherFactor(Factor):
dtype = float64_dtype
window_length = 5
inputs = [SomeDataSet.bar, SomeDataSet.buzz]
class DateFactor(Factor):
dtype = datetime64ns_dtype
window_length = 5
inputs = [SomeDataSet.bar, SomeDataSet.buzz]
class NoLookbackFactor(Factor):
dtype = float64_dtype
window_length = 0
class GenericCustomFactor(CustomFactor):
dtype = float64_dtype
window_length = 5
inputs = [SomeDataSet.foo]
class MultipleOutputs(CustomFactor):
dtype = float64_dtype
window_length = 5
inputs = [SomeDataSet.foo, SomeDataSet.bar]
outputs = ['alpha', 'beta']
def some_method(self):
return
class GenericFilter(Filter):
dtype = bool_dtype
window_length = 0
inputs = []
class GenericClassifier(Classifier):
dtype = categorical_dtype
window_length = 0
inputs = []
def gen_equivalent_factors():
"""
Return an iterator of SomeFactor instances that should all be the same
object.
"""
yield SomeFactor()
yield SomeFactor(inputs=NotSpecified)
yield SomeFactor(SomeFactor.inputs)
yield SomeFactor(inputs=SomeFactor.inputs)
yield SomeFactor([SomeDataSet.foo, SomeDataSet.bar])
yield SomeFactor(window_length=SomeFactor.window_length)
yield SomeFactor(window_length=NotSpecified)
yield SomeFactor(
[SomeDataSet.foo, SomeDataSet.bar],
window_length=NotSpecified,
)
yield SomeFactor(
[SomeDataSet.foo, SomeDataSet.bar],
window_length=SomeFactor.window_length,
)
yield SomeFactorAlias()
def to_dict(l):
"""
Convert a list to a dict with keys drawn from '0', '1', '2', ...
Examples
--------
>>> to_dict([2, 3, 4]) # doctest: +SKIP
{'0': 2, '1': 3, '2': 4}
"""
return dict(zip(map(str, range(len(l))), l))
class DependencyResolutionTestCase(WithTradingSessions, ZiplineTestCase):
TRADING_CALENDAR_STRS = ('NYSE',)
START_DATE = pd.Timestamp('2014-01-02', tz='UTC')
END_DATE = pd.Timestamp('2014-12-31', tz='UTC')
execution_plan_start = pd.Timestamp('2014-06-01', tz='UTC')
execution_plan_end = pd.Timestamp('2014-06-30', tz='UTC')
def check_dependency_order(self, ordered_terms):
seen = set()
for term in ordered_terms:
for dep in term.dependencies:
self.assertIn(dep, seen)
seen.add(term)
def make_execution_plan(self, terms):
return ExecutionPlan(
terms,
self.nyse_sessions,
self.execution_plan_start,
self.execution_plan_end,
)
def test_single_factor(self):
"""
Test dependency resolution for a single factor.
"""
def check_output(graph):
resolution_order = list(graph.ordered())
self.assertEqual(len(resolution_order), 4)
self.check_dependency_order(resolution_order)
self.assertIn(AssetExists(), resolution_order)
self.assertIn(SomeDataSet.foo, resolution_order)
self.assertIn(SomeDataSet.bar, resolution_order)
self.assertIn(SomeFactor(), resolution_order)
self.assertEqual(
graph.graph.node[SomeDataSet.foo]['extra_rows'],
4,
)
self.assertEqual(
graph.graph.node[SomeDataSet.bar]['extra_rows'],
4,
)
for foobar in gen_equivalent_factors():
check_output(self.make_execution_plan(to_dict([foobar])))
def test_single_factor_instance_args(self):
"""
Test dependency resolution for a single factor with arguments passed to
the constructor.
"""
bar, buzz = SomeDataSet.bar, SomeDataSet.buzz
factor = SomeFactor([bar, buzz], window_length=5)
graph = self.make_execution_plan(to_dict([factor]))
resolution_order = list(graph.ordered())
# SomeFactor, its inputs, and AssetExists()
self.assertEqual(len(resolution_order), 4)
self.check_dependency_order(resolution_order)
self.assertIn(AssetExists(), resolution_order)
self.assertEqual(graph.extra_rows[AssetExists()], 4)
self.assertIn(bar, resolution_order)
self.assertIn(buzz, resolution_order)
self.assertIn(SomeFactor([bar, buzz], window_length=5),
resolution_order)
self.assertEqual(graph.extra_rows[bar], 4)
self.assertEqual(graph.extra_rows[buzz], 4)
def test_reuse_loadable_terms(self):
"""
Test that raw inputs only show up in the dependency graph once.
"""
f1 = SomeFactor([SomeDataSet.foo, SomeDataSet.bar])
f2 = SomeOtherFactor([SomeDataSet.bar, SomeDataSet.buzz])
graph = self.make_execution_plan(to_dict([f1, f2]))
resolution_order = list(graph.ordered())
# bar should only appear once.
self.assertEqual(len(resolution_order), 6)
self.assertEqual(len(set(resolution_order)), 6)
self.check_dependency_order(resolution_order)
def test_disallow_recursive_lookback(self):
with self.assertRaises(NonWindowSafeInput):
SomeFactor(inputs=[SomeFactor(), SomeDataSet.foo])
def test_window_safety_one_window_length(self):
"""
Test that window safety problems are only raised if
the parent factor has window length greater than 1
"""
with self.assertRaises(NonWindowSafeInput):
SomeFactor(inputs=[SomeOtherFactor()])
SomeFactor(inputs=[SomeOtherFactor()], window_length=1)
class ObjectIdentityTestCase(TestCase):
def assertSameObject(self, *objs):
first = objs[0]
for obj in objs:
self.assertIs(first, obj)
def assertDifferentObjects(self, *objs):
id_counts = Counter(map(id, objs))
((most_common_id, count),) = id_counts.most_common(1)
if count > 1:
dupe = [o for o in objs if id(o) == most_common_id][0]
self.fail("%s appeared %d times in %s" % (dupe, count, objs))
def test_instance_caching(self):
self.assertSameObject(*gen_equivalent_factors())
self.assertIs(
SomeFactor(window_length=SomeFactor.window_length + 1),
SomeFactor(window_length=SomeFactor.window_length + 1),
)
self.assertIs(
SomeFactor(dtype=float64_dtype),
SomeFactor(dtype=float64_dtype),
)
self.assertIs(
SomeFactor(inputs=[SomeFactor.inputs[1], SomeFactor.inputs[0]]),
SomeFactor(inputs=[SomeFactor.inputs[1], SomeFactor.inputs[0]]),
)
mask = SomeFactor() + SomeOtherFactor()
self.assertIs(SomeFactor(mask=mask), SomeFactor(mask=mask))
def test_instance_caching_multiple_outputs(self):
self.assertIs(MultipleOutputs(), MultipleOutputs())
self.assertIs(
MultipleOutputs(),
MultipleOutputs(outputs=MultipleOutputs.outputs),
)
self.assertIs(
MultipleOutputs(
outputs=[
MultipleOutputs.outputs[1], MultipleOutputs.outputs[0],
],
),
MultipleOutputs(
outputs=[
MultipleOutputs.outputs[1], MultipleOutputs.outputs[0],
],
),
)
# Ensure that both methods of accessing our outputs return the same
# things.
multiple_outputs = MultipleOutputs()
alpha, beta = MultipleOutputs()
self.assertIs(alpha, multiple_outputs.alpha)
self.assertIs(beta, multiple_outputs.beta)
def test_instance_caching_of_slices(self):
my_asset = Asset(
1,
exchange_info=ExchangeInfo('TEST FULL', 'TEST', 'US'),
)
f = GenericCustomFactor()
f_slice = f[my_asset]
self.assertIs(f_slice, Slice(GenericCustomFactor(), my_asset))
f = GenericFilter()
f_slice = f[my_asset]
self.assertIs(f_slice, Slice(GenericFilter(), my_asset))
c = GenericClassifier()
c_slice = c[my_asset]
self.assertIs(c_slice, Slice(GenericClassifier(), my_asset))
def test_instance_non_caching(self):
f = SomeFactor()
# Different window_length.
self.assertIsNot(
f,
SomeFactor(window_length=SomeFactor.window_length + 1),
)
# Different dtype
self.assertIsNot(
f,
SomeFactor(dtype=datetime64ns_dtype)
)
# Reordering inputs changes semantics.
self.assertIsNot(
f,
SomeFactor(inputs=[SomeFactor.inputs[1], SomeFactor.inputs[0]]),
)
def test_instance_non_caching_redefine_class(self):
orig_foobar_instance = SomeFactorAlias()
class SomeFactor(Factor):
dtype = float64_dtype
window_length = 5
inputs = [SomeDataSet.foo, SomeDataSet.bar]
self.assertIsNot(orig_foobar_instance, SomeFactor())
def test_instance_non_caching_multiple_outputs(self):
multiple_outputs = MultipleOutputs()
# Different outputs.
self.assertIsNot(
MultipleOutputs(), MultipleOutputs(outputs=['beta', 'gamma']),
)
# Reordering outputs.
self.assertIsNot(
multiple_outputs,
MultipleOutputs(
outputs=[
MultipleOutputs.outputs[1], MultipleOutputs.outputs[0],
],
),
)
# Different factors sharing an output name should produce different
# RecarrayField factors.
orig_beta = multiple_outputs.beta
beta, gamma = MultipleOutputs(outputs=['beta', 'gamma'])
self.assertIsNot(beta, orig_beta)
def test_instance_caching_binops(self):
f = SomeFactor()
g = SomeOtherFactor()
for lhs, rhs in product([f, g], [f, g]):
self.assertIs((lhs + rhs), (lhs + rhs))
self.assertIs((lhs - rhs), (lhs - rhs))
self.assertIs((lhs * rhs), (lhs * rhs))
self.assertIs((lhs / rhs), (lhs / rhs))
self.assertIs((lhs ** rhs), (lhs ** rhs))
self.assertIs((1 + rhs), (1 + rhs))
self.assertIs((rhs + 1), (rhs + 1))
self.assertIs((1 - rhs), (1 - rhs))
self.assertIs((rhs - 1), (rhs - 1))
self.assertIs((2 * rhs), (2 * rhs))
self.assertIs((rhs * 2), (rhs * 2))
self.assertIs((2 / rhs), (2 / rhs))
self.assertIs((rhs / 2), (rhs / 2))
self.assertIs((2 ** rhs), (2 ** rhs))
self.assertIs((rhs ** 2), (rhs ** 2))
self.assertIs((f + g) + (f + g), (f + g) + (f + g))
def test_instance_caching_unary_ops(self):
f = SomeFactor()
self.assertIs(-f, -f)
self.assertIs(--f, --f)
self.assertIs(---f, ---f)
def test_instance_caching_math_funcs(self):
f = SomeFactor()
for funcname in NUMEXPR_MATH_FUNCS:
method = getattr(f, funcname)
self.assertIs(method(), method())
def test_instance_caching_grouped_transforms(self):
f = SomeFactor()
c = GenericClassifier()
m = GenericFilter()
for meth in f.demean, f.zscore, f.rank:
self.assertIs(meth(), meth())
self.assertIs(meth(groupby=c), meth(groupby=c))
self.assertIs(meth(mask=m), meth(mask=m))
self.assertIs(meth(groupby=c, mask=m), meth(groupby=c, mask=m))
class SomeFactorParameterized(SomeFactor):
params = ('a', 'b')
def test_parameterized_term(self):
f = self.SomeFactorParameterized(a=1, b=2)
self.assertEqual(f.params, {'a': 1, 'b': 2})
g = self.SomeFactorParameterized(a=1, b=3)
h = self.SomeFactorParameterized(a=2, b=2)
self.assertDifferentObjects(f, g, h)
f2 = self.SomeFactorParameterized(a=1, b=2)
f3 = self.SomeFactorParameterized(b=2, a=1)
self.assertSameObject(f, f2, f3)
self.assertEqual(f.params['a'], 1)
self.assertEqual(f.params['b'], 2)
self.assertEqual(f.window_length, SomeFactor.window_length)
self.assertEqual(f.inputs, tuple(SomeFactor.inputs))
def test_parameterized_term_non_hashable_arg(self):
with assert_raises(TypeError) as e:
self.SomeFactorParameterized(a=[], b=1)
assert_equal(
str(e.exception),
"SomeFactorParameterized expected a hashable value for parameter"
" 'a', but got [] instead.",
)
with assert_raises(TypeError) as e:
self.SomeFactorParameterized(a=1, b=[])
assert_equal(
str(e.exception),
"SomeFactorParameterized expected a hashable value for parameter"
" 'b', but got [] instead.",
)
with assert_raises(TypeError) as e:
self.SomeFactorParameterized(a=[], b=[])
assert_regex(
str(e.exception),
r"SomeFactorParameterized expected a hashable value for parameter"
r" '(a|b)', but got \[\] instead\.",
)
def test_parameterized_term_default_value(self):
defaults = {'a': 'default for a', 'b': 'default for b'}
class F(Factor):
params = defaults
inputs = (SomeDataSet.foo,)
dtype = 'f8'
window_length = 5
assert_equal(F().params, defaults)
assert_equal(F(a='new a').params, assoc(defaults, 'a', 'new a'))
assert_equal(F(b='new b').params, assoc(defaults, 'b', 'new b'))
assert_equal(
F(a='new a', b='new b').params,
{'a': 'new a', 'b': 'new b'},
)
def test_parameterized_term_default_value_with_not_specified(self):
defaults = {'a': 'default for a', 'b': NotSpecified}
class F(Factor):
params = defaults
inputs = (SomeDataSet.foo,)
dtype = 'f8'
window_length = 5
pattern = r"F expected a keyword parameter 'b'\."
with assert_raises_regex(TypeError, pattern):
F()
with assert_raises_regex(TypeError, pattern):
F(a='new a')
assert_equal(F(b='new b').params, assoc(defaults, 'b', 'new b'))
assert_equal(
F(a='new a', b='new b').params,
{'a': 'new a', 'b': 'new b'},
)
def test_bad_input(self):
class SomeFactor(Factor):
dtype = float64_dtype
class SomeFactorDefaultInputs(SomeFactor):
inputs = (SomeDataSet.foo, SomeDataSet.bar)
class SomeFactorDefaultLength(SomeFactor):
window_length = 10
class SomeFactorNoDType(SomeFactor):
window_length = 10
inputs = (SomeDataSet.foo,)
dtype = NotSpecified
with self.assertRaises(TermInputsNotSpecified):
SomeFactor(window_length=1)
with self.assertRaises(TermInputsNotSpecified):
SomeFactorDefaultLength()
with self.assertRaises(NonPipelineInputs):
SomeFactor(window_length=1, inputs=[2])
with self.assertRaises(WindowLengthNotSpecified):
SomeFactor(inputs=(SomeDataSet.foo,))
with self.assertRaises(WindowLengthNotSpecified):
SomeFactorDefaultInputs()
with self.assertRaises(DTypeNotSpecified):
SomeFactorNoDType()
with self.assertRaises(NotDType):
SomeFactor(dtype=1)
with self.assertRaises(NoDefaultMissingValue):
SomeFactor(dtype=int64_dtype)
with self.assertRaises(UnsupportedDType):
SomeFactor(dtype=complex128_dtype)
with self.assertRaises(TermOutputsEmpty):
MultipleOutputs(outputs=[])
def test_bad_output_access(self):
with self.assertRaises(AttributeError) as e:
SomeFactor().not_an_attr
errmsg = str(e.exception)
self.assertEqual(
errmsg, "'SomeFactor' object has no attribute 'not_an_attr'",
)
mo = MultipleOutputs()
with self.assertRaises(AttributeError) as e:
mo.not_an_attr
errmsg = str(e.exception)
expected = (
"Instance of MultipleOutputs has no output named 'not_an_attr'."
" Possible choices are: ('alpha', 'beta')."
)
self.assertEqual(errmsg, expected)
with self.assertRaises(ValueError) as e:
alpha, beta = GenericCustomFactor()
errmsg = str(e.exception)
self.assertEqual(
errmsg, "GenericCustomFactor does not have multiple outputs.",
)
# Public method, user-defined method.
# Accessing these attributes should return the output, not the method.
conflicting_output_names = ['zscore', 'some_method']
mo = MultipleOutputs(outputs=conflicting_output_names)
for name in conflicting_output_names:
self.assertIsInstance(getattr(mo, name), RecarrayField)
# Non-callable attribute, private method, special method.
disallowed_output_names = ['inputs', '_init', '__add__']
for name in disallowed_output_names:
with self.assertRaises(InvalidOutputName):
GenericCustomFactor(outputs=[name])
def test_require_super_call_in_validate(self):
class MyFactor(Factor):
inputs = ()
dtype = float64_dtype
window_length = 0
def _validate(self):
"Woops, I didn't call super()!"
with self.assertRaises(AssertionError) as e:
MyFactor()
errmsg = str(e.exception)
self.assertEqual(
errmsg,
"Term._validate() was not called.\n"
"This probably means that you overrode _validate"
" without calling super()."
)
def test_latest_on_different_dtypes(self):
factor_dtypes = (float64_dtype, datetime64ns_dtype)
for column in TestingDataSet.columns:
if column.dtype == bool_dtype:
self.assertIsInstance(column.latest, Filter)
elif (column.dtype == int64_dtype
or column.dtype.kind in ('O', 'S', 'U')):
self.assertIsInstance(column.latest, Classifier)
elif column.dtype in factor_dtypes:
self.assertIsInstance(column.latest, Factor)
else:
self.fail(
"Unknown dtype %s for column %s" % (column.dtype, column)
)
# These should be the same value, plus this has the convenient
# property of correctly handling `NaN`.
self.assertIs(column.missing_value, column.latest.missing_value)
def test_failure_timing_on_bad_dtypes(self):
# Just constructing a bad column shouldn't fail.
Column(dtype=int64_dtype)
with self.assertRaises(NoDefaultMissingValue) as e:
class BadDataSet(DataSet):
bad_column = Column(dtype=int64_dtype)
float_column = Column(dtype=float64_dtype)
int_column = Column(dtype=int64_dtype, missing_value=3)
self.assertTrue(
str(e.exception.args[0]).startswith(
"Failed to create Column with name 'bad_column'"
)
)
Column(dtype=complex128_dtype)
with self.assertRaises(UnsupportedDType):
class BadDataSetComplex(DataSet):
bad_column = Column(dtype=complex128_dtype)
float_column = Column(dtype=float64_dtype)
int_column = Column(dtype=int64_dtype, missing_value=3)
class SubDataSetTestCase(TestCase):
def test_subdataset(self):
some_dataset_map = {
column.name: column for column in SomeDataSet.columns
}
sub_dataset_map = {
column.name: column for column in SubDataSet.columns
}
self.assertEqual(
{column.name for column in SomeDataSet.columns},
{column.name for column in SubDataSet.columns},
)
for k, some_dataset_column in some_dataset_map.items():
sub_dataset_column = sub_dataset_map[k]
self.assertIsNot(
some_dataset_column,
sub_dataset_column,
'subclass column %r should not have the same identity as'
' the parent' % k,
)
self.assertEqual(
some_dataset_column.dtype,
sub_dataset_column.dtype,
'subclass column %r should have the same dtype as the parent' %
k,
)
def test_add_column(self):
some_dataset_map = {
column.name: column for column in SomeDataSet.columns
}
sub_dataset_new_col_map = {
column.name: column for column in SubDataSetNewCol.columns
}
sub_col_names = {column.name for column in SubDataSetNewCol.columns}
# check our extra col
self.assertIn('qux', sub_col_names)
self.assertEqual(
sub_dataset_new_col_map['qux'].dtype,
float64_dtype,
)
self.assertEqual(
{column.name for column in SomeDataSet.columns},
sub_col_names - {'qux'},
)
for k, some_dataset_column in some_dataset_map.items():
sub_dataset_column = sub_dataset_new_col_map[k]
self.assertIsNot(
some_dataset_column,
sub_dataset_column,
'subclass column %r should not have the same identity as'
' the parent' % k,
)
self.assertEqual(
some_dataset_column.dtype,
sub_dataset_column.dtype,
'subclass column %r should have the same dtype as the parent' %
k,
)
@parameter_space(
dtype_=[categorical_dtype, int64_dtype],
outputs_=[('a',), ('a', 'b')],
)
def test_reject_multi_output_classifiers(self, dtype_, outputs_):
"""
Multi-output CustomClassifiers don't work because they use special
output allocation for string arrays.
"""
class SomeClassifier(CustomClassifier):
dtype = dtype_
window_length = 5
inputs = [SomeDataSet.foo, SomeDataSet.bar]
outputs = outputs_
missing_value = dtype_.type('123')
expected_error = (
"SomeClassifier does not support custom outputs, "
"but received custom outputs={outputs}.".format(outputs=outputs_)
)
with self.assertRaises(ValueError) as e:
SomeClassifier()
self.assertEqual(str(e.exception), expected_error)
with self.assertRaises(ValueError) as e:
SomeClassifier()
self.assertEqual(str(e.exception), expected_error)
def test_unreasonable_missing_values(self):
for base_type, dtype_, bad_mv in ((Factor, float64_dtype, 'ayy'),
(Filter, bool_dtype, 'lmao'),
(Classifier, int64_dtype, 'lolwut'),
(Classifier, categorical_dtype, 7)):
class SomeTerm(base_type):
inputs = ()
window_length = 0
missing_value = bad_mv
dtype = dtype_
with self.assertRaises(TypeError) as e:
SomeTerm()
prefix = (
"^Missing value {mv!r} is not a valid choice "
"for term SomeTerm with dtype {dtype}.\n\n"
"Coercion attempt failed with:"
).format(mv=bad_mv, dtype=dtype_)
self.assertRegexpMatches(str(e.exception), prefix)
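# Illustrative sketch (not zipline's implementation): the identity tests in
# ObjectIdentityTestCase rely on terms being memoized by their construction
# parameters, so that equal parameters yield the *same* object. A stripped
# down version of that caching pattern, keyed on hashable keyword arguments:
class _CachedTermSketch(object):
    _cache = {}

    def __new__(cls, **params):
        # Hashability is required, mirroring the TypeError that the
        # parameterized-term tests above expect for unhashable values.
        try:
            key = (cls, frozenset(params.items()))
        except TypeError:
            raise TypeError("expected hashable values for parameters")
        if key not in cls._cache:
            instance = super(_CachedTermSketch, cls).__new__(cls)
            instance.params = dict(params)
            cls._cache[key] = instance
        return cls._cache[key]
# With this sketch, _CachedTermSketch(a=1, b=2) is _CachedTermSketch(b=2, a=1)
# holds, which is the same observable behavior test_parameterized_term checks
# for SomeFactorParameterized(a=1, b=2).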
|