content (stringlengths 7-928k) | avg_line_length (float64 3.5-33.8k) | max_line_length (int64 6-139k) | alphanum_fraction (float64 0.08-0.96) | licenses (sequence) | repository_name (stringlengths 7-104) | path (stringlengths 4-230) | size (int64 7-928k) | lang (stringclasses 1: Python)
---|---|---|---|---|---|---|---|---
"""Euler explicit time advancement routine"""
from .projection import predictor, corrector, divergence
from .stats import stats
def advance_euler(gridc, gridx, gridy, scalars, grid_var_list, predcorr):
"""
Subroutine for the fractional-step Euler explicit time advancement of the Navier-Stokes equations
Arguments
---------
gridc : object
Grid object for cell centered variables
gridx : object
Grid object for x-face variables
gridy : object
Grid object for y-face variables
scalars: object
Scalars object to access the time-step and Reynolds number
grid_var_list : list
List containing variable names for velocity, RHS term from the previous time-step, divergence and pressure
predcorr : string
Flag for the fractional step method equations - 'predictor', 'divergence', 'corrector'
"""
velc = grid_var_list[0]
hvar = grid_var_list[1]
divv = grid_var_list[2]
pres = grid_var_list[3]
if(predcorr == 'predictor'):
# Calculate predicted velocity: u* = dt*H(u^n)
predictor(gridx, gridy, velc, hvar, scalars.variable['Re'], scalars.variable['dt'])
elif(predcorr == 'divergence'):
# Calculate RHS for the pressure Poisson solver div(u)/dt
divergence(gridc, gridx, gridy, velc, divv, ifac = scalars.variable['dt'])
elif(predcorr == 'corrector'):
# Calculate corrected velocity u^n+1 = u* - dt * grad(P)
corrector(gridc, gridx, gridy, velc, pres, scalars.variable['dt'])
# Calculate divergence of the corrected velocity to display stats
divergence(gridc, gridx, gridy, velc, divv)
# Calculate stats
scalars.stats.update(stats(gridc, gridx, gridy, velc, pres, divv))
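# Illustrative call sequence (a sketch, not part of the original module): one
# explicit Euler step is typically split across the three flags above, with a
# pressure Poisson solve in between. The grid objects, the Scalars object and
# the variable-name list are assumed to come from the surrounding flowX setup.
#
#   ins_vars = ['velc', 'hvar', 'divv', 'pres']
#   advance_euler(gridc, gridx, gridy, scalars, ins_vars, 'predictor')
#   advance_euler(gridc, gridx, gridy, scalars, ins_vars, 'divergence')
#   # ... solve the pressure Poisson equation on gridc here ...
#   advance_euler(gridc, gridx, gridy, scalars, ins_vars, 'corrector')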
| 30.457627 | 117 | 0.657206 | [
"BSD-3-Clause"
] | AbhilashReddyM/flowX | flowx/ins/euler.py | 1,797 | Python |
"""
Stacked area plot for 1D arrays inspired by Douglas Y'barbo's stackoverflow
answer:
http://stackoverflow.com/questions/2225995/how-can-i-create-stacked-line-graph-with-matplotlib
(http://stackoverflow.com/users/66549/doug)
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
from six.moves import xrange
from cycler import cycler
import numpy as np
__all__ = ['stackplot']
def stackplot(axes, x, *args, **kwargs):
"""Draws a stacked area plot.
*x* : 1d array of dimension N
*y* : 2d array of dimension MxN, OR any number of 1d arrays each of dimension
1xN. The data is assumed to be unstacked. Each of the following
calls is legal::
stackplot(x, y) # where y is MxN
stackplot(x, y1, y2, y3, y4) # where y1, y2, y3, y4 are all 1xN
Keyword arguments:
*baseline* : ['zero', 'sym', 'wiggle', 'weighted_wiggle']
Method used to calculate the baseline. 'zero' is just a
simple stacked plot. 'sym' is symmetric around zero and
is sometimes called `ThemeRiver`. 'wiggle' minimizes the
sum of the squared slopes. 'weighted_wiggle' does the
same but weights to account for size of each layer.
It is also called `Streamgraph`-layout. More details
can be found at http://leebyron.com/streamgraph/.
*labels* : A list or tuple of labels to assign to each data series.
*colors* : A list or tuple of colors. These will be cycled through and
used to colour the stacked areas.
All other keyword arguments are passed to
:func:`~matplotlib.Axes.fill_between`
Returns *r* : A list of
:class:`~matplotlib.collections.PolyCollection`, one for each
element in the stacked area plot.
"""
if len(args) == 1:
y = np.atleast_2d(*args)
elif len(args) > 1:
y = np.row_stack(args)
labels = iter(kwargs.pop('labels', []))
colors = kwargs.pop('colors', None)
if colors is not None:
axes.set_prop_cycle(cycler('color', colors))
baseline = kwargs.pop('baseline', 'zero')
# Assume data passed has not been 'stacked', so stack it here.
stack = np.cumsum(y, axis=0)
if baseline == 'zero':
first_line = 0.
elif baseline == 'sym':
first_line = -np.sum(y, 0) * 0.5
stack += first_line[None, :]
elif baseline == 'wiggle':
m = y.shape[0]
first_line = (y * (m - 0.5 - np.arange(0, m)[:, None])).sum(0)
first_line /= -m
stack += first_line
elif baseline == 'weighted_wiggle':
m, n = y.shape
center = np.zeros(n)
total = np.sum(y, 0)
# multiply by 1/total (or zero) to avoid infinities in the division:
inv_total = np.zeros_like(total)
mask = total > 0
inv_total[mask] = 1.0 / total[mask]
increase = np.hstack((y[:, 0:1], np.diff(y)))
below_size = total - stack
below_size += 0.5 * y
move_up = below_size * inv_total
move_up[:, 0] = 0.5
center = (move_up - 0.5) * increase
center = np.cumsum(center.sum(0))
first_line = center - 0.5 * total
stack += first_line
else:
errstr = "Baseline method %s not recognised. " % baseline
errstr += "Expected 'zero', 'sym', 'wiggle' or 'weighted_wiggle'"
raise ValueError(errstr)
# Color between x = 0 and the first array.
color = axes._get_lines.get_next_color()
coll = axes.fill_between(x, first_line, stack[0, :],
facecolor=color, label=six.next(labels, None),
**kwargs)
coll.sticky_edges.y[:] = [0]
r = [coll]
# Color between array i-1 and array i
for i in xrange(len(y) - 1):
color = axes._get_lines.get_next_color()
r.append(axes.fill_between(x, stack[i, :], stack[i + 1, :],
facecolor=color,
label=six.next(labels, None),
**kwargs))
return r
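# Minimal usage sketch (not part of the original module): build a few random
# layers and draw them with the function above. pyplot is imported locally so
# the module keeps its original import footprint.
if __name__ == '__main__':
    import matplotlib.pyplot as plt
    rng = np.random.RandomState(0)
    x_demo = np.arange(20)
    y_demo = rng.rand(3, 20)  # three unstacked layers
    fig, ax = plt.subplots()
    stackplot(ax, x_demo, y_demo, baseline='weighted_wiggle',
              labels=['a', 'b', 'c'])
    ax.legend(loc='upper left')
    plt.show()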
| 33.584 | 94 | 0.579085 | [
"MIT"
] | Owen-Gillespie/BeachHacks-ShowerSuite | lib/matplotlib/stackplot.py | 4,198 | Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import simplejson as json
from alipay.aop.api.FileItem import FileItem
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.AlipayOpenMiniInnerversionOnlinePublishModel import AlipayOpenMiniInnerversionOnlinePublishModel
class AlipayOpenMiniInnerversionOnlinePublishRequest(object):
def __init__(self, biz_model=None):
self._biz_model = biz_model
self._biz_content = None
self._version = "1.0"
self._terminal_type = None
self._terminal_info = None
self._prod_code = None
self._notify_url = None
self._return_url = None
self._udf_params = None
self._need_encrypt = False
@property
def biz_model(self):
return self._biz_model
@biz_model.setter
def biz_model(self, value):
self._biz_model = value
@property
def biz_content(self):
return self._biz_content
@biz_content.setter
def biz_content(self, value):
if isinstance(value, AlipayOpenMiniInnerversionOnlinePublishModel):
self._biz_content = value
else:
self._biz_content = AlipayOpenMiniInnerversionOnlinePublishModel.from_alipay_dict(value)
@property
def version(self):
return self._version
@version.setter
def version(self, value):
self._version = value
@property
def terminal_type(self):
return self._terminal_type
@terminal_type.setter
def terminal_type(self, value):
self._terminal_type = value
@property
def terminal_info(self):
return self._terminal_info
@terminal_info.setter
def terminal_info(self, value):
self._terminal_info = value
@property
def prod_code(self):
return self._prod_code
@prod_code.setter
def prod_code(self, value):
self._prod_code = value
@property
def notify_url(self):
return self._notify_url
@notify_url.setter
def notify_url(self, value):
self._notify_url = value
@property
def return_url(self):
return self._return_url
@return_url.setter
def return_url(self, value):
self._return_url = value
@property
def udf_params(self):
return self._udf_params
@udf_params.setter
def udf_params(self, value):
if not isinstance(value, dict):
return
self._udf_params = value
@property
def need_encrypt(self):
return self._need_encrypt
@need_encrypt.setter
def need_encrypt(self, value):
self._need_encrypt = value
def add_other_text_param(self, key, value):
if not self.udf_params:
self.udf_params = dict()
self.udf_params[key] = value
def get_params(self):
params = dict()
params[P_METHOD] = 'alipay.open.mini.innerversion.online.publish'
params[P_VERSION] = self.version
if self.biz_model:
params[P_BIZ_CONTENT] = json.dumps(obj=self.biz_model.to_alipay_dict(), use_decimal=True, ensure_ascii=False, sort_keys=True, separators=(',', ':'))
if self.biz_content:
if hasattr(self.biz_content, 'to_alipay_dict'):
params['biz_content'] = json.dumps(obj=self.biz_content.to_alipay_dict(), use_decimal=True, ensure_ascii=False, sort_keys=True, separators=(',', ':'))
else:
params['biz_content'] = self.biz_content
if self.terminal_type:
params['terminal_type'] = self.terminal_type
if self.terminal_info:
params['terminal_info'] = self.terminal_info
if self.prod_code:
params['prod_code'] = self.prod_code
if self.notify_url:
params['notify_url'] = self.notify_url
if self.return_url:
params['return_url'] = self.return_url
if self.udf_params:
params.update(self.udf_params)
return params
def get_multipart_params(self):
multipart_params = dict()
return multipart_params
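# Minimal usage sketch (not part of the generated SDK file): the values below
# are placeholders, chosen only to show the flat parameter dict assembled by
# get_params() before an SDK client signs and sends the request.
if __name__ == '__main__':
    request = AlipayOpenMiniInnerversionOnlinePublishRequest()
    request.notify_url = 'https://example.com/notify'  # placeholder URL
    request.add_other_text_param('trace_id', 'demo-123')  # arbitrary extra text param
    print(request.get_params())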
| 28.165517 | 166 | 0.652302 | [
"Apache-2.0"
] | articuly/alipay-sdk-python-all | alipay/aop/api/request/AlipayOpenMiniInnerversionOnlinePublishRequest.py | 4,084 | Python |
def up(cursor, bot):
cursor.execute(
"""
CREATE TABLE timeouts(
id SERIAL PRIMARY KEY,
active BOOL DEFAULT TRUE NOT NULL,
user_id TEXT REFERENCES "user"(discord_id) ON DELETE CASCADE NOT NULL,
issued_by_id TEXT REFERENCES "user"(discord_id) ON DELETE SET NULL,
unbanned_by_id TEXT REFERENCES "user"(discord_id) ON DELETE SET NULL,
ban_reason TEXT,
unban_reason TEXT,
until TIMESTAMPTZ,
created_at TIMESTAMPTZ NOT NULL
)
"""
)
| 30.941176 | 78 | 0.636882 | [
"MIT"
] | EMorf/greenbot | greenbot/migration_revisions/db/0004_timeouts.py | 526 | Python |
# Copyright 2018-2019 The glTF-Blender-IO authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import bpy
import bmesh
from ..com.gltf2_blender_extras import set_extras
from .gltf2_blender_material import BlenderMaterial
from .gltf2_blender_primitive import BlenderPrimitive
class BlenderMesh():
"""Blender Mesh."""
def __new__(cls, *args, **kwargs):
raise RuntimeError("%s should not be instantiated" % cls)
@staticmethod
def create(gltf, mesh_idx, skin_idx):
"""Mesh creation."""
pymesh = gltf.data.meshes[mesh_idx]
# Create one bmesh, add all primitives to it, and then convert it to a
# mesh.
bme = bmesh.new()
# List of all the materials this mesh will use. The material each
# primitive uses is set by giving an index into this list.
materials = []
# Process all primitives
for prim in pymesh.primitives:
if prim.material is None:
material_idx = None
else:
pymaterial = gltf.data.materials[prim.material]
vertex_color = None
if 'COLOR_0' in prim.attributes:
vertex_color = 'COLOR_0'
# Create Blender material if needed
if vertex_color not in pymaterial.blender_material:
BlenderMaterial.create(gltf, prim.material, vertex_color)
material_name = pymaterial.blender_material[vertex_color]
material = bpy.data.materials[material_name]
try:
material_idx = materials.index(material.name)
except ValueError:
materials.append(material.name)
material_idx = len(materials) - 1
BlenderPrimitive.add_primitive_to_bmesh(gltf, bme, pymesh, prim, skin_idx, material_idx)
name = pymesh.name or 'Mesh_' + str(mesh_idx)
mesh = bpy.data.meshes.new(name)
BlenderMesh.bmesh_to_mesh(gltf, pymesh, bme, mesh)
bme.free()
for name_material in materials:
mesh.materials.append(bpy.data.materials[name_material])
mesh.update()
set_extras(mesh, pymesh.extras, exclude=['targetNames'])
# Clear accessor cache after all primitives are done
gltf.accessor_cache = {}
return mesh
@staticmethod
def bmesh_to_mesh(gltf, pymesh, bme, mesh):
bme.to_mesh(mesh)
# Unfortunately need to do shapekeys/normals/smoothing ourselves.
# Shapekeys
if len(bme.verts.layers.shape) != 0:
# The only way I could find to create a shape key was to temporarily
# parent mesh to an object and use obj.shape_key_add.
tmp_ob = None
try:
tmp_ob = bpy.data.objects.new('##gltf-import:tmp-object##', mesh)
tmp_ob.shape_key_add(name='Basis')
mesh.shape_keys.name = mesh.name
for layer_name in bme.verts.layers.shape.keys():
tmp_ob.shape_key_add(name=layer_name)
key_block = mesh.shape_keys.key_blocks[layer_name]
layer = bme.verts.layers.shape[layer_name]
for i, v in enumerate(bme.verts):
key_block.data[i].co = v[layer]
finally:
if tmp_ob:
bpy.data.objects.remove(tmp_ob)
# Normals
mesh.update()
if gltf.import_settings['import_shading'] == "NORMALS":
mesh.create_normals_split()
use_smooths = [] # whether to smooth for each poly
face_idx = 0
for prim in pymesh.primitives:
if gltf.import_settings['import_shading'] == "FLAT" or \
'NORMAL' not in prim.attributes:
use_smooths += [False] * prim.num_faces
elif gltf.import_settings['import_shading'] == "SMOOTH":
use_smooths += [True] * prim.num_faces
elif gltf.import_settings['import_shading'] == "NORMALS":
mesh_loops = mesh.loops
for fi in range(face_idx, face_idx + prim.num_faces):
poly = mesh.polygons[fi]
# "Flat normals" are when all the vertices in poly have the
# poly's normal. Otherwise, smooth the poly.
for loop_idx in range(poly.loop_start, poly.loop_start + poly.loop_total):
vi = mesh_loops[loop_idx].vertex_index
if poly.normal.dot(bme.verts[vi].normal) <= 0.9999999:
use_smooths.append(True)
break
else:
use_smooths.append(False)
else:
# shouldn't happen
assert False
face_idx += prim.num_faces
mesh.polygons.foreach_set('use_smooth', use_smooths)
# Custom normals, now that every update is done
if gltf.import_settings['import_shading'] == "NORMALS":
custom_normals = [v.normal for v in bme.verts]
mesh.normals_split_custom_set_from_vertices(custom_normals)
mesh.use_auto_smooth = True
| 39.39726 | 100 | 0.595793 | [
"Apache-2.0"
] | MrTheRich/glTF-Blender-IO | addons/io_scene_gltf2/blender/imp/gltf2_blender_mesh.py | 5,752 | Python |
# coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: v1.10.6
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import aiokubernetes
from aiokubernetes.models.v1beta1_custom_resource_subresources import V1beta1CustomResourceSubresources # noqa: E501
from aiokubernetes.rest import ApiException
class TestV1beta1CustomResourceSubresources(unittest.TestCase):
"""V1beta1CustomResourceSubresources unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testV1beta1CustomResourceSubresources(self):
"""Test V1beta1CustomResourceSubresources"""
# FIXME: construct object with mandatory attributes with example values
# model = aiokubernetes.models.v1beta1_custom_resource_subresources.V1beta1CustomResourceSubresources() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| 26.487805 | 125 | 0.75046 | [
"Apache-2.0"
] | olitheolix/aiokubernetes | test/test_v1beta1_custom_resource_subresources.py | 1,086 | Python |
# QAP Gemini
#
# adcclib.py
# ------------------------------------------------------------------------------
import os
import sys
import signal
import time
from copy import copy
from threading import Event
from threading import Thread
from recipe_system.adcc.servers import http_proxy
from recipe_system.adcc.servers import eventsManager
from recipe_system.config import globalConf
from recipe_system.config import STANDARD_REDUCTION_CONF
from recipe_system.utils.findexe import findexe
# ------------------------------------------------------------------------------
def get_adcc_dir(dirtitle="adcc"):
dotadcc = {"adcc": ".adcc"}
if not os.path.exists(dotadcc[dirtitle]):
os.mkdir(dotadcc[dirtitle])
return dotadcc[dirtitle]
def write_adcc_sr(srname, vals):
print("adcclib: adcc startup report in {}".format(srname))
with open(srname, "w+") as sr:
sr.write(repr(vals))
return
class Singleton(type):
_instances = {}
def __call__(cls, *args, **kwargs):
if cls not in cls._instances:
cls._instances[cls] = super().__call__(*args, **kwargs)
return cls._instances[cls]
class ADCC(metaclass=Singleton):
def __init__(self, args=None):
if args is None:
pass
else:
self.dark = args.dark
self.events = eventsManager.EventsManager()
self.spec_events = eventsManager.EventsManager()
self.http_port = args.httpport
self.sreport = args.adccsrn
self.racefile = "adccinfo.py"
self.verbose = args.verbosity
self.web = None
def _check_adcc(self, cpid):
adccproc = findexe('adcc')
xprocx = copy(adccproc)
msg = "adcclib: adcc process {} running."
try:
xprocx.pop(adccproc.index(cpid))
except ValueError:
pass
for p in xprocx: print(msg.format(p))
return xprocx
def _check_kill_adcc(self, pids):
for pid in pids:
os.kill(int(pid), signal.SIGKILL)
return
def _http_interface(self, run_event):
# establish HTTP server and proxy.
self.web = Thread(group=None, target=http_proxy.main, name="webface",
args=(run_event,),
kwargs={
'port': self.http_port,
'dark': self.dark,
'events': self.events,
'spec_events': self.spec_events,
'verbose': self.verbose
}
)
return
def _handle_locks(self):
curpid = os.getpid()
adccdir = get_adcc_dir()
lockf = os.path.join(adccdir, self.racefile)
lfile = os.path.exists(lockf)
pids = self._check_adcc(curpid)
msgs = {
'lockrun': "adcclib: adcc running and lockfile detected.",
'portrun': "adcclib: adcc running on port {}",
'norun': "adcclib: No adcc running but lockfile found.",
'rupted': "adcclib: adcc config appears corrupted. Clearing ..."
}
if pids and lfile:
sys.exit(msgs['lockrun'])
elif pids and not lfile:
sys.exit(msgs['portrun'].format(self.http_port))
elif lfile and not pids:
print(msgs['norun'])
print(msgs['rupted'])
os.unlink(lockf)
return
def _write_locks(self):
"""
Write racefile and ADCC Startup Report
"""
dotadcc = get_adcc_dir()
vals = {"http_port": self.http_port, "pid": os.getpid()}
rfile = os.path.join(dotadcc, self.racefile)
with open(rfile, "w") as ports:
ports.write(repr(vals))
sr = os.path.join(dotadcc, self.sreport)
write_adcc_sr(sr, vals)
return
def main(self):
globalConf.load(STANDARD_REDUCTION_CONF, env_override=True)
self._handle_locks()
self._write_locks()
# start webinterface
run_event = Event()
run_event.set()
self._http_interface(run_event)
self.web.start()
try:
while True:
time.sleep(1)
except KeyboardInterrupt:
print("\nadcc: exiting due to Ctrl-C")
run_event.clear()
self.web.join()
if os.path.exists(self.racefile):
os.remove(self.racefile)
return
| 30.86755 | 80 | 0.534006 | [
"BSD-3-Clause"
] | Luke-Ludwig/DRAGONS | recipe_system/adcc/adcclib.py | 4,661 | Python |
from setuptools import setup, find_packages
setup(
name='gatas',
version='1.0.0',
packages=find_packages(exclude=('tests', 'tests.*')),
python_requires='~=3.7',
install_requires=[
'mlflow~=1.8',
'defopt~=6.0',
'numba~=0.49',
'numpy~=1.18.0',
's3fs~=0.4.0',
'scikit-learn~=0.22.0',
'tensorflow~=1.15',
])
| 21.444444 | 57 | 0.520725 | [
"MIT"
] | Wattpad/gatas | setup.py | 386 | Python |
from datetime import datetime
from dagster import Out, job, op
from dagster.utils import script_relative_path
from dagster_pandas import PandasColumn, create_dagster_pandas_dataframe_type
from dagster_pandas.constraints import (
ColumnConstraint,
ColumnConstraintViolationException,
ColumnDTypeInSetConstraint,
)
from pandas import DataFrame, read_csv
# start_custom_col
class DivisibleByFiveConstraint(ColumnConstraint):
def __init__(self):
message = "Value must be divisible by 5"
super(DivisibleByFiveConstraint, self).__init__(
error_description=message, markdown_description=message
)
def validate(self, dataframe, column_name):
rows_with_unexpected_buckets = dataframe[dataframe[column_name].apply(lambda x: x % 5 != 0)]
if not rows_with_unexpected_buckets.empty:
raise ColumnConstraintViolationException(
constraint_name=self.name,
constraint_description=self.error_description,
column_name=column_name,
offending_rows=rows_with_unexpected_buckets,
)
CustomTripDataFrame = create_dagster_pandas_dataframe_type(
name="CustomTripDataFrame",
columns=[
PandasColumn(
"amount_paid",
constraints=[ColumnDTypeInSetConstraint({"int64"}), DivisibleByFiveConstraint()],
)
],
)
# end_custom_col
@op(out=Out(CustomTripDataFrame))
def load_custom_trip_dataframe() -> DataFrame:
return read_csv(
script_relative_path("./ebike_trips.csv"),
parse_dates=["start_time", "end_time"],
date_parser=lambda x: datetime.strptime(x, "%Y-%m-%d %H:%M:%S.%f"),
dtype={"color": "category"},
)
@job
def custom_column_constraint_trip():
load_custom_trip_dataframe()
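# Minimal usage sketch (not part of the original snippet, and assuming the
# accompanying ebike_trips.csv fixture is present): executing the job in
# process runs the dataframe type check, so a row whose amount_paid is not a
# multiple of five fails the DivisibleByFiveConstraint above.
if __name__ == "__main__":
    result = custom_column_constraint_trip.execute_in_process()
    print("run succeeded:", result.success)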
| 31.172414 | 100 | 0.705199 | [
"Apache-2.0"
] | StratoDem/dagster | examples/docs_snippets/docs_snippets/legacy/dagster_pandas_guide/custom_column_constraint.py | 1,808 | Python |
import pytest
from aiogoogle.resource import (
Resource,
GoogleAPI,
STACK_QUERY_PARAMETER_DEFAULT_VALUE,
STACK_QUERY_PARAMETERS,
)
from ..test_globals import ALL_APIS
@pytest.mark.parametrize("name,version", ALL_APIS)
def test_getitem(open_discovery_document, name, version):
discovery_document = open_discovery_document(name, version)
api = GoogleAPI(discovery_document=discovery_document)
assert api["name"] == discovery_document.get("name")
assert api["version"] == discovery_document.get("version")
assert api["auth"] == discovery_document.get("auth")
@pytest.mark.parametrize("name,version", ALL_APIS)
def test_properties(open_discovery_document, name, version):
discovery_document = open_discovery_document(name, version)
api = GoogleAPI(discovery_document=discovery_document)
assert (name in str(api)) or (discovery_document.get("title") in str(api))
assert (version in str(api)) or (discovery_document.get("title") in str(api))
@pytest.mark.parametrize("name,version", ALL_APIS)
def test_constructor(open_discovery_document, name, version):
discovery_document = open_discovery_document(name, version)
api = GoogleAPI(discovery_document=discovery_document)
assert id(api.discovery_document) == id(api.discovery_document)
assert api["resources"] == discovery_document.get("resources")
@pytest.mark.parametrize("name,version", ALL_APIS)
def test_getattr(open_discovery_document, name, version):
discovery_document = open_discovery_document(name, version)
api = GoogleAPI(discovery_document=discovery_document)
for resource_name, _ in discovery_document.get("resources").items():
resource_object = getattr(api, resource_name)
assert isinstance(resource_object, Resource)
assert resource_name == resource_object.name
@pytest.mark.parametrize("name,version", ALL_APIS)
def test_repr(open_discovery_document, name, version):
discovery_document = open_discovery_document(name, version)
api = GoogleAPI(discovery_document=discovery_document)
name = discovery_document.get("name")
version = discovery_document.get("version")
base_url = discovery_document.get("baseUrl")
assert name in str(api) and version in str(api) and base_url in str(api)
@pytest.mark.parametrize("name,version", ALL_APIS)
def test_len(open_discovery_document, name, version):
discovery_document = open_discovery_document(name, version)
api = GoogleAPI(discovery_document=discovery_document)
methods_len = (
len(discovery_document.get("methods"))
if discovery_document.get("methods")
else 0
)
resources_len = (
len(discovery_document.get("resources"))
if discovery_document.get("resources")
else 0
)
assert len(api) == methods_len + resources_len
@pytest.mark.parametrize("name,version", ALL_APIS)
def test_resources_getattr_fails_on_unknown_resource(
open_discovery_document, name, version
):
discovery_document = open_discovery_document(name, version)
api = GoogleAPI(discovery_document=discovery_document)
nonexistent_resource = "I_MUST_NEVER_EXIST_ANYWHEREEEEEEEE"
with pytest.raises(AttributeError):
getattr(api, nonexistent_resource)
@pytest.mark.parametrize("name,version", ALL_APIS)
def test_attaches_global_params(open_discovery_document, name, version):
discovery_document = open_discovery_document(name, version)
api = GoogleAPI(discovery_document=discovery_document)
for param in STACK_QUERY_PARAMETERS:
assert param in api["parameters"]
assert api["parameters"][param] == STACK_QUERY_PARAMETER_DEFAULT_VALUE
| 39.858696 | 81 | 0.760295 | [
"MIT"
] | dergunovalexey/aiogoogle | tests/test_units/test_GoogleAPI.py | 3,667 | Python |
from . import ( # noqa
ideal_qr,
ideal_qr_notification,
payment,
payment_request,
payment_request_notification,
refund,
sandbox_app,
transaction_bundle,
transactions_notifications,
)
| 18.333333 | 33 | 0.7 | [
"MIT"
] | new10com/python-tikkie-api | src/tikkie2/v2/__init__.py | 220 | Python |
from model.contact import Contact
from random import randrange
def test_edit_contact(app, db, check_ui):
if len(db.get_contact_list()) == 0:
app.contact.create(Contact(firstName='test'))
old_contact = db.get_contact_list()
index = randrange(len(old_contact))
contact = Contact(firstName="firstName", middleName="middleName", lastName="lastName", nickName="nickName", title="title",
company="company", address="address", home="home", mobile="mobile", work="work", fax="fax", email="email",
email2="email2", email3="email3", homepage="homepage", address2="address2", phone2="phone2", notes="notes")
contact.id = old_contact[index].id
app.contact.edit_contact_by_id(contact.id, contact)
new_contact = db.get_contact_list()
assert len(old_contact) == len(new_contact)
old_contact[index] = contact
assert sorted(old_contact, key=Contact.id_or_map) == sorted(new_contact, key=Contact.id_or_map)
if check_ui:
assert sorted(new_contact, key=Contact.id_or_map) == sorted(app.contact.get_contact_list(), key=Contact.id_or_map) | 62.888889 | 138 | 0.693463 | [
"Apache-2.0"
] | AIdegrade/testing | tests/test_edit_first_contact.py | 1,132 | Python |
# coding: utf-8
"""
jinja2schema.config
~~~~~~~~~~~~~~~~~~~
"""
from .order_number import OrderNumber
class Config(object):
"""Configuration."""
TYPE_OF_VARIABLE_INDEXED_WITH_VARIABLE_TYPE = 'dictionary'
"""Possible values: ``"dictionary"`` or ``"list""``.
For example, in the expression ``xs[a]`` variable ``xs`` may be a list as well as a dictionary.
This setting is used to choose between a dictionary and a list when the variable is
being indexed with another variable.
"""
TYPE_OF_VARIABLE_INDEXED_WITH_INTEGER_TYPE = 'list'
"""Possible values: ``"dictionary"``, ``"list"`` or ``"tuple"``.
For example, in the expression ``xs[2]`` variable ``xs`` may be a list as well as a dictionary or a tuple.
This setting is used to choose between a dictionary, a tuple and a list when the variable is
being indexed with an integer.
"""
BOOLEAN_CONDITIONS = False
"""Whether or not to consider conditions in ``if`` statements as boolean.
If this variable is not set, ``xs`` variable in template ``{% if xs %}{% endif %}`` will have
unknown structure. If this variable is set, ``xs`` will be a boolean.
"""
PACKAGE_NAME = ''
"""Name of the package where you want to load templates from.
This setting only matters if you use ``include`` in your Jinja templates: it tells Jinja
where to look to load the included template from. If you do not plan on using ``include``,
this setting is not needed.
"""
TEMPLATE_DIR = 'templates'
"""Name of the directory where you want to load templates from. Defaulted to ``templates``
This configuration is for if you are using includes in your jinja templates. This tells jinja
which directoy to look to be able to load the included template from. If you do not plan on using ``includes``
this configuration is not needed.
"""
ORDER_NUMBER = False
"""Add a order number to each node
Add a order number to make schema sortable.
"""
ORDER_NUMBER_SUB_COUNTER = True
"""Independent subsection order numbers
Use a separate counter in each subsection to generate order numbers.
"""
IGNORE_UNKNOWN_FILTERS = False
"""Ignore unknown filters
Can be True to ignore all unknown filters, False (by default, to keep the original behavior), or a list
(set or tuple) containing the filter names to ignore.
Those filters will always accept Unknown and return Unknown.
"""
def __init__(self,
TYPE_OF_VARIABLE_INDEXED_WITH_VARIABLE_TYPE='dictionary',
TYPE_OF_VARIABLE_INDEXED_WITH_INTEGER_TYPE='list',
BOOLEAN_CONDITIONS=False,
PACKAGE_NAME='',
TEMPLATE_DIR='templates',
ORDER_NUMBER=False,
ORDER_NUMBER_SUB_COUNTER=True,
IGNORE_UNKNOWN_FILTERS=False):
if TYPE_OF_VARIABLE_INDEXED_WITH_VARIABLE_TYPE not in ('dictionary', 'list'):
raise ValueError('TYPE_OF_VARIABLE_INDEXED_WITH_VARIABLE_TYPE must be '
'either "dictionary" or "list"')
if TYPE_OF_VARIABLE_INDEXED_WITH_INTEGER_TYPE not in ('dictionary', 'list', 'tuple'):
raise ValueError('TYPE_OF_VARIABLE_INDEXED_WITH_INTEGER_TYPE must be '
'either "dictionary", "tuple" or "list"')
self.TYPE_OF_VARIABLE_INDEXED_WITH_INTEGER_TYPE = TYPE_OF_VARIABLE_INDEXED_WITH_INTEGER_TYPE
self.TYPE_OF_VARIABLE_INDEXED_WITH_VARIABLE_TYPE = TYPE_OF_VARIABLE_INDEXED_WITH_VARIABLE_TYPE
self.BOOLEAN_CONDITIONS = BOOLEAN_CONDITIONS
self.PACKAGE_NAME = PACKAGE_NAME
self.TEMPLATE_DIR = TEMPLATE_DIR
self.ORDER_NUMBER = ORDER_NUMBER
self.ORDER_OBJECT = OrderNumber(number=1, enabled=self.ORDER_NUMBER,
sub_counter_enabled=ORDER_NUMBER_SUB_COUNTER)
self.IGNORE_UNKNOWN_FILTERS = IGNORE_UNKNOWN_FILTERS
default_config = Config()
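# Example configuration (a sketch, not taken from the library docs): treat `if`
# conditions as booleans, treat integer-indexed variables as tuples, and ignore
# the hypothetical custom filters named below.
#
#   strict_config = Config(BOOLEAN_CONDITIONS=True,
#                          TYPE_OF_VARIABLE_INDEXED_WITH_INTEGER_TYPE='tuple',
#                          IGNORE_UNKNOWN_FILTERS={'markdown', 'slugify'})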
| 40.717172 | 114 | 0.678492 | [
"BSD-3-Clause"
] | jmjjg/jinja2schema | jinja2schema/config.py | 4,031 | Python |
from typing import Any, List, Literal, TypedDict
from .FHIR_CodeableConcept import FHIR_CodeableConcept
from .FHIR_Element import FHIR_Element
from .FHIR_Reference import FHIR_Reference
from .FHIR_string import FHIR_string
# A record of a clinical assessment performed to determine what problem(s) may affect the patient and before planning the treatments or management strategies that are best to manage a patient's condition. Assessments are often 1:1 with a clinical consultation / encounter, but this varies greatly depending on the clinical workflow. This resource is called "ClinicalImpression" rather than "ClinicalAssessment" to avoid confusion with the recording of assessment tools such as Apgar score.
FHIR_ClinicalImpression_Finding = TypedDict(
"FHIR_ClinicalImpression_Finding",
{
# Unique id for the element within a resource (for internal references). This may be any string value that does not contain spaces.
"id": FHIR_string,
# May be used to represent additional information that is not part of the basic definition of the element. To make the use of extensions safe and manageable, there is a strict set of governance applied to the definition and use of extensions. Though any implementer can define an extension, there is a set of requirements that SHALL be met as part of the definition of the extension.
"extension": List[Any],
# May be used to represent additional information that is not part of the basic definition of the element and that modifies the understanding of the element in which it is contained and/or the understanding of the containing element's descendants. Usually modifier elements provide negation or qualification. To make the use of extensions safe and manageable, there is a strict set of governance applied to the definition and use of extensions. Though any implementer can define an extension, there is a set of requirements that SHALL be met as part of the definition of the extension. Applications processing a resource are required to check for modifier extensions.Modifier extensions SHALL NOT change the meaning of any elements on Resource or DomainResource (including cannot change the meaning of modifierExtension itself).
"modifierExtension": List[Any],
# Specific text or code for finding or diagnosis, which may include ruled-out or resolved conditions.
"itemCodeableConcept": FHIR_CodeableConcept,
# Specific reference for finding or diagnosis, which may include ruled-out or resolved conditions.
"itemReference": FHIR_Reference,
# Which investigations support finding or diagnosis.
"basis": FHIR_string,
# Extensions for basis
"_basis": FHIR_Element,
},
total=False,
)
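# Illustrative value (not from the FHIR specification text above; the field
# contents are invented placeholders). Because the TypedDict is declared with
# total=False, a partial dict like this one type-checks.
_example_finding: FHIR_ClinicalImpression_Finding = {
    "id": "finding-1",
    "basis": "Supported by the radiology report",
}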
| 95.37931 | 836 | 0.780188 | [
"Apache-2.0"
] | anthem-ai/fhir-types | src/fhir_types/FHIR_ClinicalImpression_Finding.py | 2,766 | Python |
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import time
'''
ComputeCost computes the cost function
'''
def computeCost(X, y, theta):
#computeCost Compute cost for linear regression
# J = computeCost(X, y, theta) computes the cost of using theta as the
# parameter for linear regression to fit the data points in X and y
# Initialize some useful values
m = len(y); # number of training examples
# You need to return the following variables correctly
J = 0;
# ====================== YOUR CODE HERE ======================
# Instructions: Compute the cost of a particular choice of theta
# =========================================================================
# You should set J to the cost.
X_product = np.matmul(X,theta) # X*theta
X_diff = np.subtract(X_product, y) # X*theta - y
X_square = np.square(X_diff) # Square each element of the matrix computed above
X_sum = np.sum(X_square) # Sum all the elements
J = (1.0/(2.0*m))*X_sum # Cost Function
return J
'''
gradientDescent function iterates till it finds a minima
'''
def gradientDescent(X, y, theta, alpha, num_iters):
#function [theta, J_history] = gradientDescent(X, y, theta, alpha, num_iters)
#GRADIENTDESCENT Performs gradient descent to learn theta
# theta = GRADIENTDESCENT(X, y, theta, alpha, num_iters) updates theta by
# taking num_iters gradient steps with learning rate alpha
# Initialize some useful values
m = len(y); # number of training examples
J_history = np.zeros((num_iters, 1));
for iter in range(num_iters):
# ====================== YOUR CODE HERE ======================
# Instructions: Perform a single gradient step on the parameter vector
# theta.
#
# Hint: While debugging, it can be useful to print out the values
# of the cost function (computeCost) and gradient here.
#
X_0 = X[:,0].reshape((m,1));
X_1 = X[:,1].reshape((m,1));
X_0_tr = np.transpose(X_0)
X_1_tr = np.transpose(X_1)
X_theta_prod = (np.matmul(X,theta)).reshape((m,1))
X_theta_y_diff = np.subtract(X_theta_prod,y)
theta_0 = theta.item(0) - (float(alpha)/float(m))*(np.matmul(X_0_tr, X_theta_y_diff)).item(0)
theta_1 = theta.item(1) - (float(alpha)/float(m))*(np.matmul(X_1_tr, X_theta_y_diff)).item(0)
#print X_0.shape, X_0_tr.shape, theta.shape, X.shape, X_theta_prod.shape, y.shape, X_theta_y_diff.shape
theta = np.array([theta_0, theta_1]).reshape((2,1))
# Plot the linear fit
if(iter%200==0):
plt.scatter(X_data, y_data, marker='o', color='g', label='orig')
y_data_predicted = np.matmul(X,theta)
plt.plot(X_data, y_data_predicted, marker='*', linestyle='-', color='b', label='pred')
plt.legend(loc='lower right')
plt.show(block=False)
time.sleep(3)
plt.close()
# ============================================================
# Save the cost J in every iteration
J_history[iter] = computeCost(X, y, theta)
print "Cost @ iteration: ",iter, " = ", J_history[iter]
return theta
data = pd.read_csv('ex1data1.txt', header = None, names = ['Population', 'Profits'])
y_data = data.iloc[:,1]
X_data = data.iloc[:,0]
m = len(y_data) #Number of training samples
y = np.array(y_data).reshape(m,1)
X = np.c_[np.ones(m), np.array(X_data)] # Add a column of ones to x
fig = plt.figure()
ax = fig.add_subplot(111)
ax.set_title('Population - Profit Scatter Plot')
ax.set_xlabel('Population in 10000s')
ax.set_ylabel('Profit in 10000$')
theta = np.zeros((2, 1)).reshape((2,1)) # initialize fitting parameters
theta = np.array([40,40]).reshape((2,1))# Try initializing from a different point. The convergence will be seen easily
print "Cost Function Value is:", computeCost(X, y, theta)
# Some gradient descent settings
iterations = 1500;
alpha = 0.01;
# run gradient descent
theta = gradientDescent(X, y, theta, alpha, iterations);
# print theta to screen
print 'Theta found by gradient descent: ', theta.item(0), theta.item(1)
# Plot the linear fit
plt.scatter(X_data, y_data, marker='o', color='g', label='orig')
y_data_predicted = np.matmul(X,theta)
plt.plot(X_data, y_data_predicted, marker='*', linestyle='-', color='b', label='pred')
plt.legend(loc='lower right')
# Predict values for population sizes of 35,000 and 70,000
#predict1 = [1, 3.5] *theta;
#fprintf('For population = 35,000, we predict a profit of %f\n',...
# predict1*10000);
#predict2 = [1, 7] * theta;
#fprintf('For population = 70,000, we predict a profit of %f\n',...
# predict2*10000);
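# Optional sanity check (not part of the original exercise): the closed-form
# normal equation theta = (X'X)^-1 X'y should land close to the gradient
# descent estimate above for this small, well-conditioned dataset.
theta_normal_eq = np.linalg.solve(np.matmul(X.T, X), np.matmul(X.T, y))
print 'Theta from normal equation: ', theta_normal_eq.item(0), theta_normal_eq.item(1)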
| 34.9 | 119 | 0.652854 | [
"MIT"
] | rishisidhu/MachineLearning | Linear_Regression/ex1.py | 4,537 | Python |
# Mobile Verification Toolkit (MVT)
# Copyright (c) 2021-2022 The MVT Project Authors.
# Use of this software is governed by the MVT License 1.1 that can be found at
# https://license.mvt.re/1.1/
import logging
import os
import click
from rich.logging import RichHandler
from mvt.common.help import (HELP_MSG_FAST, HELP_MSG_IOC,
HELP_MSG_LIST_MODULES, HELP_MSG_MODULE,
HELP_MSG_OUTPUT, HELP_MSG_SERIAL)
from mvt.common.indicators import Indicators, download_indicators_files
from mvt.common.logo import logo
from mvt.common.module import run_module, save_timeline
from .download_apks import DownloadAPKs
from .lookups.koodous import koodous_lookup
from .lookups.virustotal import virustotal_lookup
from .modules.adb import ADB_MODULES
from .modules.backup import BACKUP_MODULES
# Setup logging using Rich.
LOG_FORMAT = "[%(name)s] %(message)s"
logging.basicConfig(level="INFO", format=LOG_FORMAT, handlers=[
RichHandler(show_path=False, log_time_format="%X")])
log = logging.getLogger(__name__)
#==============================================================================
# Main
#==============================================================================
@click.group(invoke_without_command=False)
def cli():
logo()
#==============================================================================
# Command: version
#==============================================================================
@cli.command("version", help="Show the currently installed version of MVT")
def version():
return
#==============================================================================
# Download APKs
#==============================================================================
@cli.command("download-apks", help="Download all or non-safelisted installed APKs installed on the device")
@click.option("--serial", "-s", type=str, help=HELP_MSG_SERIAL)
@click.option("--all-apks", "-a", is_flag=True,
help="Extract all packages installed on the phone, including system packages")
@click.option("--virustotal", "-v", is_flag=True, help="Check packages on VirusTotal")
@click.option("--koodous", "-k", is_flag=True, help="Check packages on Koodous")
@click.option("--all-checks", "-A", is_flag=True, help="Run all available checks")
@click.option("--output", "-o", type=click.Path(exists=False),
help="Specify a path to a folder where you want to store the APKs")
@click.option("--from-file", "-f", type=click.Path(exists=True),
help="Instead of acquiring from phone, load an existing packages.json file for lookups (mainly for debug purposes)")
@click.pass_context
def download_apks(ctx, all_apks, virustotal, koodous, all_checks, output, from_file, serial):
try:
if from_file:
download = DownloadAPKs.from_json(from_file)
else:
# TODO: Do we actually want to be able to run without storing any file?
if not output:
log.critical("You need to specify an output folder with --output!")
ctx.exit(1)
if not os.path.exists(output):
try:
os.makedirs(output)
except Exception as e:
log.critical("Unable to create output folder %s: %s", output, e)
ctx.exit(1)
download = DownloadAPKs(output_folder=output, all_apks=all_apks,
log=logging.getLogger(DownloadAPKs.__module__))
if serial:
download.serial = serial
download.run()
packages = download.packages
if len(packages) == 0:
return
if virustotal or all_checks:
virustotal_lookup(packages)
if koodous or all_checks:
koodous_lookup(packages)
except KeyboardInterrupt:
print("")
ctx.exit(1)
#==============================================================================
# Checks through ADB
#==============================================================================
@cli.command("check-adb", help="Check an Android device over adb")
@click.option("--serial", "-s", type=str, help=HELP_MSG_SERIAL)
@click.option("--iocs", "-i", type=click.Path(exists=True), multiple=True,
default=[], help=HELP_MSG_IOC)
@click.option("--output", "-o", type=click.Path(exists=False),
help=HELP_MSG_OUTPUT)
@click.option("--fast", "-f", is_flag=True, help=HELP_MSG_FAST)
@click.option("--list-modules", "-l", is_flag=True, help=HELP_MSG_LIST_MODULES)
@click.option("--module", "-m", help=HELP_MSG_MODULE)
@click.pass_context
def check_adb(ctx, iocs, output, fast, list_modules, module, serial):
if list_modules:
log.info("Following is the list of available check-adb modules:")
for adb_module in ADB_MODULES:
log.info(" - %s", adb_module.__name__)
return
log.info("Checking Android through adb bridge")
if output and not os.path.exists(output):
try:
os.makedirs(output)
except Exception as e:
log.critical("Unable to create output folder %s: %s", output, e)
ctx.exit(1)
indicators = Indicators(log=log)
indicators.load_indicators_files(iocs)
timeline = []
timeline_detected = []
for adb_module in ADB_MODULES:
if module and adb_module.__name__ != module:
continue
m = adb_module(output_folder=output, fast_mode=fast,
log=logging.getLogger(adb_module.__module__))
if indicators.total_ioc_count:
m.indicators = indicators
m.indicators.log = m.log
if serial:
m.serial = serial
run_module(m)
timeline.extend(m.timeline)
timeline_detected.extend(m.timeline_detected)
if output:
if len(timeline) > 0:
save_timeline(timeline, os.path.join(output, "timeline.csv"))
if len(timeline_detected) > 0:
save_timeline(timeline_detected, os.path.join(output, "timeline_detected.csv"))
#==============================================================================
# Check ADB backup
#==============================================================================
@cli.command("check-backup", help="Check an Android Backup")
@click.option("--serial", "-s", type=str, help=HELP_MSG_SERIAL)
@click.option("--iocs", "-i", type=click.Path(exists=True), multiple=True,
default=[], help=HELP_MSG_IOC)
@click.option("--output", "-o", type=click.Path(exists=False), help=HELP_MSG_OUTPUT)
@click.argument("BACKUP_PATH", type=click.Path(exists=True))
@click.pass_context
def check_backup(ctx, iocs, output, backup_path, serial):
log.info("Checking ADB backup located at: %s", backup_path)
if output and not os.path.exists(output):
try:
os.makedirs(output)
except Exception as e:
log.critical("Unable to create output folder %s: %s", output, e)
ctx.exit(1)
indicators = Indicators(log=log)
indicators.load_indicators_files(iocs)
if os.path.isfile(backup_path):
log.critical("The path you specified is a not a folder!")
if os.path.basename(backup_path) == "backup.ab":
log.info("You can use ABE (https://github.com/nelenkov/android-backup-extractor) "
"to extract 'backup.ab' files!")
ctx.exit(1)
for module in BACKUP_MODULES:
m = module(base_folder=backup_path, output_folder=output,
log=logging.getLogger(module.__module__))
if indicators.total_ioc_count:
m.indicators = indicators
m.indicators.log = m.log
if serial:
m.serial = serial
run_module(m)
#==============================================================================
# Command: check-iocs
#==============================================================================
@cli.command("check-iocs", help="Compare stored JSON results to provided indicators")
@click.option("--iocs", "-i", type=click.Path(exists=True), multiple=True,
default=[], help=HELP_MSG_IOC)
@click.option("--list-modules", "-l", is_flag=True, help=HELP_MSG_LIST_MODULES)
@click.option("--module", "-m", help=HELP_MSG_MODULE)
@click.argument("FOLDER", type=click.Path(exists=True))
@click.pass_context
def check_iocs(ctx, iocs, list_modules, module, folder):
all_modules = []
for entry in BACKUP_MODULES + ADB_MODULES:
if entry not in all_modules:
all_modules.append(entry)
if list_modules:
log.info("Following is the list of available check-iocs modules:")
for iocs_module in all_modules:
log.info(" - %s", iocs_module.__name__)
return
log.info("Checking stored results against provided indicators...")
indicators = Indicators(log=log)
indicators.load_indicators_files(iocs)
total_detections = 0
for file_name in os.listdir(folder):
name_only, ext = os.path.splitext(file_name)
file_path = os.path.join(folder, file_name)
# TODO: Skipping processing of result files that are not json.
# We might want to revisit this eventually.
if ext != ".json":
continue
for iocs_module in all_modules:
if module and iocs_module.__name__ != module:
continue
if iocs_module().get_slug() != name_only:
continue
log.info("Loading results from \"%s\" with module %s", file_name,
iocs_module.__name__)
m = iocs_module.from_json(file_path,
log=logging.getLogger(iocs_module.__module__))
if indicators.total_ioc_count > 0:
m.indicators = indicators
m.indicators.log = m.log
try:
m.check_indicators()
except NotImplementedError:
continue
else:
total_detections += len(m.detected)
if total_detections > 0:
log.warning("The check of the results produced %d detections!",
total_detections)
#==============================================================================
# Command: download-iocs
#==============================================================================
@cli.command("download-iocs", help="Download public STIX2 indicators")
def download_indicators():
download_indicators_files(log)
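# Example invocations (a sketch; the paths and STIX2 file names below are
# placeholders, and the `mvt-android` entry point name is assumed from the
# package setup rather than defined in this file):
#
#   mvt-android download-apks --output /tmp/apks --all-checks
#   mvt-android check-adb --iocs pegasus.stix2 --output /tmp/adb_results
#   mvt-android check-backup --output /tmp/backup_results /path/to/backup/
#   mvt-android check-iocs --iocs pegasus.stix2 /tmp/adb_results
#   mvt-android download-iocs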
| 38.551095 | 130 | 0.573795 | [
"MPL-2.0",
"MPL-2.0-no-copyleft-exception"
] | dangaffey/mvt | mvt/android/cli.py | 10,563 | Python |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2006 Edgewall Software
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.com/license.html.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://projects.edgewall.com/trac/.
import unittest
from tracspamfilter.filters.tests import akismet, bayes, extlinks, regex, \
session
def test_suite():
suite = unittest.TestSuite()
suite.addTest(akismet.test_suite())
suite.addTest(bayes.test_suite())
suite.addTest(extlinks.test_suite())
suite.addTest(regex.test_suite())
suite.addTest(session.test_suite())
return suite
if __name__ == '__main__':
unittest.main(defaultTest='test_suite')
| 32.387097 | 76 | 0.691235 | [
"BSD-2-Clause"
] | Puppet-Finland/puppet-trac | files/spam-filter/tracspamfilter/filters/tests/__init__.py | 1,004 | Python |
#!/usr/bin/python
import re, random, sys, difflib
random.seed(123)
for i, line in enumerate(sys.stdin.readlines()):
if i % 1000 == 0: print >>sys.stderr, i, "..."
if i>0 and re.search(r'^id\tsentiment', line): continue # combined files, ignore multiple header rows
line = re.sub(r'\n$', '', line) # strip trailing newlines
# "0003b8d" <tab>1<tab> }\n \n u32 cik_gfx_get_wptr(struct radeon_device *rdev,\n \t\t struct radeon_ri
fields = line.split('\t', 2)
if fields[2] == '': continue # corruption due to empty commits, i.e. no applicable code...
fields[2] = '\\n'.join(fields[2].split('\\n')[0:25]) # keep <=25 lines
f2 = fields[2] = re.sub(r'[^\x09,\x0A,\x20-\x7E]', '.', fields[2]) # cleanup non-ASCII
r = random.randint(0,99) # augment x% of the time, i.e. don't go crazy
if fields[1] == '0':
# no bug - harmless transforms
res = []
if r % 10 == 0: # 10% of the time
f2 = re.sub(r'/[*].*?[*]/|//.*?(\\n)', '\1', f2)
# inject spaces and newlines
for i in range(len(f2)-1):
c = f2[i]
# lines end in newlines, so no risk of running off the end
if c == '\\':
c2 = f2[i+1]
if c2 == ' ' and r < 3: res.append(' ') # add a space
elif c2 == 'n' and r < 5: res.append('\\n\\') # add a newline
elif c2 == 'n' and r < 7: res.append(' \\') # add extra trailing whitespace
elif c2 == 't' and r < 3: res.append(' \\') # extra space before tab
elif c2 == 't' and r < 5: res.append('\\t ') # extra space after tabs
### your ideas here ###
else: res.append(c)
elif c in '{}[]':
r = random.randint(0,99)
if r < 3: res.append(' ') # add a space
### your ideas here ###
else: res.append(c)
else: res.append(c)
newf2 = ''.join(res)+f2[-1]
else: # fields[1] == '1'
# contains a bug - harmful transform
r = random.randint(0,99)
if r < 50:
# swap if/then clauses - may introduce syntax errors
newf2 = re.sub(r'(if[^(]*[(].+?[)][^{]*){(.+?)}(.*?then.*?){(.*?)}', r'\1{\4}\3{\2}', f2)
# change comparison operators - since ==/!= is used for other datatypes, keep separate from </>
# note: pick random operator to avoid real parsing
newf2 = re.sub(r'([a-zA-Z0-9_] *)(<=?|>=?)( *[a-zA-Z0-9_])', r'\1'+['<','<=','>','>='][r%4]+r'\3', newf2)
newf2 = re.sub(r'([a-zA-Z0-9_] *)(==|!=)( *[a-zA-Z0-9_])', r'\1'+['==','!='][r%2]+r'\3', newf2)
newf2 = re.sub(r'([a-zA-Z0-9_] *)(&&|[|][|])( *[a-zA-Z0-9_])', r'\1'+['==','!='][r%2]+r'\3', newf2)
# muck numbers
# 201 - 99...99 doesn't end in 0, not binary, etc.
newf2 = re.sub(r'([2-9][0-9]+[1-9])', str(r*random.randint(0,99)+200), newf2)
else:
newf2 = f2
print '\t'.join(fields)
if newf2 != fields[2]:
print '\t'.join([re.sub(r'"$', 'beef"', fields[0]), fields[1], newf2])
#print 'diff:\n' + ''.join(difflib.unified_diff(fields[2].replace('\\n','\n'), newf2.replace('\\n','\n')))
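# Usage sketch (file names are placeholders): the script reads the TSV produced
# upstream on stdin and writes the original rows plus their augmented copies to
# stdout.
#
#   python augment-c_and_cpp.py < diffs.tsv > diffs_augmented.tsv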
| 52.741935 | 117 | 0.478287 | [
"MIT"
] | asah/codesmell | augment-c_and_cpp.py | 3,270 | Python |
# Copyright 2019 The Magenta Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Event sequence RNN model."""
import collections
import copy
import functools
from magenta.common import beam_search
from magenta.common import state_util
from magenta.models.shared import events_rnn_graph
import magenta.music as mm
import numpy as np
from six.moves import range # pylint: disable=redefined-builtin
import tensorflow as tf
from tensorflow.contrib import training as contrib_training
# Model state when generating event sequences, consisting of the next inputs to
# feed the model, the current RNN state, the current control sequence (if
# applicable), and state for the current control sequence (if applicable).
ModelState = collections.namedtuple(
'ModelState', ['inputs', 'rnn_state', 'control_events', 'control_state'])
class EventSequenceRnnModelError(Exception):
pass
def _extend_control_events_default(control_events, events, state):
"""Default function for extending control event sequence.
This function extends a control event sequence by duplicating the final event
in the sequence. The control event sequence will be extended to have length
one longer than the generated event sequence.
Args:
control_events: The control event sequence to extend.
events: The list of generated events.
state: State maintained while generating, unused.
Returns:
The resulting state after extending the control sequence (in this case the
state will be returned unmodified).
"""
while len(control_events) <= len(events):
control_events.append(control_events[-1])
return state
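# Illustrative check of the duplication behaviour documented above (not part of
# the original module): with two control events and three generated events, the
# control sequence is padded with copies of its final event until it is one
# longer than the generated sequence.
#
#   controls, events = [60, 62], [0, 0, 0]
#   _extend_control_events_default(controls, events, state=None)
#   assert controls == [60, 62, 62, 62]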
class EventSequenceRnnModel(mm.BaseModel):
"""Class for RNN event sequence generation models.
Currently this class only supports generation, of both event sequences and
note sequences (via event sequences). Support for model training will be added
at a later time.
"""
def __init__(self, config):
"""Initialize the EventSequenceRnnModel.
Args:
config: An EventSequenceRnnConfig containing the encoder/decoder and
HParams to use.
"""
super(EventSequenceRnnModel, self).__init__()
self._config = config
def _build_graph_for_generation(self):
events_rnn_graph.get_build_graph_fn('generate', self._config)()
def _batch_size(self):
"""Extracts the batch size from the graph."""
return self._session.graph.get_collection('inputs')[0].shape[0].value
def _generate_step_for_batch(self, event_sequences, inputs, initial_state,
temperature):
"""Extends a batch of event sequences by a single step each.
This method modifies the event sequences in place.
Args:
event_sequences: A list of event sequences, each of which is a Python
list-like object. The list of event sequences should have length equal
to `self._batch_size()`. These are extended by this method.
inputs: A Python list of model inputs, with length equal to
`self._batch_size()`.
initial_state: A numpy array containing the initial RNN state, where
`initial_state.shape[0]` is equal to `self._batch_size()`.
temperature: The softmax temperature.
Returns:
final_state: The final RNN state, a numpy array the same size as
`initial_state`.
loglik: The log-likelihood of the chosen softmax value for each event
sequence, a 1-D numpy array of length
`self._batch_size()`. If `inputs` is a full-length inputs batch, the
log-likelihood of each entire sequence up to and including the
generated step will be computed and returned.
"""
assert len(event_sequences) == self._batch_size()
graph_inputs = self._session.graph.get_collection('inputs')[0]
graph_initial_state = self._session.graph.get_collection('initial_state')
graph_final_state = self._session.graph.get_collection('final_state')
graph_softmax = self._session.graph.get_collection('softmax')[0]
graph_temperature = self._session.graph.get_collection('temperature')
feed_dict = {graph_inputs: inputs,
tuple(graph_initial_state): initial_state}
# For backwards compatibility, we only try to pass temperature if the
# placeholder exists in the graph.
if graph_temperature:
feed_dict[graph_temperature[0]] = temperature
final_state, softmax = self._session.run(
[graph_final_state, graph_softmax], feed_dict)
if isinstance(softmax, list):
if softmax[0].shape[1] > 1:
softmaxes = []
for beam in range(softmax[0].shape[0]):
beam_softmaxes = []
for event in range(softmax[0].shape[1] - 1):
beam_softmaxes.append(
[softmax[s][beam, event] for s in range(len(softmax))])
softmaxes.append(beam_softmaxes)
loglik = self._config.encoder_decoder.evaluate_log_likelihood(
event_sequences, softmaxes)
else:
loglik = np.zeros(len(event_sequences))
else:
if softmax.shape[1] > 1:
# The inputs batch is longer than a single step, so we also want to
# compute the log-likelihood of the event sequences up until the step
# we're generating.
loglik = self._config.encoder_decoder.evaluate_log_likelihood(
event_sequences, softmax[:, :-1, :])
else:
loglik = np.zeros(len(event_sequences))
indices = np.array(self._config.encoder_decoder.extend_event_sequences(
event_sequences, softmax))
if isinstance(softmax, list):
p = 1.0
for i in range(len(softmax)):
p *= softmax[i][range(len(event_sequences)), -1, indices[:, i]]
else:
p = softmax[range(len(event_sequences)), -1, indices]
return final_state, loglik + np.log(p)
def _generate_step(self, event_sequences, model_states, logliks, temperature,
extend_control_events_callback=None,
modify_events_callback=None):
"""Extends a list of event sequences by a single step each.
This method modifies the event sequences in place. It also returns the
modified event sequences and updated model states and log-likelihoods.
Args:
event_sequences: A list of event sequence objects, which are extended by
this method.
model_states: A list of model states, each of which contains model inputs
and initial RNN states.
logliks: A list containing the current log-likelihood for each event
sequence.
temperature: The softmax temperature.
extend_control_events_callback: A function that takes three arguments: a
current control event sequence, a current generated event sequence,
and the control state. The function should a) extend the control event
sequence to be one longer than the generated event sequence (or do
nothing if it is already at least this long), and b) return the
resulting control state.
modify_events_callback: An optional callback for modifying the event list.
Can be used to inject events rather than having them generated. If not
None, will be called with 3 arguments after every event: the current
EventSequenceEncoderDecoder, a list of current EventSequences, and a
list of current encoded event inputs.
Returns:
event_sequences: A list of extended event sequences. These are modified in
place but also returned.
final_states: A list of resulting model states, containing model inputs
for the next step along with RNN states for each event sequence.
logliks: A list containing the updated log-likelihood for each event
sequence.
"""
# Split the sequences to extend into batches matching the model batch size.
batch_size = self._batch_size()
num_seqs = len(event_sequences)
num_batches = int(np.ceil(num_seqs / float(batch_size)))
# Extract inputs and RNN states from the model states.
inputs = [model_state.inputs for model_state in model_states]
initial_states = [model_state.rnn_state for model_state in model_states]
# Also extract control sequences and states.
control_sequences = [
model_state.control_events for model_state in model_states]
control_states = [
model_state.control_state for model_state in model_states]
final_states = []
logliks = np.array(logliks, dtype=np.float32)
# Add padding to fill the final batch.
pad_amt = -len(event_sequences) % batch_size
padded_event_sequences = event_sequences + [
copy.deepcopy(event_sequences[-1]) for _ in range(pad_amt)]
padded_inputs = inputs + [inputs[-1]] * pad_amt
padded_initial_states = initial_states + [initial_states[-1]] * pad_amt
for b in range(num_batches):
i, j = b * batch_size, (b + 1) * batch_size
pad_amt = max(0, j - num_seqs)
# Generate a single step for one batch of event sequences.
batch_final_state, batch_loglik = self._generate_step_for_batch(
padded_event_sequences[i:j],
padded_inputs[i:j],
state_util.batch(padded_initial_states[i:j], batch_size),
temperature)
final_states += state_util.unbatch(
batch_final_state, batch_size)[:j - i - pad_amt]
logliks[i:j - pad_amt] += batch_loglik[:j - i - pad_amt]
# Construct inputs for next step.
if extend_control_events_callback is not None:
# We are conditioning on control sequences.
for idx in range(len(control_sequences)):
# Extend each control sequence to ensure that it is longer than the
# corresponding event sequence.
control_states[idx] = extend_control_events_callback(
control_sequences[idx], event_sequences[idx], control_states[idx])
next_inputs = self._config.encoder_decoder.get_inputs_batch(
control_sequences, event_sequences)
else:
next_inputs = self._config.encoder_decoder.get_inputs_batch(
event_sequences)
if modify_events_callback:
# Modify event sequences and inputs for next step.
modify_events_callback(
self._config.encoder_decoder, event_sequences, next_inputs)
model_states = [ModelState(inputs=inputs, rnn_state=final_state,
control_events=control_events,
control_state=control_state)
for inputs, final_state, control_events, control_state
in zip(next_inputs, final_states,
control_sequences, control_states)]
return event_sequences, model_states, logliks
def _generate_events(self, num_steps, primer_events, temperature=1.0,
beam_size=1, branch_factor=1, steps_per_iteration=1,
control_events=None, control_state=None,
extend_control_events_callback=(
_extend_control_events_default),
modify_events_callback=None):
"""Generate an event sequence from a primer sequence.
Args:
num_steps: The integer length in steps of the final event sequence, after
generation. Includes the primer.
primer_events: The primer event sequence, a Python list-like object.
temperature: A float specifying how much to divide the logits by
before computing the softmax. Greater than 1.0 makes events more
random, less than 1.0 makes events less random.
beam_size: An integer, beam size to use when generating event sequences
via beam search.
branch_factor: An integer, beam search branch factor to use.
steps_per_iteration: An integer, number of steps to take per beam search
iteration.
control_events: A sequence of control events upon which to condition the
generation. If not None, the encoder/decoder should be a
ConditionalEventSequenceEncoderDecoder, and the control events will be
used along with the target sequence to generate model inputs. In some
cases, the control event sequence cannot be fully-determined as later
control events depend on earlier generated events; use the
`extend_control_events_callback` argument to provide a function that
extends the control event sequence.
control_state: Initial state used by `extend_control_events_callback`.
extend_control_events_callback: A function that takes three arguments: a
current control event sequence, a current generated event sequence,
and the control state. The function should a) extend the control event
sequence to be one longer than the generated event sequence (or do
nothing if it is already at least this long), and b) return the
resulting control state.
modify_events_callback: An optional callback for modifying the event list.
Can be used to inject events rather than having them generated. If not
None, will be called with 3 arguments after every event: the current
EventSequenceEncoderDecoder, a list of current EventSequences, and a
list of current encoded event inputs.
Returns:
The generated event sequence (which begins with the provided primer).
Raises:
EventSequenceRnnModelError: If the primer sequence has zero length or
is not shorter than num_steps.
"""
if (control_events is not None and
not isinstance(self._config.encoder_decoder,
mm.ConditionalEventSequenceEncoderDecoder)):
raise EventSequenceRnnModelError(
'control sequence provided but encoder/decoder is not a '
'ConditionalEventSequenceEncoderDecoder')
if control_events is not None and extend_control_events_callback is None:
raise EventSequenceRnnModelError(
          'must provide callback for extending control sequence (or use '
          'default)')
if not primer_events:
raise EventSequenceRnnModelError(
'primer sequence must have non-zero length')
if len(primer_events) >= num_steps:
raise EventSequenceRnnModelError(
'primer sequence must be shorter than `num_steps`')
if len(primer_events) >= num_steps:
# Sequence is already long enough, no need to generate.
return primer_events
event_sequences = [copy.deepcopy(primer_events)]
# Construct inputs for first step after primer.
if control_events is not None:
# We are conditioning on a control sequence. Make sure it is longer than
# the primer sequence.
control_state = extend_control_events_callback(
control_events, primer_events, control_state)
inputs = self._config.encoder_decoder.get_inputs_batch(
[control_events], event_sequences, full_length=True)
else:
inputs = self._config.encoder_decoder.get_inputs_batch(
event_sequences, full_length=True)
if modify_events_callback:
# Modify event sequences and inputs for first step after primer.
modify_events_callback(
self._config.encoder_decoder, event_sequences, inputs)
graph_initial_state = self._session.graph.get_collection('initial_state')
initial_states = state_util.unbatch(self._session.run(graph_initial_state))
# Beam search will maintain a state for each sequence consisting of the next
# inputs to feed the model, and the current RNN state. We start out with the
# initial full inputs batch and the zero state.
initial_state = ModelState(
inputs=inputs[0], rnn_state=initial_states[0],
control_events=control_events, control_state=control_state)
generate_step_fn = functools.partial(
self._generate_step,
temperature=temperature,
extend_control_events_callback=
extend_control_events_callback if control_events is not None else None,
modify_events_callback=modify_events_callback)
events, _, loglik = beam_search(
initial_sequence=event_sequences[0],
initial_state=initial_state,
generate_step_fn=generate_step_fn,
num_steps=num_steps - len(primer_events),
beam_size=beam_size,
branch_factor=branch_factor,
steps_per_iteration=steps_per_iteration)
tf.logging.info('Beam search yields sequence with log-likelihood: %f ',
loglik)
return events
def _evaluate_batch_log_likelihood(self, event_sequences, inputs,
initial_state):
"""Evaluates the log likelihood of a batch of event sequences.
Args:
event_sequences: A list of event sequences, each of which is a Python
list-like object. The list of event sequences should have length equal
to `self._batch_size()`.
inputs: A Python list of model inputs, with length equal to
`self._batch_size()`.
initial_state: A numpy array containing the initial RNN state, where
`initial_state.shape[0]` is equal to `self._batch_size()`.
Returns:
A Python list containing the log likelihood of each sequence in
`event_sequences`.
"""
graph_inputs = self._session.graph.get_collection('inputs')[0]
graph_initial_state = self._session.graph.get_collection('initial_state')
graph_softmax = self._session.graph.get_collection('softmax')[0]
graph_temperature = self._session.graph.get_collection('temperature')
feed_dict = {graph_inputs: inputs,
tuple(graph_initial_state): initial_state}
# For backwards compatibility, we only try to pass temperature if the
# placeholder exists in the graph.
if graph_temperature:
feed_dict[graph_temperature[0]] = 1.0
softmax = self._session.run(graph_softmax, feed_dict)
return self._config.encoder_decoder.evaluate_log_likelihood(
event_sequences, softmax)
def _evaluate_log_likelihood(self, event_sequences, control_events=None):
"""Evaluate log likelihood for a list of event sequences of the same length.
Args:
event_sequences: A list of event sequences for which to evaluate the log
likelihood.
control_events: A sequence of control events upon which to condition the
event sequences. If not None, the encoder/decoder should be a
ConditionalEventSequenceEncoderDecoder, and the log likelihood of each
event sequence will be computed conditional on the control sequence.
Returns:
The log likelihood of each sequence in `event_sequences`.
Raises:
EventSequenceRnnModelError: If the event sequences are not all the
same length, or if the control sequence is shorter than the event
sequences.
"""
num_steps = len(event_sequences[0])
for events in event_sequences[1:]:
if len(events) != num_steps:
raise EventSequenceRnnModelError(
'log likelihood evaluation requires all event sequences to have '
'the same length')
if control_events is not None and len(control_events) < num_steps:
raise EventSequenceRnnModelError(
'control sequence must be at least as long as the event sequences')
batch_size = self._batch_size()
    num_full_batches = len(event_sequences) // batch_size
loglik = np.empty(len(event_sequences))
# Since we're computing log-likelihood and not generating, the inputs batch
# doesn't need to include the final event in each sequence.
if control_events is not None:
# We are conditioning on a control sequence.
inputs = self._config.encoder_decoder.get_inputs_batch(
[control_events] * len(event_sequences),
[events[:-1] for events in event_sequences],
full_length=True)
else:
inputs = self._config.encoder_decoder.get_inputs_batch(
[events[:-1] for events in event_sequences], full_length=True)
graph_initial_state = self._session.graph.get_collection('initial_state')
initial_state = [
self._session.run(graph_initial_state)] * len(event_sequences)
offset = 0
for _ in range(num_full_batches):
# Evaluate a single step for one batch of event sequences.
batch_indices = range(offset, offset + batch_size)
batch_loglik = self._evaluate_batch_log_likelihood(
[event_sequences[i] for i in batch_indices],
[inputs[i] for i in batch_indices],
initial_state[batch_indices])
loglik[batch_indices] = batch_loglik
offset += batch_size
if offset < len(event_sequences):
# There's an extra non-full batch. Pad it with a bunch of copies of the
# final sequence.
num_extra = len(event_sequences) - offset
pad_size = batch_size - num_extra
batch_indices = range(offset, len(event_sequences))
batch_loglik = self._evaluate_batch_log_likelihood(
[event_sequences[i] for i in batch_indices] + [
copy.deepcopy(event_sequences[-1]) for _ in range(pad_size)],
          [inputs[i] for i in batch_indices] + [inputs[-1]] * pad_size,
np.append(initial_state[batch_indices],
np.tile(inputs[-1, :], (pad_size, 1)),
axis=0))
loglik[batch_indices] = batch_loglik[0:num_extra]
return loglik
class EventSequenceRnnConfig(object):
"""Stores a configuration for an event sequence RNN.
Only one of `steps_per_quarter` or `steps_per_second` will be applicable for
any particular model.
Attributes:
details: The GeneratorDetails message describing the config.
encoder_decoder: The EventSequenceEncoderDecoder or
ConditionalEventSequenceEncoderDecoder object to use.
hparams: The HParams containing hyperparameters to use. Will be merged with
default hyperparameter values.
steps_per_quarter: The integer number of quantized time steps per quarter
note to use.
steps_per_second: The integer number of quantized time steps per second to
use.
"""
def __init__(self, details, encoder_decoder, hparams,
steps_per_quarter=4, steps_per_second=100):
hparams_dict = {
'batch_size': 64,
'rnn_layer_sizes': [128, 128],
'dropout_keep_prob': 1.0,
'attn_length': 0,
'clip_norm': 3,
'learning_rate': 0.001,
'residual_connections': False,
'use_cudnn': False
}
hparams_dict.update(hparams.values())
self.details = details
self.encoder_decoder = encoder_decoder
self.hparams = contrib_training.HParams(**hparams_dict)
self.steps_per_quarter = steps_per_quarter
self.steps_per_second = steps_per_second
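
# A minimal sketch (not part of the original module) showing how a config might
# be assembled. The generator id/description and hyperparameter values are
# hypothetical; it assumes `magenta.music` is imported as `mm` and that
# `magenta.protobuf.generator_pb2` is importable in the caller's environment:
#
#   example_config = EventSequenceRnnConfig(
#       details=generator_pb2.GeneratorDetails(
#           id='example_rnn', description='Example event sequence RNN'),
#       encoder_decoder=mm.OneHotEventSequenceEncoderDecoder(
#           mm.MelodyOneHotEncoding(min_note=48, max_note=84)),
#       hparams=contrib_training.HParams(batch_size=128,
#                                        rnn_layer_sizes=[64, 64]),
#       steps_per_quarter=4)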
| 43.545966 | 80 | 0.700732 | [
"Apache-2.0"
] | Surya130499/magenta | magenta/models/shared/events_rnn_model.py | 23,210 | Python |
# Functions for visualization
import numpy as np
import networkx as nx
import multinetx as mx
from jinja2 import Environment, FileSystemLoader, Template
import json
from networkx.readwrite import json_graph
def write_mx_to_json(filename, mg, nNodes, pos, nLayers, nodes_to_remove = []):
# filename the complete name of the output file (data/slide_x.json)
    # mg             the multilayer network as a multinetx object
# nNodes the number of nodes in the first layer
# pos a dictionary of node coordinates
# nLayers the number of layers in the second aspect.
# nodes_to_remove is a list of nodes that should not exist in each layer. Default = []
# From the sparse adj, make a networkx graph and add node attributes
G1 = nx.from_numpy_array(mx.adjacency_matrix(mg,weight='weight').todense())
# Remove nodes from G
G1.remove_nodes_from(nodes_to_remove)
# Recreate the graph G to make the rest work nicely.
G = nx.from_numpy_array(nx.adjacency_matrix(G1).todense())
# Create dictionaries pretending like all nodes exist
scalefact = 20
L2_classes = np.arange(nLayers)
L2_array_original = np.array([])
z_shift = 2
z_array_original = np.array([])
x_orig = np.array([])
y_orig = np.array([])
L1_orig = np.array([])
for level in L2_classes:
L2_array_original = np.concatenate((L2_array_original, np.array([float(level) for i in np.arange(nNodes)])))
z_array_original = np.concatenate((z_array_original, np.array([float(level*z_shift) for i in np.arange(nNodes)])))
x_orig = np.concatenate((x_orig, [pos[key][0]+scalefact for key in pos]))
y_orig = np.concatenate((y_orig, [pos[key][1]+scalefact for key in pos]))
L1_orig = np.concatenate((L1_orig, [i for i in np.arange(nNodes)]))
# Need to delete nodes from our attribute dictionaries, too
L2_array = np.delete(L2_array_original, nodes_to_remove, 0)
z_array = np.delete(z_array_original, nodes_to_remove, 0)
x_array = np.delete(x_orig, nodes_to_remove, 0)
y_array = np.delete(y_orig, nodes_to_remove, 0)
L1_array = np.delete(L1_orig, nodes_to_remove, 0)
## Each node will get attributes L1=node id, L2=slice number, x position, y position, and name/id
id_dict = {i:("id"+str(i)) for i in np.arange(nNodes*nLayers)}
x_dict = {}
y_dict = {}
L2_dict = {i:l2 for i,l2 in enumerate(L2_array)}
z_dict = {i:z_val for i,z_val in enumerate(z_array)}
x_dict = {i:x_val for i,x_val in enumerate(x_array)}
y_dict = {i:y_val for i,y_val in enumerate(y_array)}
L1_dict = {i:L1_val for i,L1_val in enumerate(L1_array)}
nx.set_node_attributes(G, id_dict, name = "name")
nx.set_node_attributes(G, x_dict, name = "x")
nx.set_node_attributes(G, y_dict, name = "y")
nx.set_node_attributes(G, z_dict, name = "z")
nx.set_node_attributes(G, L1_dict, name= "L1")
nx.set_node_attributes(G, L2_dict, name= "L2")
G_json = json_graph.node_link_data(G)
# Write for visualization function
G_json_viz = json.dumps(G_json, indent = 4)
# To save as a .json file
with open(filename, 'w') as fp:
json.dump(G_json, fp)
print(f"done writing mx to {filename}")
return G_json_viz
#Finished defining functions
print("finished defining functions")
def visualize(
mxgraph,
theme="dark",
path_html="visualization_output.html",
title="MX viz",
save_file=True,
):
# Find the module absolute path and locate templates
# module_root = os.path.join(os.path.dirname('./'), "templates")
module_root = "./"
env = Environment(loader=FileSystemLoader(module_root))
# Read in the D3 save pages code and include in the exported html
d3_save_svg_path = "./d3-save-svg-gh-pages/assets/d3-save-svg.min.js"
with open(d3_save_svg_path,'r') as f:
d3_save_svg = f.readlines()
if theme=="dark":
js_path = './javascript/mx_viz.js'
with open(js_path, "r") as f:
js_text = f.read()
css_path = './style/style.css'
with open(css_path, "r") as f:
css_text = f.read()
# Jinja
template = env.get_template("./templates/mx_viz.html").render(
title=title,
js_text=js_text,
css_text=css_text,
mxgraph=mxgraph,
d3_save_svg=d3_save_svg[0]
)
elif theme == "light":
js_path = './javascript/mx_vizlighttheme.js'
with open(js_path, "r") as f:
js_text = f.read()
css_path = './style/style_lighttheme.css'
with open(css_path, "r") as f:
css_text = f.read()
# Jinja
template = env.get_template("./templates/mx_viz_lighttheme.html").render(
title=title,
js_text=js_text,
css_text=css_text,
mxgraph=mxgraph,
d3_save_svg=d3_save_svg[0]
)
if save_file:
with open(path_html, "wb") as outfile:
print("Wrote visualization to: %s" % (path_html))
outfile.write(template.encode("utf-8"))
return template
def visualize_timeseries(
mxgraph,
path_html="visualization_timeseries_output.html",
title="MX viz",
save_file=True,
):
# Find the module absolute path and locate templates
# module_root = os.path.join(os.path.dirname('./'), "templates")
module_root = "./"
env = Environment(loader=FileSystemLoader(module_root))
# Read in the D3 save pages code and include in the exported html
d3_save_svg_path = "./d3-save-svg-gh-pages/assets/d3-save-svg.min.js"
with open(d3_save_svg_path,'r') as f:
d3_save_svg = f.readlines()
# Find the absolute module path and the static files
# js_path = os.path.join(os.path.dirname(__file__), "static", "kmapper.js")
js_path = './javascript/mx_viz_timeseries.js'
with open(js_path, "r") as f:
js_text = f.read()
css_path = './style/style_timeseries.css'
with open(css_path, "r") as f:
css_text = f.read()
# Jinja
template = env.get_template("./templates/mx_viz_timeseries.html").render(
title=title,
js_text=js_text,
css_text=css_text,
mxgraph=mxgraph,
d3_save_svg=d3_save_svg[0]
)
if save_file:
with open(path_html, "wb") as outfile:
print("Wrote visualization to: %s" % (path_html))
outfile.write(template.encode("utf-8"))
return template
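
# A minimal usage sketch (not part of the original script). It assumes `mg` is
# an existing multinetx multilayer graph whose first layer has 10 nodes stacked
# over 2 layers, and that the templates/javascript/style folders referenced
# above sit next to this script:
#
#   layer = nx.erdos_renyi_graph(10, 0.3, seed=1)
#   pos = nx.spring_layout(layer)
#   mx_json = write_mx_to_json("data/slide_1.json", mg, 10, pos, 2)
#   visualize(mx_json, theme="dark", path_html="visualization_output.html")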
| 31.14486 | 122 | 0.632558 | [
"MIT"
] | asizemore/multilayer_network_examples | mx_viz.py | 6,665 | Python |
#!/usr/bin/python
import logging
class NullHandler(logging.Handler):
def emit(self, record):
pass
log = logging.getLogger('MirrorEngine')
log.setLevel(logging.ERROR)
log.addHandler(NullHandler())
import re
import threading
import copy
from pydispatch import dispatcher
from SmartMeshSDK.protocols.DC2126AConverters import DC2126AConverters
from EventBus import EventBusClient
class MirrorEngine(EventBusClient.EventBusClient):
def __init__(self):
# store params
# log
log.info('creating instance')
# initialize parent class
EventBusClient.EventBusClient.__init__(self,
signal = 'parsedAppData_OAPTemperature',
cb = self._publish,
teardown_cb = self._cleanup,
)
self.name = 'DataConnector_MirrorEngine'
# connect extra applications
dispatcher.connect(
self._addToQueue,
signal = 'parsedAppData_DC2126A',
weak = False,
)
dispatcher.connect(
self._addToQueue,
signal = 'parsedAppData_SPIPressure',
weak = False,
)
dispatcher.connect(
self._addToQueue,
signal = 'parsedAppData_GPIONet',
weak = False,
)
dispatcher.connect(
self._addToQueue,
signal = 'parsedAppData_LIS331',
weak = False,
)
dispatcher.connect(
self._addToQueue,
signal = 'parsedAppData_OAPtilt',
weak = False,
)
dispatcher.connect(
self.getMirrorData,
signal = 'getMirrorData',
weak = False,
)
dispatcher.connect(
self.calibrateMirrorData,
signal = 'calibrateMirrorData',
weak = False,
)
dispatcher.connect(
self.clearMirrorData,
signal = 'clearMirrorData',
weak = False,
)
# add stats
# local variables
self.dataLock = threading.Lock()
self.pressureOffsets = {}
self.mirrordata = []
self.dc2126Aconverters = DC2126AConverters.DC2126AConverters()
#======================== public ==========================================
def getMirrorData(self,sender,signal,data):
with self.dataLock:
return copy.deepcopy(self.mirrordata)
def calibrateMirrorData(self,sender,signal,data):
with self.dataLock:
pressures = {}
for row in self.mirrordata:
if row['type']=='pressure':
pressures[row['mac']] = int(row['lastvalue'].split('_')[0])
if len(pressures)==2:
macs = pressures.keys()
offset = pressures[macs[0]]-pressures[macs[1]]
self.pressureOffsets = {}
self.pressureOffsets[macs[0]] = -offset
self.pressureOffsets[macs[1]] = 0
def clearMirrorData(self,sender,signal,data):
with self.dataLock:
self.mirrordata = []
#======================== private =========================================
def _cleanup(self):
# disconnect extra applications
dispatcher.disconnect(
self._addToQueue,
signal = 'parsedAppData_DC2126A',
weak = False,
)
dispatcher.disconnect(
self._addToQueue,
signal = 'parsedAppData_SPIPressure',
weak = False,
)
dispatcher.disconnect(
self._addToQueue,
signal = 'parsedAppData_GPIONet',
weak = False,
)
dispatcher.disconnect(
self._addToQueue,
signal = 'parsedAppData_LIS331',
weak = False,
)
dispatcher.disconnect(
self._addToQueue,
signal = 'parsedAppData_OAPtilt',
weak = False,
)
dispatcher.disconnect(
self.getMirrorData,
signal = 'getMirrorData',
weak = False,
)
dispatcher.disconnect(
self.calibrateMirrorData,
signal = 'calibrateMirrorData',
weak = False,
)
dispatcher.disconnect(
self.clearMirrorData,
signal = 'clearMirrorData',
weak = False,
)
def _publish(self,sender,signal,data):
# format the data to publish
newData = []
mac = data['mac']
if signal in ['parsedAppData_OAPTemperature']:
# temperature reported in 1/100th C, displayed in C
temperature_C = float(data['fields']['temperature'])/100.0
# format newData entry
newData += [
{
'mac': mac,
'type': 'temperature',
'lastvalue': str(temperature_C),
'lastupdated': str(data['timestamp']),
'subscribeToLed': True,
}
]
elif signal in ['parsedAppData_DC2126A']:
# publish temperature
temperature = self.dc2126Aconverters.convertTemperature(
data['fields']['temperature'],
)
if temperature!=None:
newData += [
{
'mac': mac,
'type': 'temperature',
'lastvalue': str(temperature),
'lastupdated': str(data['timestamp']),
}
]
# publish adcValue
adcValue = self.dc2126Aconverters.convertAdcValue(
data['fields']['adcValue'],
)
newData += [
{
'mac': mac,
'type': 'voltage',
'lastvalue': adcValue,
'lastupdated': str(data['timestamp']),
}
]
# publish energysource
energysource = self.dc2126Aconverters.convertEnergySource(
mac,adcValue,
)
newData += [
{
'mac': mac,
'type': 'energysource',
'lastvalue': energysource,
'lastupdated': str(data['timestamp']),
}
]
elif signal in ['parsedAppData_SPIPressure']:
with self.dataLock:
if mac in self.pressureOffsets:
offset = self.pressureOffsets[mac]
else:
offset = 0
# format newData entry
newData += [
{
'mac': mac,
'type': 'pressure',
'lastvalue': str(data['fields']['adcPressure']) + "_" + str(offset),
'lastupdated': str(data['timestamp']),
}
]
elif signal in ['parsedAppData_GPIONet']:
# convert 'pinVal' field to meaning
if data['fields']['pinVal']==1:
energysource = 'solar'
elif data['fields']['pinVal']==2:
energysource = 'vibration'
elif data['fields']['pinVal']==3:
energysource = 'temperature'
else:
energysource = 'battery'
# format newData entry
newData += [
{
'mac': mac,
'type': 'energysource',
'lastvalue': energysource,
'lastupdated': str(data['timestamp']),
}
]
elif signal in ['parsedAppData_LIS331']:
# format newData entry
newData += [
{
'mac': mac,
'type': 'acceleration',
'lastvalue': '{0}_{1}_{2}'.format(
data['fields']['x'],
data['fields']['y'],
data['fields']['z'],
),
'lastupdated': str(data['timestamp']),
}
]
elif signal in ['parsedAppData_OAPtilt']:
# format newData entry
newData += [
{
'mac': mac,
'type': 'tilt',
'lastvalue': '{0}'.format(data['fields']['status']),
'lastupdated': str(data['timestamp']),
}
]
else:
raise SystemError('unexpected signal={0}'.format(signal))
# store local mirror of data
with self.dataLock:
for nd in newData:
found = False
newDataSource = nd['mac']
newDataType = nd['type']
for i,e in enumerate(self.mirrordata):
if e['mac']==newDataSource and e['type']==newDataType:
found = True
self.mirrordata[i] = nd
break
if not found:
self.mirrordata.append(nd)
# dispatch (once even if multiple data points)
with self.dataLock:
for nd in newData:
dispatcher.send(
signal = 'newDataMirrored',
data = copy.deepcopy(nd),
)
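
#============================ example ======================================
# A sketch (not part of the original module) of how another component could
# consume the mirrored data; it assumes a MirrorEngine instance is already
# running and publishing on the event bus:
#
#   def printMirroredData(sender, signal, data):
#       print(data['mac'], data['type'], data['lastvalue'])
#
#   dispatcher.connect(
#       printMirroredData,
#       signal = 'newDataMirrored',
#       weak = False,
#   )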
| 32.876972 | 94 | 0.420745 | [
"BSD-3-Clause"
] | twatteyne/dustlink_academy | DataConnector/MirrorEngine.py | 10,422 | Python |
(p9.ggplot(titanic.dropna(subset=['Age']), p9.aes(x='Sex', y='Age'))
+ p9.geom_violin()
+ p9.geom_jitter(alpha=0.2)
) | 31.75 | 68 | 0.598425 | [
"BSD-3-Clause"
] | jonasvdd/DS-python-data-analysis | notebooks/_solutions/visualization_02_plotnine3.py | 127 | Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2017 New Vector Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Moves a list of remote media from one media store to another.
The input should be a list of media files to be moved, one per line. Each line
should be formatted::
<origin server>|<file id>
This can be extracted from postgres with::
psql --tuples-only -A -c "select media_origin, filesystem_id from
matrix.remote_media_cache where ..."
To use, pipe the above into::
PYTHON_PATH=. ./scripts/move_remote_media_to_new_store.py <source repo> <dest repo>
"""
from __future__ import print_function
import argparse
import logging
import sys
import os
import shutil
from synapse.rest.media.v1.filepath import MediaFilePaths
logger = logging.getLogger()
def main(src_repo, dest_repo):
src_paths = MediaFilePaths(src_repo)
dest_paths = MediaFilePaths(dest_repo)
for line in sys.stdin:
line = line.strip()
parts = line.split('|')
if len(parts) != 2:
print("Unable to parse input line %s" % line, file=sys.stderr)
exit(1)
move_media(parts[0], parts[1], src_paths, dest_paths)
def move_media(origin_server, file_id, src_paths, dest_paths):
"""Move the given file, and any thumbnails, to the dest repo
Args:
origin_server (str):
file_id (str):
src_paths (MediaFilePaths):
dest_paths (MediaFilePaths):
"""
logger.info("%s/%s", origin_server, file_id)
# check that the original exists
original_file = src_paths.remote_media_filepath(origin_server, file_id)
if not os.path.exists(original_file):
logger.warn(
"Original for %s/%s (%s) does not exist",
origin_server, file_id, original_file,
)
else:
mkdir_and_move(
original_file,
dest_paths.remote_media_filepath(origin_server, file_id),
)
# now look for thumbnails
original_thumb_dir = src_paths.remote_media_thumbnail_dir(
origin_server, file_id,
)
if not os.path.exists(original_thumb_dir):
return
mkdir_and_move(
original_thumb_dir,
dest_paths.remote_media_thumbnail_dir(origin_server, file_id)
)
def mkdir_and_move(original_file, dest_file):
dirname = os.path.dirname(dest_file)
if not os.path.exists(dirname):
logger.debug("mkdir %s", dirname)
os.makedirs(dirname)
logger.debug("mv %s %s", original_file, dest_file)
shutil.move(original_file, dest_file)
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description=__doc__,
formatter_class = argparse.RawDescriptionHelpFormatter,
)
parser.add_argument(
"-v", action='store_true', help='enable debug logging')
parser.add_argument(
"src_repo",
help="Path to source content repo",
)
parser.add_argument(
"dest_repo",
help="Path to source content repo",
)
args = parser.parse_args()
logging_config = {
"level": logging.DEBUG if args.v else logging.INFO,
"format": "%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(message)s"
}
logging.basicConfig(**logging_config)
main(args.src_repo, args.dest_repo)
| 28.358209 | 87 | 0.675 | [
"Apache-2.0"
] | AP-whitehat/synapse | scripts/move_remote_media_to_new_store.py | 3,800 | Python |
# Download the Python helper library from twilio.com/docs/python/install
from twilio.rest import Client
# Find your credentials at twilio.com/console
api_key_sid = 'SKXXXX'
api_key_secret = 'your_api_key_secret'
client = Client(api_key_sid, api_key_secret)
compositionHook = client.video.compositionHooks('KHXXXX').update(
friendlyName = 'MyFirstCompositionHook',
enabled = False,
audio_sources = '*',
video_layout = {
'grid' : {
'video_sources': ['*']
}
},
status_callback = 'http://my.server.org/callbacks',
format='mp4')
print('Updated Composition Hook with SID=%s' % (compositionHook.sid))
| 32.590909 | 72 | 0.631799 | [
"MIT"
] | Abubakar672/api-snippets | video/rest/compositionhooks/update-hook/update-hook.6.x.py | 717 | Python |
import sqlite3;
import csv;
import sys;
from ordery.db import get_db
from flask import current_app
def order_csv(filename):
## Connect to the database
try:
conn = sqlite3.connect(
current_app.config['DATABASE'],
detect_types=sqlite3.PARSE_DECLTYPES); # Get a connection object for the database
conn.execute('PRAGMA foreign_keys = ON;'); # Turn on foreign key constraints
csr = conn.cursor(); # Get a cursor object for the connection
except Exception as e:
print("Error connecting to database: ", e); # Print error message
sys.exit(); # Fatal Error
## Open the orders csv file
try:
f = open(filename, newline=''); # Open the file – default for reading
r = csv.DictReader(f); # Return a dictionary reader iterator for the file
print("\n csv file openned successfully")
except Exception as e:
print("Error opening csv file: ", e); # Print error message
sys.exit(); # Fatal Error
## --------------------------------------
## Loop through the orders csv file and insert each row in the table
## File title line: ord_nbr, prod_nbr, ord_qty, ord_date
for d_row in r: # Loop on each row in the file into a list
t_row = (int(d_row['ord_nbr']), d_row['prod_nbr'], int(d_row['ord_qty']), d_row['ord_date']);
csr.execute('BEGIN TRANSACTION'); # Start transaction
try:
# Check if order number already exists
v_sql = 'SELECT id FROM orders WHERE ord_nbr = ?';
csr.execute(v_sql,(t_row[0],) );
t_id = csr.fetchone(); # Get the order id
if t_id != None: # Order number already exists in orderss
print("\nOrder number " + str(t[0]) + " already exists in orders table");
continue; # Get next order
# Get product number id IF it exists in product table
v_sql = 'SELECT id FROM products WHERE prod_nbr = ?';
csr.execute(v_sql,(t_row[1],) );
t_pid = csr.fetchone(); # Get the product id
if t_pid == None:
print("\nProduct number " + str(t_row[1]) + " does not exist in products table");
continue; # Get next order
            # If the order number does not exist and the product number exists, insert the order
if t_id == None and t_pid != None:
v_sql = '''INSERT INTO orders (ord_nbr, ord_qty, ord_date, prod_id)
VALUES (?,?,?,?);'''
csr.execute(v_sql, (t_row[0], t_row[2], t_row[3], t_pid[0]) );
conn.commit(); # Commit transaction for this row
except Exception as e:
print("Error loading Orders table " + str(e)); # Print error message
print("Order number: ", t_row[0]); # Identify order number
conn.rollback(); # Rollback this transaction
f.close(); # Close the file
conn.close()
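
# Example (a sketch, not part of the module). order_csv() needs a Flask
# application context so current_app.config['DATABASE'] resolves, and the csv
# file is expected to carry the header line: ord_nbr, prod_nbr, ord_qty, ord_date.
# `create_app` is the usual Flask factory name and is assumed to exist in ordery.
#
#   from ordery import create_app
#   app = create_app()
#   with app.app_context():
#       order_csv('orders.csv')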
| 48.0625 | 104 | 0.562094 | [
"MIT"
] | lambdagirl/ordery | ordery/order_csv.py | 3,078 | Python |
_base_ = [
'../common/mstrain-poly_3x_coco_instance.py',
'../_base_/models/mask_rcnn_r50_fpn.py'
]
model = dict(
pretrained='open-mmlab://detectron2/resnext101_32x8d',
backbone=dict(
type='ResNeXt',
depth=101,
groups=32,
base_width=8,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=False),
style='pytorch'))
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
img_norm_cfg = dict(
mean=[103.530, 116.280, 123.675],
std=[57.375, 57.120, 58.395],
to_rgb=False)
# In mstrain 3x config, img_scale=[(1333, 640), (1333, 800)],
# multiscale_mode='range'
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='LoadAnnotations',
with_bbox=True,
with_mask=True,
poly2mask=False),
dict(
type='Resize',
img_scale=[(1333, 640), (1333, 800)],
multiscale_mode='range',
keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(1333, 800),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
# Use RepeatDataset to speed up training
data = dict(
samples_per_gpu=2,
workers_per_gpu=2,
train=dict(
type='RepeatDataset',
times=3,
dataset=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_train2017.json',
img_prefix=data_root + 'train2017/',
pipeline=train_pipeline)),
val=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_val2017.json',
img_prefix=data_root + 'val2017/',
pipeline=test_pipeline),
test=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_val2017.json',
img_prefix=data_root + 'val2017/',
pipeline=test_pipeline))
| 29.452381 | 77 | 0.599838 | [
"Apache-2.0"
] | CONTEC-SIP/mmdetection | configs/mask_rcnn/mask_rcnn_x101_32x8d_fpn_mstrain-poly_3x_coco.py | 2,474 | Python |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Mostly copy-paste from timm library.
https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vision_transformer.py
"""
import math
from functools import partial
import torch
import torch.nn as nn
from utils import trunc_normal_
from swin_transformer import SwinTransformer
def drop_path(x, drop_prob: float = 0., training: bool = False):
if drop_prob == 0. or not training:
return x
keep_prob = 1 - drop_prob
shape = (x.shape[0],) + (1,) * (x.ndim - 1) # work with diff dim tensors, not just 2D ConvNets
random_tensor = keep_prob + torch.rand(shape, dtype=x.dtype, device=x.device)
random_tensor.floor_() # binarize
output = x.div(keep_prob) * random_tensor
return output
class DropPath(nn.Module):
"""Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
"""
def __init__(self, drop_prob=None):
super(DropPath, self).__init__()
self.drop_prob = drop_prob
def forward(self, x):
return drop_path(x, self.drop_prob, self.training)
class Mlp(nn.Module):
def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.):
super().__init__()
out_features = out_features or in_features
hidden_features = hidden_features or in_features
self.fc1 = nn.Linear(in_features, hidden_features)
self.act = act_layer()
self.fc2 = nn.Linear(hidden_features, out_features)
self.drop = nn.Dropout(drop)
def forward(self, x):
x = self.fc1(x)
x = self.act(x)
x = self.drop(x)
x = self.fc2(x)
x = self.drop(x)
return x
class Attention(nn.Module):
def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0., proj_drop=0.):
super().__init__()
self.num_heads = num_heads
head_dim = dim // num_heads
self.scale = qk_scale or head_dim ** -0.5
self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
self.attn_drop = nn.Dropout(attn_drop)
self.proj = nn.Linear(dim, dim)
self.proj_drop = nn.Dropout(proj_drop)
def forward(self, x):
B, N, C = x.shape
qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
q, k, v = qkv[0], qkv[1], qkv[2]
attn = (q @ k.transpose(-2, -1)) * self.scale
attn = attn.softmax(dim=-1)
attn = self.attn_drop(attn)
x = (attn @ v).transpose(1, 2).reshape(B, N, C)
x = self.proj(x)
x = self.proj_drop(x)
return x, attn
class Block(nn.Module):
def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop=0., attn_drop=0.,
drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm):
super().__init__()
self.norm1 = norm_layer(dim)
self.attn = Attention(
dim, num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=drop)
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
self.norm2 = norm_layer(dim)
mlp_hidden_dim = int(dim * mlp_ratio)
self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)
def forward(self, x, return_attention=False):
y, attn = self.attn(self.norm1(x))
if return_attention:
return attn
x = x + self.drop_path(y)
x = x + self.drop_path(self.mlp(self.norm2(x)))
return x
class PatchEmbed(nn.Module):
""" Image to Patch Embedding
"""
def __init__(self, img_size=224, patch_size=16, in_chans=3, embed_dim=768):
super().__init__()
num_patches = (img_size // patch_size) * (img_size // patch_size)
self.img_size = img_size
self.patch_size = patch_size
self.num_patches = num_patches
self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size)
def forward(self, x):
B, C, H, W = x.shape
x = self.proj(x).flatten(2).transpose(1, 2)
return x
class VisionTransformer(nn.Module):
""" Vision Transformer """
def __init__(self, img_size=[224], patch_size=16, in_chans=3, num_classes=0, embed_dim=768, depth=12,
num_heads=12, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop_rate=0., attn_drop_rate=0.,
drop_path_rate=0., norm_layer=nn.LayerNorm, **kwargs):
super().__init__()
self.num_features = self.embed_dim = embed_dim
self.patch_embed = PatchEmbed(
img_size=img_size[0], patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim)
num_patches = self.patch_embed.num_patches
self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim))
self.pos_embed = nn.Parameter(torch.zeros(1, num_patches + 1, embed_dim))
self.pos_drop = nn.Dropout(p=drop_rate)
dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)] # stochastic depth decay rule
self.blocks = nn.ModuleList([
Block(
dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale,
drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer)
for i in range(depth)])
self.norm = norm_layer(embed_dim)
# Classifier head
self.head = nn.Linear(embed_dim, num_classes) if num_classes > 0 else nn.Identity()
trunc_normal_(self.pos_embed, std=.02)
trunc_normal_(self.cls_token, std=.02)
self.apply(self._init_weights)
def _init_weights(self, m):
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.LayerNorm):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
def interpolate_pos_encoding(self, x, w, h):
npatch = x.shape[1] - 1
N = self.pos_embed.shape[1] - 1
if npatch == N and w == h:
return self.pos_embed
class_pos_embed = self.pos_embed[:, 0]
patch_pos_embed = self.pos_embed[:, 1:]
dim = x.shape[-1]
w0 = w // self.patch_embed.patch_size
h0 = h // self.patch_embed.patch_size
# we add a small number to avoid floating point error in the interpolation
# see discussion at https://github.com/facebookresearch/dino/issues/8
w0, h0 = w0 + 0.1, h0 + 0.1
patch_pos_embed = nn.functional.interpolate(
patch_pos_embed.reshape(1, int(math.sqrt(N)), int(math.sqrt(N)), dim).permute(0, 3, 1, 2),
scale_factor=(w0 / math.sqrt(N), h0 / math.sqrt(N)),
mode='bicubic',
)
assert int(w0) == patch_pos_embed.shape[-2] and int(h0) == patch_pos_embed.shape[-1]
patch_pos_embed = patch_pos_embed.permute(0, 2, 3, 1).view(1, -1, dim)
return torch.cat((class_pos_embed.unsqueeze(0), patch_pos_embed), dim=1)
def prepare_tokens(self, x):
B, nc, w, h = x.shape
x = self.patch_embed(x) # patch linear embedding
# add the [CLS] token to the embed patch tokens
cls_tokens = self.cls_token.expand(B, -1, -1)
x = torch.cat((cls_tokens, x), dim=1)
# add positional encoding to each token
x = x + self.interpolate_pos_encoding(x, w, h)
return self.pos_drop(x)
def forward(self, x):
x = self.prepare_tokens(x)
for blk in self.blocks:
x = blk(x)
x = self.norm(x)
return x[:, 0]
def get_last_selfattention(self, x):
x = self.prepare_tokens(x)
for i, blk in enumerate(self.blocks):
if i < len(self.blocks) - 1:
x = blk(x)
else:
# return attention of the last block
return blk(x, return_attention=True)
def get_intermediate_layers(self, x, n=1):
x = self.prepare_tokens(x)
# we return the output tokens from the `n` last blocks
output = []
for i, blk in enumerate(self.blocks):
x = blk(x)
if len(self.blocks) - i <= n:
output.append(self.norm(x))
return output
def vit_tiny(patch_size=16, **kwargs):
model = VisionTransformer(
patch_size=patch_size, embed_dim=192, depth=12, num_heads=3, mlp_ratio=4,
qkv_bias=True, norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)
return model
def vit_small(patch_size=16, **kwargs):
model = VisionTransformer(
patch_size=patch_size, embed_dim=384, depth=12, num_heads=6, mlp_ratio=4,
qkv_bias=True, norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)
return model
def vit_base(patch_size=16, **kwargs):
model = VisionTransformer(
patch_size=patch_size, embed_dim=768, depth=12, num_heads=12, mlp_ratio=4,
qkv_bias=True, norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)
return model
def swin_t(**kwargs):
from config import get_config_swin_t
config = get_config_swin_t()
model = SwinTransformer(img_size=config.DATA.IMG_SIZE,
patch_size=config.MODEL.SWIN.PATCH_SIZE,
in_chans=config.MODEL.SWIN.IN_CHANS,
num_classes=config.MODEL.NUM_CLASSES,
embed_dim=config.MODEL.SWIN.EMBED_DIM,
depths=config.MODEL.SWIN.DEPTHS,
num_heads=config.MODEL.SWIN.NUM_HEADS,
window_size=config.MODEL.SWIN.WINDOW_SIZE,
mlp_ratio=config.MODEL.SWIN.MLP_RATIO,
qkv_bias=config.MODEL.SWIN.QKV_BIAS,
qk_scale=config.MODEL.SWIN.QK_SCALE,
drop_rate=config.MODEL.DROP_RATE,
drop_path_rate=config.MODEL.DROP_PATH_RATE,
ape=config.MODEL.SWIN.APE,
patch_norm=config.MODEL.SWIN.PATCH_NORM,
use_checkpoint=config.TRAIN.USE_CHECKPOINT)
return model
class DINOHead(nn.Module):
def __init__(self, in_dim, out_dim, use_bn=False, norm_last_layer=True, nlayers=3, hidden_dim=2048, bottleneck_dim=256):
super().__init__()
nlayers = max(nlayers, 1)
if nlayers == 1:
self.mlp = nn.Linear(in_dim, bottleneck_dim)
else:
layers = [nn.Linear(in_dim, hidden_dim)]
if use_bn:
layers.append(nn.BatchNorm1d(hidden_dim))
layers.append(nn.GELU())
for _ in range(nlayers - 2):
layers.append(nn.Linear(hidden_dim, hidden_dim))
if use_bn:
layers.append(nn.BatchNorm1d(hidden_dim))
layers.append(nn.GELU())
layers.append(nn.Linear(hidden_dim, bottleneck_dim))
self.mlp = nn.Sequential(*layers)
self.apply(self._init_weights)
self.last_layer = nn.utils.weight_norm(nn.Linear(bottleneck_dim, out_dim, bias=False))
self.last_layer.weight_g.data.fill_(1)
if norm_last_layer:
self.last_layer.weight_g.requires_grad = False
def _init_weights(self, m):
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
def forward(self, x):
x = self.mlp(x)
x = nn.functional.normalize(x, dim=-1, p=2)
x = self.last_layer(x)
return x
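
# A minimal sketch (not from the original repository) of running the ViT
# backbones defined above on a dummy image batch:
#
#   model = vit_small(patch_size=16)
#   img = torch.randn(1, 3, 224, 224)
#   cls_embedding = model(img)                      # shape (1, 384)
#   attn = model.get_last_selfattention(img)        # (1, num_heads, tokens, tokens)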
| 39.557325 | 124 | 0.613397 | [
"Apache-2.0"
] | likelyzhao/dino | vision_transformer.py | 12,421 | Python |
# coding: utf-8
# Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401
from oci.decorators import init_model_state_from_kwargs
@init_model_state_from_kwargs
class ComputeInstanceGroupSelectorCollection(object):
"""
A collection of selectors. The combination of instances matching the selectors are included in the instance group.
"""
def __init__(self, **kwargs):
"""
Initializes a new ComputeInstanceGroupSelectorCollection object with values from keyword arguments.
The following keyword arguments are supported (corresponding to the getters/setters of this class):
:param items:
The value to assign to the items property of this ComputeInstanceGroupSelectorCollection.
:type items: list[oci.devops.models.ComputeInstanceGroupSelector]
"""
self.swagger_types = {
'items': 'list[ComputeInstanceGroupSelector]'
}
self.attribute_map = {
'items': 'items'
}
self._items = None
@property
def items(self):
"""
**[Required]** Gets the items of this ComputeInstanceGroupSelectorCollection.
A list of selectors for the instance group. UNION operator is used for combining the instances selected by each selector.
:return: The items of this ComputeInstanceGroupSelectorCollection.
:rtype: list[oci.devops.models.ComputeInstanceGroupSelector]
"""
return self._items
@items.setter
def items(self, items):
"""
Sets the items of this ComputeInstanceGroupSelectorCollection.
A list of selectors for the instance group. UNION operator is used for combining the instances selected by each selector.
:param items: The items of this ComputeInstanceGroupSelectorCollection.
:type: list[oci.devops.models.ComputeInstanceGroupSelector]
"""
self._items = items
def __repr__(self):
return formatted_flat_dict(self)
def __eq__(self, other):
if other is None:
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
| 36.028169 | 245 | 0.697029 | [
"Apache-2.0",
"BSD-3-Clause"
] | CentroidChef/oci-python-sdk | src/oci/devops/models/compute_instance_group_selector_collection.py | 2,558 | Python |
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
# Update the location of an adrespositie and add a terrein koppeling (terrain link) using a shapefile
import os, sys, codecs, datetime, argparse
import osgeo.ogr as ogr
from pyspatialite import dbapi2 as sqlite3 #import sqlite3
def updateTerrein(cur, TERREINOBJECTID , HUISNUMMERID):
cur.execute("""INSERT INTO TERREINOBJECT_HUISNUMMER_RELATIES
(ID, TERREINOBJECTID , HUISNUMMERID, BEGINDATUM, BEGINORGANISATIE, BEGINBEWERKING, BEGINTIJD )
VALUES ( (SELECT MAX("ID")+ 1 FROM "TERREINOBJECT_HUISNUMMER_RELATIES"),
?, ?, date('now'), 1, 1, strftime('%Y-%m-%dT%H:%M:%S','now')) ;""", (TERREINOBJECTID , HUISNUMMERID))
def updateAdresPosistie(cur, X , Y , herkomst, ADRESID ):
    'herkomst (origin of the address position): 2 = perceel (parcel), 3 = gebouw (building)'
cur.execute("""UPDATE ADRESPOSITIES
SET X=?, Y=?, BEGINORGANISATIE=1, BEGINBEWERKING=3, BEGINTIJD=strftime('%Y-%m-%dT%H:%M:%S','now'),
HERKOMSTADRESPOSITIE=? WHERE ID=? ;""", (X, Y, herkomst, ADRESID))
def removeDoubleTerreinKoppeling(cur):
#joined twice or more
cmd1 = """DELETE FROM TERREINOBJECT_HUISNUMMER_RELATIES
WHERE BEGINTIJD IS NULL OR BEGINTIJD > DATE('now', '-1 day')
AND EXISTS (
SELECT t2.terreinobjectid , t2.huisnummerid , t2.begindatum
FROM TERREINOBJECT_HUISNUMMER_RELATIES t2
WHERE eindtijd IS NULL
AND TERREINOBJECT_HUISNUMMER_RELATIES.terreinobjectid = t2.terreinobjectid
AND TERREINOBJECT_HUISNUMMER_RELATIES.huisnummerid = t2.huisnummerid
AND TERREINOBJECT_HUISNUMMER_RELATIES.begindatum = t2.begindatum
GROUP BY t2.terreinobjectid, t2.huisnummerid, t2.begindatum
HAVING COUNT(*) > 1
AND MAX(t2.ID) <> TERREINOBJECT_HUISNUMMER_RELATIES.ID
); """
#joined to a adres with an enddate
cmd2 = """DELETE FROM TERREINOBJECT_HUISNUMMER_RELATIES
WHERE BEGINTIJD IS NULL OR BEGINTIJD > DATE('now', '-1 day')
AND EXISTS (
SELECT einddatum FROM HUISNUMMERS
WHERE
ID = TERREINOBJECT_HUISNUMMER_RELATIES.huisnummerid
AND IFNULL(einddatum, '9999-01-01') <
IFNULL(TERREINOBJECT_HUISNUMMER_RELATIES.einddatum, '9999-01-01')
);"""
cur.execute( cmd1 )
cur.execute( cmd2 )
def readShape( shapefile, xgrabDB , koppelType=3 ):
driver = ogr.GetDriverByName("ESRI Shapefile")
dataSource = driver.Open(shapefile, 0)
layer = dataSource.GetLayer(0)
con = sqlite3.connect( xgrabDB )
with con:
cur = con.cursor()
cur.execute( "CREATE INDEX IF NOT EXISTS adresID_index ON ADRESPOSITIES (ID);" )
con.commit()
for feature in layer:
geom = feature.GetGeometryRef()
adresID = feature.GetFieldAsInteger("ADRESID")
terreinID = feature.GetFieldAsInteger("TERREINOBJ")
huisnrID = feature.GetFieldAsInteger("HUISNR_ID")
X, Y = ( geom.GetX() , geom.GetY() )
updateAdresPosistie(cur, X, Y, koppelType, adresID)
updateTerrein(cur, terreinID , huisnrID)
removeDoubleTerreinKoppeling(cur)
con.commit()
if con:
con.close()
def main():
readShape(args.shapeFile, args.xgrabDB, int( args.koppelType) )
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='update adresposities in a xgrab-db using a shapefile, requires spatialite and gdal-python')
parser.add_argument('xgrabDB', help='The input database (.sqlite)' )
    parser.add_argument('shapeFile', help='The path to the shapefile; it must contain TERREINOBJ, HUISNR_ID and ADRESID fields')
parser.add_argument('koppelType', help='2 for parcel and 3 for building', default='3')
args = parser.parse_args()
main()
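# Example invocation (hypothetical file names):
#   python update_terrein_adrespositie.py xgrab.sqlite adresposities.shp 3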
| 44.438202 | 141 | 0.637927 | [
"MIT"
] | warrieka/xgrab2db | update_terrein_adrespositie.py | 3,955 | Python |
import sqlite3
import datetime
from collections import Counter
import calendar
def return_unique_ID():
conn = sqlite3.connect("ORDERM8.db")
c = conn.cursor()
c.execute('SELECT * FROM rolodex')
IDs = []
for item in c:
ID = int(item[0])
IDs.append(ID)
IDs = sorted(IDs, key=int, reverse=True)
uniqueID = IDs[0] + 1
return str(uniqueID)
def return_unique_order_ID():
conn = sqlite3.connect("ORDERM8.db")
c = conn.cursor()
c.execute('SELECT * FROM orders')
IDs = []
for item in c:
ID = int(item[0])
IDs.append(ID)
IDs = sorted(IDs, key=int, reverse=True)
uniqueID = IDs[0] + 1
return str(uniqueID)
def input_entry(customerFirstName, customerLastName, customerPhoneNumber, customerAddress, customerPayMethod):
conn = sqlite3.connect("ORDERM8.db")
c = conn.cursor()
uniqueID = return_unique_ID()
rolodexEntry = (uniqueID, customerFirstName, customerLastName, customerPhoneNumber, customerAddress, customerPayMethod)
c.execute('INSERT INTO rolodex VALUES (?,?,?,?,?,?)', rolodexEntry)
conn.commit()
def return_all_entries():
conn = sqlite3.connect("ORDERM8.db")
c = conn.cursor()
c.execute('SELECT * FROM rolodex ORDER BY last_name')
return c
def review_all_entries():
conn = sqlite3.connect("ORDERM8.db")
c = conn.cursor()
c.execute('SELECT * FROM rolodex')
for item in c:
print(item[0], item[1], item[2], item[3], item[4])
def delete_entry_by_id(uniqueID):
conn = sqlite3.connect("ORDERM8.db")
c = conn.cursor()
    c.execute('DELETE FROM rolodex WHERE id = ?', (uniqueID,))
conn.commit()
def update_rolodex_entry(variable, variable_type, uniqueID):
if variable_type == "name":
update_rolodex_entry_name(variable, uniqueID)
elif variable_type == "phoneNumber":
update_rolodex_entry_phoneNumber(variable, uniqueID)
elif variable_type == "address":
update_rolodex_entry_address(variable, uniqueID)
elif variable_type == "payMethod":
update_rolodex_entry_payMethod(variable, uniqueID)
else:
print("failed to update anything.")
def update_rolodex_entry_name(variable, uniqueID):
combo = (variable, uniqueID)
conn = sqlite3.connect("ORDERM8.db")
c = conn.cursor()
c.execute('''UPDATE rolodex
SET name = ?
WHERE id = ?''', combo)
conn.commit()
def update_rolodex_entry_phoneNumber(variable, uniqueID):
combo = (variable, uniqueID)
conn = sqlite3.connect("ORDERM8.db")
c = conn.cursor()
c.execute('''UPDATE rolodex
                 SET phone_number = ?
WHERE id = ?''', combo)
conn.commit()
def update_rolodex_entry_address(variable, uniqueID):
combo = (variable, uniqueID)
conn = sqlite3.connect("ORDERM8.db")
c = conn.cursor()
c.execute('''UPDATE rolodex
SET address = ?
WHERE id = ?''', combo)
conn.commit()
def update_rolodex_entry_payMethod(variable, uniqueID):
combo = (variable, uniqueID)
conn = sqlite3.connect("ORDERM8.db")
c = conn.cursor()
c.execute('''UPDATE rolodex
SET payMethod = ?
WHERE id = ?''', combo)
conn.commit()
def create_rolodex_table():
conn = sqlite3.connect("ORDERM8.db")
c = conn.cursor()
create_table = """CREATE TABLE IF NOT EXISTS rolodex (
id integer PRIMARY KEY,
first_name text,
last_name text,
phone_number int,
address text,
payMethod text)
"""
c.execute(create_table)
conn.commit()
def search_by_customer_id(customer_id):
conn = sqlite3.connect("ORDERM8.db")
c = conn.cursor()
customer_id = (customer_id,)
c.execute('''SELECT * FROM rolodex WHERE id = (?)''', customer_id)
return c
def search_by_customer_first_name(customer_name):
conn = sqlite3.connect("ORDERM8.db")
c = conn.cursor()
customer_name = (customer_name,)
c.execute('''SELECT * FROM rolodex WHERE first_name = (?)''', customer_name)
return c
def search_by_customer_last_name(customer_name):
conn = sqlite3.connect("ORDERM8.db")
c = conn.cursor()
customer_name = (customer_name,)
c.execute('''SELECT * FROM rolodex WHERE last_name = (?)''', customer_name)
return c
def search_by_customer_phone_number(customer_phone_number):
conn = sqlite3.connect("ORDERM8.db")
c = conn.cursor()
customer_phone_number = (customer_phone_number,)
c.execute('''SELECT * FROM rolodex WHERE phone_number = (?)''', customer_phone_number)
return c
def create_orders_table():
conn = sqlite3.connect("ORDERM8.db")
c = conn.cursor()
create_table = """CREATE TABLE orders (
id integer PRIMARY KEY,
custid SMALLINT,
orderlist text,
time_stamp text)
"""
c.execute(create_table)
conn.commit()
def create_customerprefs_table():
conn = sqlite3.connect("ORDERM8.db")
c = conn.cursor()
create_table = """CREATE TABLE IF NOT EXISTS customerprefs (
id integer PRIMARY KEY,
customer_id integer,
field_id integer,
description text)
"""
c.execute(create_table)
conn.commit()
def new_customer_delivery_preference(customerID, customer_delivery_preference):
conn = sqlite3.connect("ORDERM8.db")
c = conn.cursor()
uniqueID = return_unique_customerpreference_ID()
orderEntry = (uniqueID, customerID, 10, customer_delivery_preference)
c.execute('INSERT INTO customerprefs VALUES (?,?,?,?)', orderEntry)
conn.commit()
def return_unique_customerpreference_ID():
conn = sqlite3.connect("ORDERM8.db")
c = conn.cursor()
c.execute('SELECT * FROM customerprefs')
IDs = []
for item in c:
ID = int(item[0])
IDs.append(ID)
IDs = sorted(IDs, key=int, reverse=True)
uniqueID = IDs[0] + 1
return str(uniqueID)
def input_new_order(customerID, order_list):
conn = sqlite3.connect("ORDERM8.db")
c = conn.cursor()
uniqueID = return_unique_order_ID()
    orderEntry = (uniqueID, customerID, order_list, datetime.datetime.now())
c.execute('INSERT INTO orders VALUES (?,?,?,?)', orderEntry)
conn.commit()
#def drop_rolodex_table():
# conn = sqlite3.connect("ORDERM8.db")
# c = conn.cursor()
# c.execute('DROP table rolodex')
# for item in c:
# orderlist = item[2].split()
# print item[0], item[1], orderlist, item[3]
def return_all_customerprefs_entries():
conn = sqlite3.connect("ORDERM8.db")
c = conn.cursor()
c.execute('SELECT * FROM customerprefs')
return c
def get_latest_customerprefs(customer_id):
conn = sqlite3.connect("ORDERM8.db")
c = conn.cursor()
customer_id_tuple = (customer_id,)
c.execute('SELECT * FROM customerprefs WHERE customer_id=(?) AND field_id = 10 ORDER BY id DESC LIMIT 1',
customer_id_tuple)
for item in c:
return item
def get_latest_foodprefs(customer_id):
conn = sqlite3.connect("ORDERM8.db")
c = conn.cursor()
customer_id_tuple = (customer_id,)
c.execute('SELECT * FROM customerprefs WHERE customer_id=(?) AND field_id = 20 ORDER BY id DESC LIMIT 1',
customer_id_tuple)
for item in c:
return item
def new_customer_food_preference(customerID, customer_food_preference):
conn = sqlite3.connect("ORDERM8.db")
c = conn.cursor()
uniqueID = return_unique_customerpreference_ID()
orderEntry = (uniqueID, customerID, 20, customer_food_preference)
c.execute('INSERT INTO customerprefs VALUES (?,?,?,?)', orderEntry)
conn.commit()
def delete_customer_and_customer_records(customerID):
conn = sqlite3.connect("ORDERM8.db")
c = conn.cursor()
id = (customerID,)
c.execute('DELETE FROM rolodex WHERE id=(?)', id)
c.execute('DELETE FROM customerprefs WHERE customer_id=(?)', id)
conn.commit()
# Day Duties Stuff.
def create_day_duties_table():
conn = sqlite3.connect("ORDERM8.db")
c = conn.cursor()
create_table = """CREATE TABLE IF NOT EXISTS day_duties (
id integer PRIMARY KEY,
date_of_entry DATE,
day_of_week text,
task text)
"""
c.execute(create_table)
conn.commit()
def return_unique_day_duty_ID():
conn = sqlite3.connect("ORDERM8.db")
c = conn.cursor()
c.execute('SELECT * FROM day_duties')
IDs = []
for item in c:
ID = int(item[0])
IDs.append(ID)
IDs = sorted(IDs, key=int, reverse=True)
    uniqueID = IDs[0] + 1 if IDs else 1  # start from 1 if the table is still empty
return str(uniqueID)
def new_day_duty(date_of_entry, day_of_week, task):
conn = sqlite3.connect("ORDERM8.db")
c = conn.cursor()
uniqueID = return_unique_day_duty_ID()
dutyEntry = (uniqueID, date_of_entry, day_of_week, task)
c.execute('INSERT INTO day_duties VALUES (?,?,?,?)', dutyEntry)
conn.commit()
def return_all_day_duties():
conn = sqlite3.connect("ORDERM8.db")
c = conn.cursor()
c.execute('SELECT * FROM day_duties')
return c
def search_by_day_of_week(day_of_week):
conn = sqlite3.connect("ORDERM8.db")
c = conn.cursor()
day_of_week = (day_of_week,)
c.execute('''SELECT * FROM day_duties WHERE day_of_week = (?)''', day_of_week)
return c
# Examples
# new_day_duty(datetime.datetime.now(), "Wednesday", "Condense Recycling")
# for item in return_all_day_duties():
# print item
# DAILY CUSTOMER ENTRIES
def delete_daily_customer_entrys(order_id):
conn = sqlite3.connect("ORDERM8.db")
c = conn.cursor()
order_id = (str(order_id),)
c.execute('''DELETE FROM daily_customers WHERE id=(?)''',order_id)
conn.commit()
def return_unique_daily_customer_entry_id():
conn = sqlite3.connect("ORDERM8.db")
c = conn.cursor()
c.execute('SELECT * FROM daily_customers')
IDs = []
for item in c:
ID = int(item[0])
IDs.append(ID)
IDs = sorted(IDs, key=int, reverse=True)
    uniqueID = IDs[0] + 1 if IDs else 1  # start from 1 if the table is still empty
return str(uniqueID)
def create_daily_customers_table():
conn = sqlite3.connect("ORDERM8.db")
c = conn.cursor()
create_table = """CREATE TABLE daily_customers (
id integer PRIMARY KEY,
custid SMALLINT,
todays_date DATE)
"""
c.execute(create_table)
conn.commit()
def new_daily_customer(customer_id):
conn = sqlite3.connect("ORDERM8.db")
c = conn.cursor()
uniqueID = return_unique_daily_customer_entry_id()
dutyEntry = (uniqueID, customer_id, datetime.date.today(),)
c.execute('INSERT INTO daily_customers VALUES (?,?,?)', dutyEntry)
conn.commit()
def return_all_daily_customer_entries():
conn = sqlite3.connect("ORDERM8.db")
c = conn.cursor()
date = (datetime.date.today(),)
c.execute('SELECT * FROM daily_customers WHERE todays_date=(?)', date)
return c
def weekly_graph_data():
entries = return_this_weeks_customer_entries()
daycount = enumerate_this_weeks_customer_entries(entries)
week_dictionary = {0:0, 1:0, 2:0, 3:0, 4:0, 5:0}
for key, value in daycount.items():
if key in week_dictionary:
week_dictionary[key] = value
else:
pass
return week_dictionary
def monthly_graph_data():
    entries, start_end = return_this_months_customer_entries()  # one call returns both the cursor and the date range
daycount = enumerate_this_months_customer_entries(entries)
month_dictionary = {}
dictionary_range = range(start_end[0].day,start_end[1].day+1,1)
for item in dictionary_range:
month_dictionary[item] = 0
for key, value in daycount.items():
if key in month_dictionary:
month_dictionary[key] = value
else:
pass
return month_dictionary
def yearly_graph_data():
entries = return_this_years_customer_entries()
daycount = enumerate_this_years_customer_entries(entries)
week_dictionary = {0:0, 1:0, 2:0, 3:0, 4:0, 5:0, 6:0, 7:0, 8:0, 9:0, 10:0, 11:0, 12:0}
for key, value in daycount.items():
if key in week_dictionary:
week_dictionary[key] = value
else:
pass
return week_dictionary
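# Hypothetical usage sketch for the *_graph_data helpers above (assumes the
# daily_customers table already contains entries; the counts shown are made up):
# weekly_graph_data()  -> {0: 3, 1: 5, 2: 0, 3: 2, 4: 1, 5: 0}   # keyed by weekday, 0 = Monday
# monthly_graph_data() -> {1: 2, 2: 0, ..., 31: 4}               # keyed by day of the current month
# yearly_graph_data()  -> {0: 0, 1: 6, ..., 12: 3}               # keyed by month number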
def return_all_customer_entries_from_daily_customers():
conn = sqlite3.connect("ORDERM8.db")
c = conn.cursor()
c.execute('SELECT * FROM daily_customers')
return c
def return_this_weeks_customer_entries():
conn = sqlite3.connect("ORDERM8.db")
c = conn.cursor()
today_date = (datetime.date.today(),)
week_start = today_date[0] - datetime.timedelta(days=today_date[0].weekday())
week_end = week_start + datetime.timedelta(days=4)
week_tuple = (week_start, week_end,)
c.execute('SELECT * FROM daily_customers WHERE todays_date BETWEEN (?) and (?)', week_tuple)
return c
def enumerate_this_weeks_customer_entries(customer_entries):
dates = []
for item in customer_entries:
dates.append(item[2])
days = []
for item in dates:
days.append(datetime.datetime.strptime(item, "%Y-%m-%d").weekday())
return Counter(days)
def return_this_months_customer_entries():
conn = sqlite3.connect("ORDERM8.db")
c = conn.cursor()
today_date = datetime.date.today()
start_of_month = datetime.datetime(today_date.year, today_date.month, 1)
end_of_month = datetime.datetime(today_date.year,
today_date.month,
calendar.monthrange(today_date.year, today_date.month)[1])
start_end_tuple = (start_of_month, end_of_month)
c.execute('SELECT * FROM daily_customers WHERE todays_date BETWEEN (?) and (?)', start_end_tuple)
return c, start_end_tuple
def enumerate_this_months_customer_entries(customer_entries):
dates = []
for item in customer_entries:
dates.append(item[2])
days = []
for item in dates:
days.append(datetime.datetime.strptime(item, "%Y-%m-%d").day)
return Counter(days)
def return_this_years_customer_entries():
conn = sqlite3.connect("ORDERM8.db")
c = conn.cursor()
today_date = datetime.date.today()
start_of_year = datetime.datetime(today_date.year, 1, 1)
end_of_year = datetime.datetime(today_date.year, 12, 31)
start_end_tuple = (start_of_year, end_of_year)
c.execute('SELECT * FROM daily_customers WHERE todays_date BETWEEN (?) and (?)', start_end_tuple)
return c, start_end_tuple
def enumerate_this_years_customer_entries(customer_entries):
dates = []
for item in customer_entries[0]:
dates.append(item[2])
days = []
for item in dates:
days.append(datetime.datetime.strptime(item, "%Y-%m-%d").month)
return Counter(days)
# for messing around with daily customer entries
# for item in return_all_daily_customer_entries():
# print item
# for item in range(0,15):
# delete_daily_customer_entrys(item)
# FOR COPYING ROLODEX AND CUSTOMERPREFS FROM PEPS DB TO NEW DB
# will have to delete all entries on the new db in customerprefs and rolodex for this to work.
def create_test_empty_db():
conn = sqlite3.connect("ORDERM8_test.db")
conn.close()
def copy_table_db_to_db():
conn = sqlite3.connect("ORDERM8_test.db")#newdb
c = conn.cursor()
c.execute("ATTACH 'ORDERM8.db' AS test")#pepsdb
c.execute("INSERT INTO rolodex SELECT * FROM test.rolodex")
conn.commit()
# create_test_empty_db()
# create_rolodex_table()
# will only need this function once rolodex and customerprefs are cleared out.
# copy_table_db_to_db()
def select_all_activity(customer_id):
conn = sqlite3.connect("ORDERM8.db")
c = conn.cursor()
customer_id_tuple = (customer_id,)
c.execute("SELECT * FROM daily_customers WHERE custid=(?)", customer_id_tuple)
return c
def select_recent_activity(customer_id):
conn = sqlite3.connect("ORDERM8.db")
c = conn.cursor()
customer_id_tuple = (customer_id,)
c.execute("SELECT * FROM daily_customers WHERE custid=(?) ORDER BY todays_date DESC LIMIT 10", customer_id_tuple)
return c
| 30.608939 | 123 | 0.648902 | ["MIT"] | StavromularBeta/ORDERM8 | SQL_functions.py | 16,437 | Python |
"""
MIT License
Copyright (c) 2020-2021 phenom4n4n
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
# Multi-file class combining taken from https://github.com/Cog-Creators/Red-DiscordBot/blob/V3/develop/redbot/cogs/mod/mod.py
import logging
from abc import ABC
from typing import Literal
from redbot.core import commands
from redbot.core.bot import Red
from redbot.core.config import Config
from .reactroles import ReactRoles
from .roles import Roles
log = logging.getLogger("red.phenom4n4n.roleutils")
RequestType = Literal["discord_deleted_user", "owner", "user", "user_strict"]
class CompositeMetaClass(type(commands.Cog), type(ABC)):
"""
This allows the metaclass used for proper type detection to
coexist with discord.py's metaclass
"""
class RoleUtils(
Roles,
ReactRoles,
commands.Cog,
metaclass=CompositeMetaClass,
):
"""
Useful role commands.
Includes massroling, role targeting, and reaction roles.
"""
__version__ = "1.3.5"
def format_help_for_context(self, ctx):
pre_processed = super().format_help_for_context(ctx)
n = "\n" if "\n\n" not in pre_processed else ""
return f"{pre_processed}{n}\nCog Version: {self.__version__}"
def __init__(self, bot: Red, *_args) -> None:
self.cache = {}
self.bot = bot
self.config = Config.get_conf(
self,
identifier=326235423452394523,
force_registration=True,
)
default_guild = {"reactroles": {"channels": [], "enabled": True}}
self.config.register_guild(**default_guild)
default_guildmessage = {"reactroles": {"react_to_roleid": {}}}
self.config.init_custom("GuildMessage", 2)
self.config.register_custom("GuildMessage", **default_guildmessage)
super().__init__(*_args)
async def red_delete_data_for_user(self, *, requester: RequestType, user_id: int) -> None:
return
async def initialize(self):
log.debug("RoleUtils initialize")
await super().initialize()
| 33.7 | 125 | 0.71876 | ["MIT"] | Arman0334/phen-cogs | roleutils/roleutils.py | 3,033 | Python |
# -*- coding: utf-8 -*-
"""
Created on Fri Mar 24 17:59:20 2017
@author: amirbitran
Various functions that serve to compute the contacts matrix for a series of PDB snapshots
"""
import numpy as np
from matplotlib import pyplot as plt
import sklearn
from sklearn import metrics
from dbfold.utils import loopCluster
import joblib
import copy as cp
import matplotlib.colors as cccc
import matplotlib.cm as cm
def compute_contacts_matrix(coords, mode='binary', thresh=7.8, min_seq_separation=8):
"""
    Fast computation of the pairwise contact map.
    min_seq_separation is the minimum distance the two residues must be apart in sequence for them to be counted
You can specify either of two modes:
1. 'binary': Returns 1 at positions where distance is less than or equal to thresh
2. 'distances': Returns inter-residue distance wherever this distances is less than or equal to thresh
"""
M=metrics.pairwise.pairwise_distances(coords)
    M=np.tril(M, -min_seq_separation) #-min_seq_separation ensures that we do not count residues that are closer than min_seq_separation
if mode=='binary':
contacts=np.zeros(np.shape(M))
contacts[np.where((M<thresh) & (M!=0))]=1
elif mode=='distances':
contacts=np.zeros(np.shape(M))
contacts[M>0]=M[M>0]
return contacts
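# Minimal usage sketch for compute_contacts_matrix (not part of the original
# module; the random coordinates below are stand-ins for real alpha-carbon positions):
# rng = np.random.default_rng(0)
# fake_coords = rng.uniform(0, 50, size=(100, 3))   # 100 "residues" in 3D
# cmap = compute_contacts_matrix(fake_coords, mode='binary', thresh=7.8)
# print(int(np.sum(cmap)), 'contacts found')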
def compute_RG(snapshot, atom='CA'):
"""
Radius of gyration...
"""
coords, resis = read_PDB(snapshot, atom)
R_cm = np.mean(coords, axis=0)
dR = coords - R_cm
mag_R = np.sum(dR*dR, axis=1)
RG = np.sqrt(np.mean(mag_R))
return RG
def count_contacts(native_file, d_cutoff, min_seq_separation):
coords, resis=read_PDB(native_file, 'CA')
native_contacts=compute_contacts_matrix(coords, thresh=d_cutoff, min_seq_separation=min_seq_separation)
return int(np.sum(native_contacts))
def create_substructure_PML(PDB_path, subs_to_plot, d_cutoff, min_clustersize, contact_sep_thresh,min_seq_separation=8, substructures = [], colours = []):
"""
Identifies substructures, then creates a pymol .pml script that draws those substructures as colored contacts directly on the pymol
    Ex. create_substructure_PML('MARR_umbrella3/marr_0.100_Emin.pdb', ['a','b','c','d','e','f'], 7.8, 7, 3)
    You can also pre-enter the substructures as an optional argument
    Otherwise, it will compute the substructures using PDB_path and save the file as PDB_path but with .pml instead of .pdb
    You can optionally input the sequence of colors you want to use to paint the substructures (using the fancy British spelling colours)
    Otherwise, it will color things automatically using the usual default sequence
    That optional argument, if used, needs to have len equal to that of subs_to_plot: one color per substructure to plot
"""
alphabet = 'abcdefghijklmnopqrstuvwxyz'
if len(substructures)==0:
native_contacts, substructures = identify_native_substructures(PDB_path, d_cutoff, min_seq_separation, contact_sep_thresh, min_clustersize, plot=False)
prefix = PDB_path.split('pdb')[0]
PML_path = '{}pml'.format(prefix)
Nsubs = np.shape(substructures)[2]
file = open(PML_path, 'w')
file.write('bg white \n color gray \n')
if len(colours)==0:
colors=cm.get_cmap('jet')
counter = 0
for s in range(Nsubs):
if alphabet[s] in subs_to_plot:
if len(colours)==0:
curr_color=colors((s)/(Nsubs-1 ))
else:
curr_color = colours[counter]
c_hex = cccc.to_hex(curr_color)
c_hex = '0x{}'.format(c_hex.split('#')[1])
sub = substructures[:,:,s]
contacts = np.where(sub)
substr = 'sub{}'.format(alphabet[s])
for z in range(len(contacts[0])):
i = contacts[0][z]+1
j = contacts[1][z]+1
lines = "select aa, //resi {}/CA \n select bb, //resi {}/CA \n distance {}, aa, bb \n hide labels, {} \n set dash_color, {}, {} \n ".format(i, j, substr, substr, c_hex, substr)
file.write(lines)
file.write('\n set dash_gap, 0.5 \n set dash_radius, 0.2 \n')
counter+=1
file.close()
def find_native_contacts(native_file, thresh, min_seq_separation, mode = 'binary'):
"""
finds all native contacts from native PDB file
"""
native_coords, resis=read_PDB(native_file, atom='CA')
native_contacts=compute_contacts_matrix(native_coords, thresh = thresh, min_seq_separation = min_seq_separation, mode = mode )
return native_contacts
def identify_native_substructures(native_file, d_cutoff, min_seq_separation, contact_sep_thresh, min_clustersize,atom='CA', labelsize = 30, fontsize = 30, max_res = None, plot=True, ax = None, native_contacts=[], verbose=False):
"""
Identify substructures within native file contact map
Using the following strategy
We produce a contact map which is a bunch of dots
Contacts correspond to pairs of residues that are less than d_cutoff apart
6 Angstroms is generally a good value, but may want a larger value for helical proteins where residues interact primarily
via sidechains, and thus the alpha carbons are further apart
We only count contacts if the residues are separated by min_seq_separation along the primary sequence
We set min_seq_separation relatively high because we don't care to include intra-helix contacts within our contact map
    We can calculate the "Manhattan" distance between every pair of dots on that contact map
and build a graph of contacts in which two contacts are connected if they are less than some
threshold distance, contact_sep_thresh, apart in the contact map
Then, we find all connected components of this graph, each of which is a substructure
    But we only keep substructures whose number of residues is at least min_clustersize, to avoid sparse contacts here and there that we don't care about
    Gives you the option to input native contacts a priori, but by default you don't do this (value is set to None)
You set Max_res to something other than None if you wish to plot only up to a certain residue number (ex. to depict what substructures can be formed when the first 100 AAs are synthesized)
"""
if len(native_contacts)==0:
coords, resis=read_PDB(native_file, atom)
#we get a contact map with a min seq separation larger than usual to avoid helices
native_distances=compute_contacts_matrix(coords, mode='distances', min_seq_separation=min_seq_separation)
native_contacts=np.zeros(np.shape(native_distances))
native_contacts[np.where((native_distances<d_cutoff) & (native_distances!=0))]=1
positions=np.where(native_contacts==1) #which residues participate in contacts
positions=np.transpose(positions)
M=metrics.pairwise.pairwise_distances(positions, metric='manhattan') #how far is each contact from each other contact?
    #To find connected components, I use my loopCluster function by feeding in the positions of the contacts instead of the "files",
#as well as above matrix M as d_contacts
clusters, pairs_in_substructures, mean_intercluster, mean_intracluster=loopCluster(contact_sep_thresh, positions, M, sort_orphans=False, min_clustersize=min_clustersize, verbose=verbose)
#pairs in substructures is a list of sublists, each of which correspodns to a given substructure
#Within a given sublist, there are a bunch of pairs which tell you which pairs of residues belong to that substructure
#The above is in a messy form, so we convert it into a form that allows for numpy indexing,
#where we have a list of sublists, each sublist contains two arrays, the first of which gives the first indices for the interacting residues
#pairs_in_substructures=[[np.array(C)[:,0], np.array(C)[:,1]] for C in pairs_in_substructures]
pairs_in_substructures=[(np.array(C)[:,0], np.array(C)[:,1]) for C in pairs_in_substructures]
nsubstructures=len(pairs_in_substructures) #we now produce a set of matrices...the ith page tells you which contacts belong to the ith substructure
substructures=np.zeros((np.shape(native_contacts)[0], np.shape(native_contacts)[1], nsubstructures))
for n in range(nsubstructures):
SS=np.zeros(np.shape(native_contacts))
SS[pairs_in_substructures[n]]=1
substructures[:,:,n]=SS
if plot:
visualize_substructures(native_contacts, substructures, max_res = max_res, ax = ax, labelsize = labelsize, fontsize = fontsize)
#print(positions)
return native_contacts, substructures
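# Hypothetical usage sketch (the PDB path and parameter values below are placeholders):
# native_contacts, substructures = identify_native_substructures(
#     'native_state.pdb', d_cutoff=7.8, min_seq_separation=8,
#     contact_sep_thresh=3, min_clustersize=7, plot=False)
# substructures.shape is (n_residues, n_residues, n_substructures); page s is a
# binary map of the contacts assigned to substructure s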
def PDB_contacts_matrix(PDB_file, thresh=7.8, min_seq_separation=8, plot = True,mode='binary'):
"""
Input PDB file, plots contacts matrix
"""
coords, resis = read_PDB(PDB_file, 'CA')
M=metrics.pairwise.pairwise_distances(coords)
    M=np.tril(M, -min_seq_separation) #-min_seq_separation ensures that we do not count residues that are closer than min_seq_separation
if mode=='binary':
contacts=np.zeros(np.shape(M))
contacts[np.where((M<thresh) & (M!=0))]=1
elif mode=='distances':
contacts=np.zeros(np.shape(M))
contacts[M>0]=M[M>0]
if plot:
plt.figure()
plt.imshow(contacts)
plt.title(PDB_file)
return contacts
def read_PDB(file, atom):
"""
extracts coordinates for some side chain atom in some PDB file
For instance, atom will have value 'CA' if you care about the alpha carbons
TODO: Fix this so it can deal with chain labels
Right now if the PDB has a chain label in the fifth column, this will give nonsense results
"""
openfile=open(file)
resis=[]
coords=[]
for line in openfile.readlines():
#print(line)
line=line.rstrip('\n')
entries=line.split()
if entries[0]=='ATOM':
if entries[2]==atom and entries[4] =='A' and entries[3]!='GLY': #So long as the current residue is not a glycine, we append the coordinate for the carbon of interest
resis.append(entries[3])
coords.append([float(entries[6]), float(entries[7]), float(entries[8])])
elif len(entries)>1 and entries[2]==atom and entries[4] !='A' and entries[3]!='GLY':
#first, we debug an error that sometimes happens
if '-' in entries[5][1:-1]: #occasionally, when the y coordinate has a negative sign and three digits or more (ex. -100), the tab between the x and y components dissappears and entry [6] mergies into entry [5] (ex. -50-100)
x=entries[5]
entries[5]=x[0:x[1:-1].index('-')+1] #we ignore the first element of enries 6 in case it is a - sign--we don't care about that one
entries[6]=x[(x[1:-1].index('-')+1):]
if '-' in entries[6][1:-1]: #occasionally, when the z coordinate has a negative sign and three digits or more (ex. -100), the tab between the z and y components dissappears and entry [7] mergies into entry [6] (ex. -50-100)
x=entries[6]
entries[6]=x[0:x[1:-1].index('-')+1] #we ignore the first element of enries 6 in case it is a - sign--we don't care about that one
entries[7]=x[(x[1:-1].index('-')+1):]
resis.append(entries[3])
coords.append([float(entries[5]), float(entries[6]), float(entries[7])])
elif len(entries)>1 and entries[2]=='CA' and entries[4] =='A' and entries[3]=='GLY': #But if the current residue is a glycine, we can only append the alpha carbon since there is no side chain
resis.append(entries[3])
coords.append([float(entries[6]), float(entries[7]), float(entries[8])])
elif len(entries)>1 and entries[2]=='CA' and entries[4] !='A' and entries[3]=='GLY':
#first, we debug an error that sometimes happens
if '-' in entries[5][1:-1]: #occasionally, when the y coordinate has a negative sign and three digits or more (ex. -100), the tab between the x and y components dissappears and entry [6] mergies into entry [5] (ex. -50-100)
x=entries[5]
entries[5]=x[0:x[1:-1].index('-')+1] #we ignore the first element of enries 6 in case it is a - sign--we don't care about that one
entries[6]=x[(x[1:-1].index('-')+1):]
if '-' in entries[6][1:-1]: #occasionally, when the z coordinate has a negative sign and three digits or more (ex. -100), the tab between the z and y components dissappears and entry [7] mergies into entry [6] (ex. -50-100)
x=entries[6]
entries[6]=x[0:x[1:-1].index('-')+1] #we ignore the first element of enries 6 in case it is a - sign--we don't care about that one
entries[7]=x[(x[1:-1].index('-')+1):]
resis.append(entries[3])
coords.append([float(entries[5]), float(entries[6]), float(entries[7])])
coords=np.array(coords)
return coords, resis
def score_snapshot(snapshot, substructures, atom='CA', min_seq_separation=8 ):
"""
Assigns a set of scores for a snapshot
    the ith score tells you the average distance between pairs of residues that participate in the ith substructure, in this snapshot
    If the score is close to the characteristic contact distance, then the substructure should be mostly formed
"""
coords, resis=read_PDB(snapshot, atom)
distances=compute_contacts_matrix(coords, mode='distances', min_seq_separation=min_seq_separation)
length=np.shape(distances)[0]
len_substructures=np.shape(substructures)[0]
if length>len_substructures: #We are applying substructures from a smaller protein to analyze a larger protein, so we only keep the part of the larger protein that is encompassed by these substructures
distances=distances[0:len_substructures, 0:len_substructures]
nsubstructures=np.shape(substructures)[2]
scores=np.zeros((nsubstructures))
for s in range(nsubstructures):
sub=substructures[:,:,s]
        participation=np.multiply(distances, sub)#gives the overlap between native substructures and this snapshot's contacts
scores[s]=np.mean(participation[np.nonzero(participation)])
return scores, distances
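# Hypothetical follow-up sketch: scoring a trajectory snapshot against the
# substructures identified above (the snapshot path is a placeholder):
# scores, dist_map = score_snapshot('snapshot.40000000.pdb', substructures)
# scores[s] close to the native contact distance means substructure s is mostly formed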
def visualize_nonnatives(nonnatives_path, native_file, d_cutoff=6.5, cmap='Greys', Return = False, cbar = True, filter_natives = True, filter_distance = 2, vmax = 1, alpha = 1,custom_filter = None, ax=None, labelsize = 40):
"""
    Reads a file of the form Distance_maps.dat and makes a contact map of nonnative contacts with shading according to the frequency with which
    that contact is observed
    d_cutoff is the distance cutoff with which you identify NATIVE structures to subtract off from the nonnatives...should be
the same as whatever was used to identify the nonnatives
if filter_natives, then we ignore the native contacts, as well as a border around them given by filter_distance
You also have the option to enter a Custom filter, which is a matrix of 1's at positions where you want to filter out the contact map...by default this is off and set to none
Note that if a custom_filter is used, you still pad that filter with a border given by filter_distance
If both filter_natives is set to and and you provide a custom filter, then the two filters are used in conjunction
By the way, the variable vmax says what is the strongest value in the colorbar
By default, it's 1, but you can also set it to None in which case it becomes the maximum value in the map
"""
native_contacts, substructures = identify_native_substructures(native_file, d_cutoff=d_cutoff, plot=False)
[distance_maps, PDB_files, filescores]=joblib.load(nonnatives_path)
if np.shape(distance_maps)[2]>len(PDB_files): #there is an extra page attached to end of the distance maps that tells you mean distances between residues
mean_distances = distance_maps[:,:,-1]
distance_maps = distance_maps[:, :, 0:-1]
mean_nonnatives=np.mean(distance_maps, axis=2)
NN = np.shape(mean_nonnatives)[0]
if filter_natives or np.shape(custom_filter)!=():
if filter_natives and np.shape(custom_filter)==():
Filter=cp.deepcopy(native_contacts)
elif filter_natives and np.shape(custom_filter)!=():
Filter = cp.deepcopy(native_contacts) + custom_filter
zz = np.zeros(np.shape(Filter))
zz[np.where(Filter>0)]=1
Filter = zz
else:
Filter = custom_filter
#plt.figure()
#plt.imshow(Filter)
for d in range(-filter_distance, filter_distance+1): #gets rid of register-shifted native contacts
im1_to_add=np.roll(Filter, d, axis=1)
if d<0:
im1_to_add[:, d:]=0
else:
im1_to_add[:, 0:d]=0
im2_to_add=np.roll(Filter, d, axis=0)
if d<0:
im2_to_add[d:,:]=0
else:
im2_to_add[0:d, :]=0
Filter=Filter+im1_to_add + im2_to_add
Filter[np.where(Filter)]=1
#plt.figure()
#plt.imshow(Filter)
mean_nonnatives = np.multiply(mean_nonnatives, 1 - Filter)
#if filter_natives: mean_nonnatives=np.multiply(mean_nonnatives, 1 - native_contacts)
#Commented all this out September 3 2019
#if cmap != 'Greys':
# for i in range(NN):
# for j in range(NN):
# if mean_nonnatives[i,j]==0:
# mean_nonnatives[i,j] = np.nan #makes points without any contact probability show up as white rather than peach red
if vmax == None:
vmax = np.max(mean_nonnatives)
normalize = cccc.Normalize(vmin = 0, vmax = vmax)
if ax == None:
fig, ax = plt.subplots()
if cmap!=None:
#im = ax.imshow(mean_nonnatives, cmap=cmap, norm = normalize, alpha = alpha, origin = 'lower')
im = ax.imshow(mean_nonnatives + np.transpose(mean_nonnatives), cmap=cmap, norm = normalize, alpha = alpha, origin = 'upper') #changed to this on 1/10/19
else:
#im = ax.imshow(mean_nonnatives, norm = normalize, alpha = alpha, origin = 'lower')
im = ax.imshow(mean_nonnatives + np.transpose(mean_nonnatives), norm = normalize, alpha = alpha, origin = 'upper') #changed to this on 1/10/19
#im.set_clim((0, vmax))
if cbar:
cbar = plt.colorbar(im)
cbar.ax.tick_params(labelsize=labelsize)
ax.tick_params(labelsize=labelsize)
ax.plot(np.arange(0, len(mean_nonnatives)), np.arange(0, len(mean_nonnatives)), color='gray', linestyle=':' ) #added 1/10/19
if Return: return im
def visualize_substructures( native_contacts, substructures, max_res = None, ax = None, labelsize = 30, fontsize = 30):
"""
Visualizes substructures as follows
Everything that is a native contact but not part of any substructure will have value -1 on shown image
(Update 10/1/18, actually will only show contacts that are part of substructures)
Meanwhile, everything that is part of substructure i (i ranges from 0 to N_substructures-1) will have value i
Finally, all non-contacts will just be Nans and appear white
Edited this on 2/4/19 so that substructures are labeled by letter rather than number
Also reinstated the feature that contacts unassigned to substructures are visualized
    On 2/10/2020, changed a bit how the script works
Made it a bit simpler
Also made it so unassigned contacts are now shown in gray
"""
alphabet = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'
substructure_image=np.zeros(np.shape(native_contacts))
native_contacts=native_contacts+np.transpose(native_contacts)
unassigned_contacts = cp.deepcopy(native_contacts)
for s in range(np.shape(substructures)[2]):
substructure_image+=(s+1)*(substructures[:,:,s]+substructures[:,:,s].transpose())
#substructure_image+=(s+1)*substructures[:,:,s]
unassigned_contacts-=substructures[:,:,s]+substructures[:,:,s].transpose()
substructure_image[substructure_image==0] = np.nan #Set background to white
#im[im<0]=np.nan #10/1
#im[np.diag_indices(len(native_contacts))]=0
colors=cm.get_cmap('jet')
if ax ==None: fig, ax = plt.subplots()
#ax.imshow(im, cmap='jet')
ax.imshow(substructure_image, cmap='jet')
ax.tick_params(labelsize=labelsize)
for s in range(np.shape(substructures)[2]):
#Let's annotate
#y_pos=np.where(substructures[:,:,s])[0][0]-3
y_pos=np.where(substructures[:,:,s])[0][0]+4 #2/4, decreased this from +6
x_pos=np.where(substructures[:,:,s])[1][0]+5 #2/4, increased this from +5
#curr_color=colors((s+1)/(np.max(substructure_image) ))
curr_color=colors((s)/(np.nanmax(substructure_image)-1 ))
#print(np.max(substructure_image)-1)
ax.annotate('{}'.format(alphabet[s]), (x_pos, y_pos), fontsize=fontsize, color=curr_color)
ax.annotate('{}'.format(alphabet[s]), (y_pos-5, x_pos-8), fontsize=fontsize, color=curr_color)
nsubstructures=np.shape(substructures)[2]
nbins=nsubstructures+1 #number of colors we are showing...add 1 to accoutn for unassigned contacts
unassigned_contacts[unassigned_contacts==0] = np.nan
ax.imshow(unassigned_contacts, cmap = 'gray', alpha = 0.5)
ax.plot(np.arange(0, len(native_contacts)), np.arange(0, len(native_contacts)), color='gray', linestyle=':' )
if max_res !=None:
ax.set_xlim(( max_res, 0))
ax.set_ylim((0, max_res))
| 46.97463 | 240 | 0.665376 | ["MIT"] | amirbitran/dbfold | dbfold/analyze_structures.py | 22,219 | Python |
from django.http import HttpResponse, HttpResponseRedirect
from django.shortcuts import render, reverse
from django.views.decorators.csrf import csrf_exempt
import os
from twilio.rest import Client
from conversations.models import Conversation, Message
from .models import TwilioConfig, PhoneOwnership
# @validate_twilio_request
@csrf_exempt
def receive(request):
if request.method == "POST":
from_number = request.POST['From']
to_number = request.POST['To']
message = request.POST['Body']
existing_conversations = Conversation.objects.filter(external=from_number, internal=to_number)
if existing_conversations:
conversation = existing_conversations[0]
else:
owners = PhoneOwnership.objects.filter(number=to_number)
if owners:
conversation = Conversation(external=from_number, internal=to_number, user=owners[0].user)
conversation.save()
else:
return HttpResponse("Bad request: phone number not owned", status=400)
# store message
messages = Message(msg_content=message, outward=False, conversation=conversation)
messages.save()
return HttpResponse("Message received", status=200)
else:
return HttpResponse("Method not allowed", status=405)
def updateNumbers(request):
if not request.user.is_authenticated:
return HttpResponseRedirect(reverse('account:login'))
config = TwilioConfig.objects.filter(user=request.user)[0]
client = Client(config.sid, config.token)
number_list = client.incoming_phone_numbers.list()
PhoneOwnership.objects.filter(user=request.user).delete()
for number in number_list:
# Set the webhook for the phone number
incoming_phone_number = client.incoming_phone_numbers(number.sid).update(sms_url='https://hackaway.software/twilio/receive/')
phone = PhoneOwnership(user=request.user, number=number.phone_number)
phone.save()
context = {
'config': config
}
return render(request, 'configure.html', context)
def configure(request):
if not request.user.is_authenticated:
return HttpResponseRedirect(reverse('account:login'))
config = None
configs = TwilioConfig.objects.filter(user=request.user)
if configs: # if some items are found in the database
config = TwilioConfig.objects.filter(user=request.user)[0]
if request.method == "POST":
sid = request.POST['sid']
token = request.POST['token']
# incoming_phone_number = client.incoming_phone_numbers.create(
# sms_url='https://hackaway.software/twilio/receive',
# phone_number='+447700153842'
# )
# Obtain information
# incoming_phone_number = client.incoming_phone_numbers.create(phone_number='+447700153842')
# print(incoming_phone_number.sid)
configs = TwilioConfig.objects.filter(user=request.user)
if configs: # if some items are found in the database
configs.update(sid=sid, token=token)
config = configs[0]
else:
config = TwilioConfig(sid=sid, token=token, user=request.user)
config.save()
return updateNumbers(request)
context = {
'config': config
}
return render(request, 'configure.html', context)
def obtain_number(request):
if not request.user.is_authenticated:
return HttpResponseRedirect(reverse('account:login'))
config = None
configs = TwilioConfig.objects.filter(user=request.user)
if configs: # if some items are found in the database
config = TwilioConfig.objects.filter(user=request.user)[0]
client = Client(config.sid, config.token)
if request.method == "POST":
incoming_phone_number = client.incoming_phone_numbers.create(
phone_number=request.POST['number'],
)
# description solution in the documentation does not work.
return updateNumbers(request)
else:
local = client.available_phone_numbers('GB').local.list(contains='+44',limit=20)
context = {
'options': local
}
return render(request, 'obtain.html', context)
else:
return HttpResponseRedirect(reverse('twilioconfig:configure'))
| 35.626016 | 134 | 0.666591 | ["MIT"] | ph00lt0/websms | twilioconfig/views.py | 4,382 | Python |
import numpy
import os
from src.readers import SimpleBSONReader
def read_train_example(path='../../data/train_example.bson', pca_reduction=True):
read_result = SimpleBSONReader.read_all(path)
pixels = read_result.pixel_matrix
numpy.savetxt("../../out/train_example.csv", pixels, delimiter=",", fmt='%.d')
if pca_reduction:
pixel_reduced = read_result.pixel_matrix_reduced
numpy.savetxt("../../out/pca_train_example.csv", pixel_reduced, delimiter=",", fmt='%.d')
return pixels
def read_and_save_intermediate(path='../../data/train_example.bson', pca_reduction=True,
file_out_path="../../out/train_example.csv",
reduced_file_out_path="../../out/pca_train_example.csv", root_path="../../out/",
n_components=90, first_occurence_number=1):
dirname = os.path.dirname(file_out_path)
if not os.path.exists(dirname):
os.mkdir(dirname)
read_result = SimpleBSONReader.read_all(
path,
save_intermediate=True,
save_png=True,
root_path=root_path,
first_occurence_number=first_occurence_number,
n_components=n_components)
pixels = read_result.pixel_matrix
numpy.savetxt(file_out_path, pixels, delimiter=",", fmt='%.d')
if pca_reduction:
dirname = os.path.dirname(reduced_file_out_path)
if not os.path.exists(dirname):
os.mkdir(dirname)
pixel_reduced = read_result.pixel_matrix_reduced
numpy.savetxt(reduced_file_out_path, pixel_reduced, delimiter=",", fmt='%s')
return pixels
if __name__ == "__main__":
read_and_save_intermediate()
| 36.673913 | 111 | 0.663308 | ["Apache-2.0"] | DSSerialGeneva/cd15c0un7 | src/read/read_train_example.py | 1,687 | Python |
#
# Copyright (c) 2021 Airbyte, Inc., all rights reserved.
#
import urllib.parse
from abc import ABC
from typing import Any, Iterable, Mapping, MutableMapping, Optional
import requests
from airbyte_cdk.sources.streams.http import HttpStream
class CartStream(HttpStream, ABC):
primary_key = "id"
def __init__(self, start_date: str, store_name: str, end_date: str = None, **kwargs):
self._start_date = start_date
self._end_date = end_date
self.store_name = store_name
super().__init__(**kwargs)
@property
def url_base(self) -> str:
return f"https://{self.store_name}/api/v1/"
@property
def data_field(self) -> str:
"""
Field of the response containing data.
By default the value self.name will be used if this property is empty or None
"""
return None
def path(self, **kwargs) -> str:
return self.name
def backoff_time(self, response: requests.Response) -> Optional[float]:
"""
        We don't need to check the response.status_code == 429 since this header exists only in this case.
"""
retry_after = response.headers.get("Retry-After")
if retry_after:
return float(retry_after)
def next_page_token(self, response: requests.Response) -> Optional[Mapping[str, Any]]:
response_json = response.json()
if response_json.get("next_page"):
next_query_string = urllib.parse.urlsplit(response_json.get("next_page")).query
params = dict(urllib.parse.parse_qsl(next_query_string))
return params
def request_headers(self, **kwargs) -> Mapping[str, Any]:
return {"Cache-Control": "no-cache", "Content-Type": "application/json"}
def parse_response(self, response: requests.Response, stream_state: Mapping[str, Any], **kwargs) -> Iterable[Mapping]:
response_json = response.json()
result = response_json.get(self.data_field or self.name, [])
yield from result
def request_params(
self, stream_state: Mapping[str, Any], stream_slice: Mapping[str, Any] = None, next_page_token: Mapping[str, Any] = None
) -> MutableMapping[str, Any]:
params = {"count": 100}
if next_page_token:
params.update(next_page_token)
return params
class IncrementalCartStream(CartStream, ABC):
state_checkpoint_interval = 1000
cursor_field = "updated_at"
def request_params(self, stream_state: Mapping[str, Any], **kwargs) -> MutableMapping[str, Any]:
"""
Generates a query for incremental logic
Docs: https://developers.cart.com/docs/rest-api/docs/query_syntax.md
"""
params = super().request_params(stream_state=stream_state, **kwargs)
cursor_value = stream_state.get(self.cursor_field) or self._start_date
params["sort"] = self.cursor_field
start_date = max(cursor_value, self._start_date)
query = f"gt:{start_date}"
if self._end_date and self._end_date > start_date:
query += f" AND lt:{self._end_date}"
params[self.cursor_field] = query
return params
def get_updated_state(self, current_stream_state: MutableMapping[str, Any], latest_record: Mapping[str, Any]) -> Mapping[str, Any]:
"""
Return the latest state by comparing the cursor value in the latest record with the stream's most recent state object
and returning an updated state object.
"""
latest_state = latest_record.get(self.cursor_field)
current_state = current_stream_state.get(self.cursor_field) or latest_state
if current_state:
return {self.cursor_field: max(latest_state, current_state)}
return {}
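# Illustrative sketch (not part of the connector): with stream_state
# {"updated_at": "2021-06-01T00:00:00Z"} and no end_date configured,
# request_params above produces roughly
# {"count": 100, "sort": "updated_at", "updated_at": "gt:2021-06-01T00:00:00Z"}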
class CustomersCart(IncrementalCartStream):
"""
Docs: https://developers.cart.com/docs/rest-api/restapi.json/paths/~1customers/get
"""
data_field = "customers"
def path(self, **kwargs) -> str:
return self.data_field
class Orders(IncrementalCartStream):
"""
Docs: https://developers.cart.com/docs/rest-api/restapi.json/paths/~1orders/get
"""
class OrderPayments(IncrementalCartStream):
"""
Docs: https://developers.cart.com/docs/rest-api/restapi.json/paths/~1order_payments/get
"""
data_field = "payments"
class OrderItems(IncrementalCartStream):
"""
Docs: https://developers.cart.com/docs/rest-api/restapi.json/paths/~1order_items/get
"""
data_field = "items"
class Products(IncrementalCartStream):
"""
Docs: https://developers.cart.com/docs/rest-api/restapi.json/paths/~1products/get
"""
class Addresses(IncrementalCartStream):
"""
Docs: https://developers.cart.com/docs/rest-api/b3A6MjMzMTc3Njc-get-addresses
"""
| 32.564626 | 135 | 0.667642 | ["MIT"] | 52-entertainment/airbyte | airbyte-integrations/connectors/source-cart/source_cart/streams.py | 4,787 | Python |
# coding:utf-8
# usr/bin/python3
# python src/chapter28/chapter28note.py
# python3 src/chapter28/chapter28note.py
"""
Class Chapter28_1
Class Chapter28_2
Class Chapter28_3
Class Chapter28_4
Class Chapter28_5
"""
from __future__ import absolute_import, division, print_function
import numpy as np
class Chapter28_1:
"""
chapter28.1 note and function
"""
def __init__(self):
pass
def note(self):
"""
Summary
====
Print chapter28.1 note
Example
====
```python
Chapter28_1().note()
```
"""
        print('chapter28.1 note as follows')
        print('28.1 Properties of matrices')
        print('Matrix operations are very important in scientific computing')
        print('A matrix is a rectangular array of numbers; in Python use np.matrix([[1,2],[3,4]])')
        print('Matrices and vectors')
        print('Identity matrix')
        print('Zero matrix')
        print('Diagonal matrix')
        print('Tridiagonal matrix')
        print('Upper triangular matrix')
        print('Lower triangular matrix')
        print('Permutation matrix')
        print('Symmetric matrix')
        print('Matrix multiplication is associative, and matrix multiplication distributes over addition')
        print('The Frobenius norm and the 2-norm of a matrix')
        print('The 2-norm of a vector')
        print('Matrix inverse, rank and determinant')
        print('Theorem 28.1 A square matrix has full rank if and only if it is nonsingular')
        print('Theorem 28.2 A matrix A has full column rank if and only if it has no null vector')
        print('Theorem 28.3 A square matrix A is singular if and only if it has a null vector')
        print('Theorem 28.4 (Determinant properties) The determinant of a square matrix A has the following properties')
        print('     If any row or any column of A is zero, then det(A)=0')
        print('     Multiplying every element of one row (or one column) of A by a constant l multiplies the determinant of A by l')
        print('     The determinant of A equals the determinant of its transpose A^T')
        print('     Exchanging any two rows (or any two columns) of A negates the determinant')
        print('Theorem 28.5 An n*n square matrix A is singular if and only if det(A)=0')
        print('Positive-definite matrices')
        print('Theorem 28.6 For any matrix A with full column rank, the matrix A\'A is positive-definite')
        print('Exercise 28.1-1 Show that if A and B are symmetric n*n matrices, then so are A+B and A-B')
        print('Exercise 28.1-2 Prove that (AB)\'=B\'A\' and that AA\' is always a symmetric matrix')
        print('Exercise 28.1-3 Prove that matrix inverses are unique: if B and C are both inverses of A, then B=C')
        print('Exercise 28.1-4 Prove that the product of two lower-triangular matrices is lower-triangular,',
              'that the determinant of a lower-triangular (or upper-triangular) matrix is the product of its diagonal elements,',
              'and that the inverse of a lower-triangular matrix, if it exists, is lower-triangular')
        print('Exercise 28.1-5 Prove that if P is an n*n permutation matrix and A is an n*n matrix, then PA can be obtained from A by permuting its rows,',
              'and AP can be obtained from A by permuting its columns. Prove that the product of two permutation matrices is a permutation matrix.',
              'Prove that if P is a permutation matrix, then P is invertible, its inverse is P^T, and P^T is also a permutation matrix')
        print('Exercise 28.1-6 Let A and B be n*n matrices such that AB=I. Prove that if A\' is obtained from A by adding row j to row i,',
              'then the inverse B\' of A\' can be obtained by subtracting column i from column j of B')
        print('Exercise 28.1-7 Let A be a nonsingular n*n complex matrix. Show that every entry of A^-1 is real',
              'if and only if every entry of A is real')
        print('Exercise 28.1-8 Show that if A is a nonsingular symmetric n*n matrix, then A^-1 is also symmetric.',
              'Show that if B is an arbitrary m*n matrix, then the m*m matrix given by the product BAB^T is symmetric')
        print('Exercise 28.1-9 Prove Theorem 28.2: a matrix A has full column rank if and only if Ax=0 implies x=0')
        print('Exercise 28.1-10 Show that for any two compatible matrices A and B, rank(AB)<=min(rank(A),rank(B)),',
              'where equality holds only if A or B is a nonsingular square matrix (use the alternative equivalent definition of the rank of a matrix)')
        print('Exercise 28.1-11 Given numbers x0,x1,...,xn-1, prove the formula for the determinant of the Vandermonde matrix')
# python src/chapter28/chapter28note.py
# python3 src/chapter28/chapter28note.py
class Chapter28_2:
"""
chapter28.2 note and function
"""
def __init__(self):
pass
def note(self):
'''
Summary
====
Print chapter28.2 note
Example
====
```python
Chapter28_2().note()
```
'''
        print('chapter28.2 note as follows')
        print('28.2 Strassen\'s algorithm for matrix multiplication')
        print('Strassen\'s famous recursive algorithm multiplies two n*n matrices in Θ(n^lg7)=Θ(n^2.81) time')
        print('For sufficiently large n, it outperforms the simple Θ(n^3) matrix-multiplication procedure MATRIX-MULTIPLY introduced in Section 25.1')
        print('Overview of the algorithm')
        print(' Strassen\'s algorithm can be viewed as an application of a familiar design technique: divide and conquer')
        print(' Suppose we wish to compute the product C=AB, where A, B and C are all n*n matrices. Assuming n is a power of 2, partition each of A, B and C into four n/2*n/2 matrices')
        print(' Block matrix multiplication then gives the recurrence T(n)=8T(n/2)+Θ(n^2), which solves to T(n)=Θ(n^3)')
        print('Strassen discovered a different recursive approach that requires only 7 recursive multiplications of n/2*n/2 matrices plus Θ(n^2) scalar additions and subtractions')
        print('This gives the recurrence T(n)=7T(n/2)+Θ(n^2), and therefore T(n)=Θ(n^2.81)')
        print('Strassen\'s method has the following four steps')
        print(' 1) Partition the input matrices A and B into n/2*n/2 submatrices')
        print(' 2) Using Θ(n^2) scalar additions and subtractions, compute 14 n/2*n/2 matrices A1,B1,A2,B2,...,A7,B7')
        print(' 3) Recursively compute the 7 matrix products Pi=AiBi for i=1,2,...,7')
        print(' 4) Using only Θ(n^2) scalar additions and subtractions, add and subtract various combinations of the Pi matrices,',
              'to obtain the four submatrices r,s,t,u of the result matrix C')
        print('From a practical point of view, Strassen\'s method is usually not the method of choice for matrix multiplication')
        print(' 1) The constant factor hidden in the running time of Strassen\'s algorithm is larger than the constant factor of the simple Θ(n^3) method')
        print(' 2) When the matrices are sparse, methods tailored to sparse matrices are faster')
        print(' 3) Strassen\'s algorithm is not as numerically stable as the simple method')
        print(' 4) The submatrices generated at each level of the recursion consume space')
        # ! The key to Strassen's method is divide-and-conquer recursion applied to matrix multiplication
        print('Exercise 28.2-1 Use Strassen\'s algorithm to compute the matrix product')
        print('The matrix product is:')
        print(np.matrix([[1, 3], [5, 7]]) * np.matrix([[8, 4], [6, 2]]))
        print('Exercise 28.2-2 How would you modify Strassen\'s algorithm to multiply two n*n matrices when n is not an exact power of 2?',
              'Show that the resulting algorithm runs in Θ(n^lg7) time')
        print('Exercise 28.2-3 What is the largest k such that, if you can multiply two 3*3 matrices using k multiplications (not assuming commutativity of multiplication),',
              'then you can multiply two n*n matrices in o(n^lg7) time?')
        print('Exercise 28.2-4 V. Pan discovered a way of multiplying 68*68 matrices using 132464 multiplications,',
              'a way of multiplying 70*70 matrices using 143640 multiplications,',
              'and a way of multiplying 72*72 matrices using 155424 multiplications')
        print('Exercise 28.2-5 Using Strassen\'s algorithm as a subroutine, how quickly can you multiply a kn*n matrix by an n*kn matrix?')
        print('Exercise 28.2-6 Show how to multiply the complex numbers a+bi and c+di using only three real multiplications. The algorithm should take a, b, c and d as input,',
              'and produce the real component ac-bd and the imaginary component ad+bc separately')
# python src/chapter28/chapter28note.py
# python3 src/chapter28/chapter28note.py
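# Illustrative sketch (not part of the original notes): the seven Strassen
# products for a single 2*2 block multiplication, checked against the
# Exercise 28.2-1 example above.
def strassen_2x2(A, B):
    """Multiply two 2*2 numpy arrays using only 7 scalar multiplications."""
    a, b, c, d = A[0, 0], A[0, 1], A[1, 0], A[1, 1]
    e, f, g, h = B[0, 0], B[0, 1], B[1, 0], B[1, 1]
    m1 = (a + d) * (e + h)
    m2 = (c + d) * e
    m3 = a * (f - h)
    m4 = d * (g - e)
    m5 = (a + b) * h
    m6 = (c - a) * (e + f)
    m7 = (b - d) * (g + h)
    return np.array([[m1 + m4 - m5 + m7, m3 + m5],
                     [m2 + m4, m1 - m2 + m3 + m6]])

# strassen_2x2(np.array([[1, 3], [5, 7]]), np.array([[8, 4], [6, 2]]))
# returns [[26 10] [82 34]], matching the Exercise 28.2-1 product printed above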
class Chapter28_3:
"""
chapter28.3 note and function
"""
def __init__(self):
pass
def note(self):
'''
Summary
====
Print chapter28.3 note
Example
====
```python
Chapter28_3().note()
```
'''
        print('chapter28.3 note as follows')
        print('28.3 Solving systems of linear equations')
        print('Solving a set of simultaneous linear equations Ax=b is a fundamental problem that arises in many applications. A linear system can be expressed as a matrix equation,',
              'in which each matrix or vector element belongs to a field, such as the real numbers R')
        print('Solving linear systems with LUP decomposition')
        print('The idea behind LUP decomposition is to find three n*n matrices L, U and P such that PA=LU')
        print(' where L is a unit lower-triangular matrix, U is an upper-triangular matrix, and P is a permutation matrix')
        print('Every nonsingular matrix A possesses such a decomposition')
        print('The advantage of computing an LUP decomposition of A is that linear systems are easier to solve when the matrices are triangular, as L and U are')
        print('Once we have an LUP decomposition of A, we can solve the triangular linear systems as follows, thereby obtaining the solution of Ax=b')
        print('Multiplying both sides of Ax=b by P yields the equivalent system PAx=Pb, that is, LUx=Pb')
        print('Forward and back substitution')
        print(' Given L, P and b, forward substitution solves the lower-triangular system in Θ(n^2) time,',
              'representing the permutation P by an array pi[1..n]')
        print('Computing an LU decomposition')
        print(' The process of performing LU decomposition is called Gaussian elimination. We start by subtracting multiples of the first equation from the other equations,',
              'so that the first variable is eliminated from those equations')
        print(' We continue this process until the system takes upper-triangular form; this matrix is U. The matrix L is made up of the row multipliers that were used to eliminate the variables')
        print('Computing an LUP decomposition')
        print(' In general, to solve the linear system Ax=b we must pivot on off-diagonal elements of A to avoid dividing by 0.',
              'The divisor must not only be nonzero but must also not be too small (even if A is nonsingular); otherwise the computation becomes numerically unstable. The chosen pivot must therefore be a large value')
        print(' The mathematics behind LUP decomposition is similar to that of LU decomposition. Given an n*n nonsingular matrix A, we wish to compute a permutation matrix P, a unit lower-triangular matrix L and an upper-triangular matrix U such that PA=LU')
        print('Exercise 28.3-1 Use forward substitution to solve the following system of equations')
        print('Exercise 28.3-2 Find an LU decomposition of the following matrix')
        print('Exercise 28.3-3 Use an LUP decomposition to solve the following system of equations')
        print('Exercise 28.3-4 Describe the LUP decomposition of a diagonal matrix')
        print('Exercise 28.3-5 Describe the LUP decomposition of a permutation matrix A, and prove that it is unique')
        print('Exercise 28.3-6 Show that for all n>=1, there exists a singular n*n matrix that has an LU decomposition')
        print('Exercise 28.3-7 In LU-DECOMPOSITION, is it necessary to perform the outermost for loop iteration when k=n?',
              'What about in LUP-DECOMPOSITION?')
# python src/chapter28/chapter28note.py
# python3 src/chapter28/chapter28note.py
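# Illustrative sketch (not part of the original notes): forward substitution for
# a unit lower-triangular system Ly = b, the first step of solving LUx = Pb.
def forward_substitution(L, b):
    """Solve Ly = b where L is unit lower-triangular (ones on the diagonal)."""
    n = len(b)
    y = np.zeros(n)
    for i in range(n):
        # subtract the already-computed contributions of y[0..i-1]
        y[i] = b[i] - np.dot(L[i, :i], y[:i])
    return y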
class Chapter28_4:
"""
chapter28.4 note and function
"""
def note(self):
"""
Summary
====
Print chapter28.4 note
Example
====
```python
Chapter28_4().note()
```
"""
        print('chapter28.4 note as follows')
        print('28.4 Inverting matrices')
        print('In practice, one generally does not use the inverse of a matrix to solve a linear system, preferring more numerically stable techniques such as LUP decomposition')
        print('Sometimes, however, it is still necessary to compute the inverse of a matrix; the LUP decomposition can be used to do so')
        print('We will also prove that matrix multiplication and computing a matrix inverse are problems of equal difficulty: (subject to technical conditions) an algorithm for one can be used to solve the other within the same asymptotic time')
        print('Thus the Strassen matrix-multiplication algorithm can be used to invert a matrix')
        print('Indeed, it was the desire to show that linear systems can be solved faster than by the usual method that motivated Strassen\'s original algorithm')
        print('Computing a matrix inverse from an LUP decomposition')
        print(' Suppose that we have an LUP decomposition of a matrix A, consisting of the three matrices L, U, P with PA=LU')
        print(' Using LU-SOLVE, we can solve a linear system of the form Ax=b in Θ(n^2) running time')
        print(' Since the LUP decomposition depends on A but not on b, we can solve another linear system of the form Ax=b\' in an additional Θ(n^2) running time')
        print(' In general, once we have the LUP decomposition of A, we can solve, in Θ(kn^2) running time, k linear systems of the form Ax=b that differ only in b')
        print('Matrix multiplication and matrix inversion')
        print(' A theoretical speedup for matrix multiplication translates into a corresponding speedup for matrix inversion')
        print(' Matrix inversion is equivalent to matrix multiplication in the following sense:',
              'if M(n) denotes the time required to multiply two n*n matrices, then there is a way to invert an n*n matrix in O(M(n)) time,',
              'and if I(n) denotes the time required to invert a nonsingular n*n matrix, then there is a way to multiply two n*n matrices in O(I(n)) time')
        print('Theorem 28.7 (Multiplication is no harder than inversion) If we can invert an n*n matrix in I(n) time,',
              'where I(n)=Ω(n^2) and I(n) satisfies the regularity condition I(3n)=O(I(n)), then we can multiply two n*n matrices in O(I(n)) time')
        print('Theorem 28.8 (Inversion is no harder than multiplication) If we can multiply two n*n real matrices in M(n) time,',
              'where M(n)=Ω(n^2) and M(n) satisfies the two regularity conditions M(n+k)=O(M(n)) for any 0<=k<=n and M(n/2)<=cM(n) for some constant c<1/2,',
              'then we can compute the inverse of any nonsingular n*n real matrix in O(M(n)) time')
        print('Exercise 28.4-1 Let M(n) be the time to multiply two n*n matrices and S(n) the time to square an n*n matrix.',
              'Show that multiplying and squaring matrices have essentially the same difficulty: an M(n)-time matrix-multiplication algorithm implies an O(M(n))-time squaring algorithm,',
              'and an S(n)-time squaring algorithm implies an O(S(n))-time matrix-multiplication algorithm')
        print('Exercise 28.4-2 Let M(n) be the time to multiply two n*n matrices and L(n) the time to compute the LUP decomposition of an n*n matrix.',
              'Show that multiplying matrices and computing LUP decompositions have essentially the same difficulty: an M(n)-time matrix-multiplication algorithm implies an O(M(n))-time LUP-decomposition algorithm,',
              'and an L(n)-time LUP-decomposition algorithm implies an O(L(n))-time matrix-multiplication algorithm')
        print('Exercise 28.4-3 Let M(n) be the time to multiply two n*n matrices and D(n) the time to compute the determinant of an n*n matrix.',
              'Show that multiplying matrices and computing the determinant have essentially the same difficulty: an M(n)-time matrix-multiplication algorithm implies an O(M(n))-time determinant algorithm,',
              'and a D(n)-time determinant algorithm implies an O(D(n))-time matrix-multiplication algorithm')
        print('Exercise 28.4-4 Let M(n) be the time to multiply two n*n boolean matrices and T(n) the time to compute the transitive closure of an n*n boolean matrix.',
              'Show that an M(n)-time boolean matrix-multiplication algorithm implies an O(M(n)lgn)-time transitive-closure algorithm, and a T(n)-time transitive-closure algorithm implies an O(T(n))-time boolean matrix-multiplication algorithm')
        print('Exercise 28.4-5 Does the matrix-inversion algorithm based on Theorem 28.8 work when matrix elements are drawn from the field of integers modulo 2?')
        print('Exercise 28.4-6 Generalize the matrix-inversion algorithm based on Theorem 28.8 to handle matrices of complex numbers, and prove that your generalization works correctly.',
              ' Hint: Instead of the transpose A^T, use the conjugate transpose A*, obtained by replacing every entry of A^T with its complex conjugate; this is also called the Hermitian transpose')
# python src/chapter28/chapter28note.py
# python3 src/chapter28/chapter28note.py
class Chapter28_5:
"""
chapter28.5 note and function
"""
def note(self):
"""
Summary
====
Print chapter28.5 note
Example
====
```python
Chapter28_5().note()
```
"""
        print('chapter28.5 note as follows')
        print('28.5 Symmetric positive-definite matrices and least-squares approximation')
        print('Symmetric positive-definite matrices have many interesting and desirable properties. For example, they are nonsingular, and an LU decomposition can be performed on them without worrying about dividing by 0')
        print('Lemma 28.9 Any positive-definite matrix is nonsingular')
        print('Lemma 28.10 If A is a symmetric positive-definite matrix, then every leading submatrix of A is symmetric and positive-definite')
        print('Let A be a symmetric positive-definite matrix and Ak its k*k leading submatrix; the Schur complement of A with respect to Ak is defined as S=C-B(Ak^-1)B^T')
        print('Lemma 28.11 (Schur complement lemma) If A is a symmetric positive-definite matrix and Ak is its k*k leading submatrix, then the Schur complement of A with respect to Ak is also symmetric and positive-definite')
        print('Corollary 28.12 LU decomposition of a symmetric positive-definite matrix never causes a division by 0')
        print('Least-squares approximation')
        print('Fitting curves to given sets of data points is an important application of symmetric positive-definite matrices. Suppose that we are given m data points (x1,y1),(x2,y2),...,(xm,ym),',
              'where the yi are known to be subject to measurement errors. We would like to find a function F(x) such that yi=F(xi)+qi for i=1,2,...,m')
        print('where the approximation errors qi are small. The form of the function F(x) depends on the problem at hand; here we assume it has the form of a linearly weighted sum F(x)=∑cifi(x)')
        print('where the number of summands and the particular basis functions fi are chosen based on knowledge of the problem. One common choice is fi(x)=x^(i-1), which means that F(x) is a polynomial of degree n-1 in x')
        print('Although such a high-degree F fits the data easily, it is also easily perturbed by noise in the data, and it is generally poor at predicting y for previously unseen values of x')
        print('To minimize the approximation error, we choose to minimize the norm of the error vector q, which yields a least-squares solution')
        print('The normal equation from statistics: A^TAc=A^Ty')
        print('The pseudoinverse A+=(A^TA)^-1A^T')
        print('Exercise 28.5-1 Prove that every diagonal element of a symmetric positive-definite matrix is positive')
        print('Exercise 28.5-2 Let A=[[a,b],[b,c]] be a 2*2 symmetric positive-definite matrix. Prove that its determinant ac-b^2 is positive')
        print('Exercise 28.5-3 Prove that the maximum element of a symmetric positive-definite matrix lies on its diagonal')
        print('Exercise 28.5-4 Prove that the determinant of every leading submatrix of a symmetric positive-definite matrix is positive')
        print('Exercise 28.5-5 Let Ak denote the kth leading submatrix of a symmetric positive-definite matrix A. Prove that det(Ak)/det(Ak-1) is the kth pivot during LU decomposition, where by convention det(A0)=1')
        print('Exercise 28.5-6 Find the least-squares fit')
        print('Exercise 28.5-7 Prove that the pseudoinverse A+ satisfies the following four equations:')
        print('     AA^+A=A')
        print('     A^+AA^+=A^+')
        print('     (AA^+)^T=AA^+')
        print('     (A^+A)^T=A^+A')
        print('Problem 28-1 Tridiagonal systems of linear equations')
        print(' 1) Show that for any n*n symmetric positive-definite tridiagonal matrix and any n-vector b, the equation Ax=b can be solved in O(n) time by performing an LU decomposition.',
              'Argue that, in the worst case, any method based on forming A^-1 is asymptotically more expensive')
        print(' 2) Show that for any n*n symmetric positive-definite tridiagonal matrix and any n-vector b, the equation Ax=b can be solved in O(n) time by performing an LUP decomposition')
        print('Problem 28-2 Cubic spline interpolation')
        print(' Fit a curve with a piecewise curve made up of n cubic polynomials')
        print(' Using natural cubic splines, we can interpolate a set of n+1 point-value pairs in O(n) time')
# python src/chapter28/chapter28note.py
# python3 src/chapter28/chapter28note.py
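# Illustrative sketch (not part of the original notes): least-squares polynomial
# fit via the normal equations A^TAc=A^Ty discussed in section 28.5.
def least_squares_fit(x, y, degree=2):
    """Return coefficients c minimizing ||Ac - y|| for a polynomial basis."""
    A = np.vander(x, degree + 1, increasing=True)  # basis functions f_j(x) = x^j
    return np.linalg.solve(np.dot(A.T, A), np.dot(A.T, y))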
chapter28_1 = Chapter28_1()
chapter28_2 = Chapter28_2()
chapter28_3 = Chapter28_3()
chapter28_4 = Chapter28_4()
chapter28_5 = Chapter28_5()
def printchapter28note():
"""
print chapter28 note.
"""
print('Run main : single chapter twenty-eight!')
chapter28_1.note()
chapter28_2.note()
chapter28_3.note()
chapter28_4.note()
chapter28_5.note()
# python src/chapter28/chapter28note.py
# python3 src/chapter28/chapter28note.py
if __name__ == '__main__':
printchapter28note()
else:
pass
| 36.896142 | 98 | 0.619752 | ["Apache-2.0"] | HideLakitu/IntroductionToAlgorithm.Python | src/chapter28/chapter28note.py | 20,506 | Python |
#Python 3.X? Could be compatible with small tweaks.
from re import findall
#Tatatat0 2016
#Documentation:
#Virtual Memory Classes:
# Virtual_Memory(max_memory)
# maxmemory: maximum address memory can be allocated to
# chunks: list of virtual memory chunks.
# format: ((chunk1, chunk1.start_address, chunk1.allocated_memory),(chunk2,...,...))
# Functions:
# allocate(address,memory)
# creates a new Virtual_Memory_Chunk instance, allocating memory at address.
# adds new chunk to chunks attribute
# deallocate(address)
# removes allocated memory at address. Must be starting address of allocated memory
# get_memory(address,memory)
# returns the memory amount of bytes at address. Must be allocated.
# set_memory(address,new_memory)
# sets the memory at address equal to new_memory
# Virtual_Memory_Chunk(parent,start_address,memory_input,allocated_memory,architecture_class)
# parent: a pointer to the main virtual memory class instance
# start_address: is the address of the first byte in memory_input, referenceable by opcodes.
# Default is 0.
# allocated_memory: This is the amount of memory that is accessible. The memory that is accessible is equal to start_address + allocated_memory.
# Default is 100 bytes.
# memory_input: is a series of bytes represented in hex string, if its length is less than the amount allocated, extra zeros are added. Becomes Virtual_Memory_Chunk.memory upon initialization
# Default is 0.
# architecture_class: This is an open ended variable that can be used to bind into a family of architecture based encoding,decoding, and operation methods.
# Default is "None".
# Functions:
# get_memory(address,amount)
# gets amount of bytes of memory at the address specified by address
# region specified must be within the allocated memory
# set_memory(address,new_memory)
# sets the memory at address to new_memory
# region specified must be within the allocated memory
# smallest data editable is a nibble
# print_variables()
# prints the useful variables of current instance of Virtual_Memory_Chunk
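#Example (hypothetical usage pieced together from the documentation above):
# vm = Virtual_Memory(0x1000)
# vm.allocate(0x100, 0x40)          # reserve 0x40 bytes starting at 0x100
# vm.set_memory(0x100, "DEADBEEF")
# vm.get_memory(0x100, 4)           # -> ['DE', 'AD', 'BE', 'EF']
# vm.deallocate(0x100)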
#Beginning of not yet implemented
#Architecture Classes:
# Powerpc_Architecture()
# registers: The registers are r0-31,f0-31,CR,LR,PC
# Functions:
# get_registers()
# uses a generator to return a register's values.
# Powerpc_Register(value, bits)
# value = value of the register
# bits = amount of bytes the value is
#
cast = lambda reg, bits=0, nonreg=False: reg&((1<<bits)-1) if nonreg else reg.value&((1<<reg.bits)-1)
class Powerpc_Register():
__slots__ = ['value','bits']
__int__ = lambda this: int(this.value)
def __init__(self, value, bits):
self.value = value
self.bits = bits
def set(self,value,casts=False,bits=16):
if value.__class__ == Powerpc_Register: # value is a register
raise TypeError('value is a register')
self.value = value
self.value = cast(self)
#print (self.value)
if casts:
self.value = cast(self,bits)
class Powerpc_Architecture():
__slots__ = ['NS']
def __init__(self):
self.NS = dict( CR = Powerpc_Register(0,32), LR = Powerpc_Register(0,32), PC = Powerpc_Register(0,32) )
for n in range(32):
self.NS['r%i'%n] = self.NS['R%i'%n] = Powerpc_Register(0,32) # r1 == R1
self.NS['f%i'%n] = self.NS['F%i'%n] = Powerpc_Register(0,128) # f1 == F1
def get_registers(self): #generator to return registers
values = list(self.NS);#replace with iteritems in 2.X? Nevermind. Still could maybe be a different function in 2.X though.
num = 0
while num < len(values):
yield self.NS[values[num]]
num += 1;
#End of not yet implemented
class Virtual_Memory:
def __init__(self,max_memory):
if type(max_memory) != int:
raise TypeError("Max memory of virtual memory class instance must be type 'int'")
self.max_memory = max_memory
self.chunks = []
def allocate(self, address, memory):
if (address < 0) or (memory <= 0):
raise ValueError("Address or memory amount to be allocated in the Virtual Memory instance can not be negative.")
if address + memory > self.max_memory:#outside of max memory
raise IndexError("Can not allocate virtual_memory_chunks to an address outside the max_memory range of the Virtual_Memory instance." + "Attempted to allocate at " + str(hex(address)) + " for " + str(hex(memory)) + " bytes. max_memory of the current Virtual_Memory instance is " + str(hex(self.max_memory)))
if len(self.chunks) > 0:#contains virtual memory chunks
for chunk in range(0,len(self.chunks)):
#print((hex(memory + address)))
#print(hex((self.chunks[chunk][1] + self.chunks[chunk][2])))
#print("statement 1: " , (self.chunks[chunk][1] >= address and (address + memory) < (self.chunks[chunk][1] + self.chunks[chunk][2])))
#print("statement 2: " , (self.chunks[chunk][1] == address))
#print("statement 3: " , (address < self.chunks[chunk][1] and (address + memory > self.chunks[chunk][1])))
#print("statement 4: " , (address > self.chunks[chunk][1] and address < self.chunks[chunk][1] + self.chunks[chunk][2]))
#if (self.chunks[chunk][1] >= address and (memory + address) > (self.chunks[chunk][1])) or (self.chunks[chunk][1] == address) or (address < self.chunks[chunk][1] and (address + memory > self.chunks[chunk][1])) or (address > self.chunks[chunk][1] and address < self.chunks[chunk][1] + self.chunks[chunk][
if ((address < self.chunks[chunk][1]) and (address + memory >= self.chunks[chunk][1])) or ((address >= self.chunks[chunk][1]) and (address <= (self.chunks[chunk][1] + self.chunks[chunk][2]))):
raise IndexError("Cannot allocate to an already allocated address. Allocation: Address: " + str(hex(address)) + ", Memory: " + str(hex(memory)) + " Overlaps allocation at " + str(hex(self.chunks[chunk][1])) + " for " + str(hex(self.chunks[chunk][2])) + " Bytes.")
self.chunks.append((Virtual_Memory_Chunk(self,address,memory),address,memory))
else:
self.chunks.append((Virtual_Memory_Chunk(self,address,memory),address,memory))
def deallocate(self,address):
if type(address) != int:
raise TypeError("Address used to dellocate memory in Virtual_Memory instance must be type 'int'. Type: " + str(type(address)))
deleted = False
for chunk in range(0,len(self.chunks)):
#print(hex(self.chunks[chunk][1]))
if self.chunks[chunk][1] == address:
del self.chunks[chunk] #deletes memory chunk
deleted = True
break
if (not deleted):
raise IndexError("Given address to deallocate memory of Virtual_Memory instance is not a correct Virtual_Memory_Chunk starting address. Address to deallocate is " + str(hex(address)))
def get_memory(self,address,memory):
if memory <= 0:
raise ValueError("Must get a positive number of memory from the Virtual Memory instance. Attempted to get from " + str(hex(address)) + " for " + str(hex(memory)) + " bytes.")
if address > self.max_memory:
raise IndexError("Can't get memory from an address outside the max_memory range of the Virtual_Memory instance. Attempted to get from " + str(hex(address)) + " for " + str(hex(memory)) + " bytes. max_memory of the current Virtual_Memory instance is " + str(hex(self.max_memory)))
chunk_num = "None" #initialize variable. Virtual Memory chunk to use.
for chunk in range(0,len(self.chunks)):
if self.chunks[chunk][1] <= address and (address + memory < (self.chunks[chunk][1] + self.chunks[chunk][2])):
chunk_num = chunk
break
if chunk_num == "None":#no valid chunk was found
raise IndexError("No chunk was found that has memory allocated in the memory region to get from the Virtual Memory instance. Attempted to get from " + str(hex(address)) + " for " + str(hex(memory)) + " bytes.")
current_chunk = self.chunks[chunk]
address = address - current_chunk[1]
return current_chunk[0].memory[address:address + memory]
def set_memory(self,address,new_memory):
if type(new_memory) == str:
new_memory = findall('..',new_memory.upper())
if len(new_memory) == 0:
raise ValueError("Length of memory to set in the current Virtual Memory instance must be greater than 1 byte. Address to set " + str(hex(address)))
if address > self.max_memory:
raise IndexError("Can't set memory from an address outside the max_memory range of the Virtual_Memory instance. Attempted to set at " + str(hex(address)) + ". max_memory of the current Virtual_Memory instance is " + str(hex(self.max_memory)))
chunk_num = "None" #initialize variable. Virtual Memory chunk to use.
for chunk in range(0,len(self.chunks)):
if self.chunks[chunk][1] <= address and (address + len(new_memory) < (self.chunks[chunk][1] + self.chunks[chunk][2])):
chunk_num = chunk
break
if chunk_num == "None":#no valid chunk was found
            raise IndexError("No chunk was found that has memory allocated in the memory region to set in the Virtual Memory instance. Attempted to set at " + str(hex(address)) + " for " + str(hex(len(new_memory))) + " bytes.")
        current_chunk = self.chunks[chunk_num]
address = address - current_chunk[1]
current_chunk[0].memory[address:address + len(new_memory)] = new_memory
class Virtual_Memory_Chunk:
def __init__(self,parent,start_address=0,allocated_memory=100,memory_input="00",architecture_class="None"):
#Error checking and formatting
if type(memory_input) != str:#memory input should be in hex, as a string.
raise TypeError("Incorrect type for memory input to create virtual memory. type: " + str(type(memory_input)))
if type(start_address) != int:
if type(start_address) == str:#allows hex
if len(start_address) > 3:#can contain 0x and a number
if start_address[0:2] == "0x":
start_address = int(start_address,16)#converts the hex to int
elif len(start_address) <= 2:
if "0x" in start_address:
raise ValueError("Input for starting address of virtual memory contains no hex after the 0x")
else:
raise TypeError("Incorrect type for starting address to create virtual memory.")
else:
raise TypeError("Incorrect type for starting address to create virtual memory.")
if "0x" in memory_input: #non intrusive way to check. Allows memory_input to be less than 2 characters by not checking index [0:1]
if memory_input[0:2] == "0x":#removes "0x" from beginning if included
memory_input = memory_input[2:]#I chose to keep memory_input as a string instead of a byte array because it is faster.
if len(memory_input) > (allocated_memory * 2): #more memory given then allocated
raise IndexError("Memory inputted for creation of virtual memory exceeds the length allowed by the allocated memory")
elif len(memory_input) < (allocated_memory * 2):#less memory given then allocated
memory_input = memory_input + ("0" * ((allocated_memory * 2) - len(memory_input))) #fills unspecified memory with zeros
#else: memory given is equal to memory allocated
#initialization
self.parent = parent
self.start_address = start_address #this is the address of the first opcode, relevant to the addresses the opcodes can specify.
self.memory = findall('..',memory_input) #memory is a list of each individual byte of input
self.allocated_memory = allocated_memory#amount of memory available
self.architecture_class = architecture_class#where architecture class is used for bindings to directly input into encoding and decoding functions for the given architecture
def get_memory(self,address,amount):
if type(address) == str:
if "0x" in address:
address = int(address,16)
if type(amount) == str:
if "0x" in amount:
amount = int(amount,16)
if address < self.start_address or address > (self.start_address + self.allocated_memory):#is outside allocated memory range
raise IndexError("Address accessed by get_memory() function of Virtual Memory is outside the range of the allocated memory. Address: " + str(hex(address)) + ", Allocated Memory: " + str(hex(self.start_address)) + "-" + str(hex(self.start_address + self.allocated_memory)))
#gets amount bytes at address from memory
memory_start = address - self.start_address#internal memory of virtual memory
return self.memory[memory_start:memory_start + amount]
def set_memory(self,address,new_memory):
if type(address) == str:
if "0x" in address:
address = int(address,16)
if type(new_memory) != str:
raise IndexError("Memory Inputed by set_memory() function of Virtual Memory is not a valid type. Type: " + str(type(new_memory)))
if new_memory[0:2] == "0x":
new_memory = new_memory[2:]
memory_start = address - self.start_address#internal memory of virtual memory
if (address < self.start_address) or (address > (self.start_address + self.allocated_memory)) or (address + (len(new_memory) / 2) > (self.start_address + self.allocated_memory)): #is outside allocated memory range
raise IndexError("Address accessed by set_memory() function of Virtual Memory is outside the range of the allocated memory. Address: " + str(hex(address)) + "-" + str(hex(int(address + (len(new_memory) / 2))) + ", Allocated Memory: " + str(hex(self.start_address)) + "-" + str(hex(self.start_address + self.allocated_memory))))
if len(new_memory) % 2 != 0:#not even
new_memory = new_memory + self.memory[int(memory_start + (len(new_memory) / 2))][1]
self.memory[memory_start:int(memory_start + (len(new_memory) / 2))] = findall('..',new_memory) #updates memory
def print_variables(self):
print(self.start_address)
print(self.memory)
print(self.allocated_memory)
print(self.architecture_class)
#Memory = Virtual_Memory_Chunk("0x80000000",100,"52AA6FBB52AA60BB52AA60BB52AA60BB")
#print(Memory.get_memory("0x80000000","0xF"))
#Memory.set_memory("0x80000000","0xFFF")
#print(Memory.get_memory("0x80000000","0xF"))
Memory = Virtual_Memory(0xFFFFFFFF)
Memory.allocate(0x80000200,100)
Memory.allocate(0x80000000,100)
Memory.set_memory(0x80000002,'FAEE00112255')
print(Memory.get_memory(0x80000002,0x10))
newPPC = Powerpc_Architecture();
#for i in newPPC.get_registers():
# print(bin(int(i)))
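# A minimal sketch of the error handling exposed by the classes above (reuses the
# Memory instance created a few lines up; the addresses are illustrative only):
# allocating over an already allocated region raises IndexError, and deallocate()
# expects the exact starting address of a previously allocated chunk.
try:
    Memory.allocate(0x80000010, 0x20)#overlaps the chunk that starts at 0x80000000
except IndexError as overlap_error:
    print(overlap_error)
Memory.deallocate(0x80000200)#frees the chunk allocated at 0x80000200 above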
| 67.872807 | 340 | 0.654927 | [ "MIT" ] | tatatat0/VirtualMemory | VirtualMemory.py | 15,475 | Python |
import asyncio, sys, os
from onvif import ONVIFCamera
import time
IP="192.168.1.64" # Camera IP address
PORT=80 # Port
USER="admin" # Username
PASS="intflow3121" # Password
XMAX = 1
XMIN = -1
XNOW = 0.5
YMAX = 1
YMIN = -1
YNOW = 0.5
Move = 0.1
Velocity = 1
Zoom = 0
positionrequest = None
ptz = None
active = False
ptz_configuration_options = None
media_profile = None
def do_move(ptz, request):
global active
if active:
ptz.Stop({'ProfileToken': request.ProfileToken})
active = True
ptz.AbsoluteMove(request)
def move_up(ptz, request):
if YNOW - Move <= -1:
request.Position.PanTilt.y = YNOW
else:
request.Position.PanTilt.y = YNOW - Move
do_move(ptz, request)
def move_down(ptz, request):
if YNOW + Move >= 1:
request.Position.PanTilt.y = YNOW
else:
request.Position.PanTilt.y = YNOW + Move
do_move(ptz, request)
def move_right(ptz, request):
if XNOW - Move >= -0.99:
request.Position.PanTilt.x = XNOW - Move
elif abs(XNOW + Move) >= 0.0:
request.Position.PanTilt.x = abs(XNOW) - Move
elif abs(XNOW) <= 0.01:
request.Position.PanTilt.x = XNOW
request.Position.PanTilt.y = YNOW
do_move(ptz, request)
def move_left(ptz, request):
if XNOW + Move <= 1.0:
request.Position.PanTilt.x = XNOW + Move
elif XNOW <= 1.0 and XNOW > 0.99:
request.Position.PanTilt.x = -XNOW
elif XNOW < 0:
request.Position.PanTilt.x = XNOW + Move
elif XNOW <= -0.105556 and XNOW > -0.11:
request.Position.PanTilt.x = XNOW
request.Position.PanTilt.y = YNOW
do_move(ptz, request)
def move_upleft(ptz, request):
if YNOW == -1:
request.Position.PanTilt.y = YNOW
else:
request.Position.PanTilt.y = YNOW - Move
if XNOW + Move <= 1.0:
request.Position.PanTilt.x = XNOW + Move
elif XNOW <= 1.0 and XNOW > 0.99:
request.Position.PanTilt.x = -XNOW
elif XNOW < 0:
request.Position.PanTilt.x = XNOW + Move
elif XNOW <= -0.105556 and XNOW > -0.11:
request.Position.PanTilt.x = XNOW
do_move(ptz, request)
def move_upright(ptz, request):
if YNOW == -1:
request.Position.PanTilt.y = YNOW
else:
request.Position.PanTilt.y = YNOW - Move
if XNOW - Move >= -0.99:
request.Position.PanTilt.x = XNOW - Move
elif abs(XNOW + Move) >= 0.0:
request.Position.PanTilt.x = abs(XNOW) - Move
elif abs(XNOW) <= 0.01:
request.Position.PanTilt.x = XNOW
do_move(ptz, request)
def move_downleft(ptz, request):
if YNOW - Move == 1:
request.Position.PanTilt.y = YNOW
else:
request.Position.PanTilt.y = YNOW - Move
if XNOW + Move <= 1.0:
request.Position.PanTilt.x = XNOW + Move
elif XNOW <= 1.0 and XNOW > 0.99:
request.Position.PanTilt.x = -XNOW
elif XNOW < 0:
request.Position.PanTilt.x = XNOW + Move
elif XNOW <= -0.105556 and XNOW > -0.11:
request.Position.PanTilt.x = XNOW
do_move(ptz, request)
def move_downright(ptz, request):
if YNOW == -1:
request.Position.PanTilt.y = YNOW
else:
request.Position.PanTilt.y = YNOW - Move
if XNOW - Move >= -0.99:
request.Position.PanTilt.x = XNOW - Move
elif abs(XNOW + Move) >= 0.0:
request.Position.PanTilt.x = abs(XNOW) - Move
elif abs(XNOW) <= 0.01:
request.Position.PanTilt.x = XNOW
do_move(ptz, request)
def Zoom_in(ptz,request):
if Zoom + Move >= 1.0:
request.Position.Zoom = 1.0
else:
request.Position.Zoom = Zoom + Move
do_move(ptz, request)
def Zoom_out(ptz,request):
if Zoom - Move <= 0.0:
request.Position.Zoom = 0.0
else:
request.Position.Zoom = Zoom - Move
do_move(ptz,request)
def setup_move():
mycam = ONVIFCamera(IP, PORT, USER, PASS)
# Create media service object
media = mycam.create_media_service()
# Create ptz service object
global ptz , ptz_configuration_options, media_profile
ptz = mycam.create_ptz_service()
# Get target profile
media_profile = media.GetProfiles()[0]
request = ptz.create_type('GetConfigurationOptions')
request.ConfigurationToken = media_profile.PTZConfiguration.token
ptz_configuration_options = ptz.GetConfigurationOptions(request)
request_configuration = ptz.create_type('GetConfiguration')
request_configuration.PTZConfigurationToken = media_profile.PTZConfiguration.token
ptz_configuration = ptz.GetConfiguration(request_configuration)
request_setconfiguration = ptz.create_type('SetConfiguration')
request_setconfiguration.PTZConfiguration = ptz_configuration
global positionrequest
positionrequest = ptz.create_type('AbsoluteMove')
positionrequest.ProfileToken = media_profile.token
if positionrequest.Position is None :
positionrequest.Position = ptz.GetStatus({'ProfileToken': media_profile.token}).Position
positionrequest.Position.PanTilt.space = ptz_configuration_options.Spaces.AbsolutePanTiltPositionSpace[0].URI
positionrequest.Position.Zoom.space = ptz_configuration_options.Spaces.AbsoluteZoomPositionSpace[0].URI
if positionrequest.Speed is None :
positionrequest.Speed = ptz.GetStatus({'ProfileToken': media_profile.token}).Position
positionrequest.Speed.PanTilt.space = ptz_configuration_options.Spaces.PanTiltSpeedSpace[0].URI
def Get_Status():
# Get range of pan and tilt
global XMAX, XMIN, YMAX, YMIN, XNOW, YNOW, Velocity, Zoom
XMAX = ptz_configuration_options.Spaces.AbsolutePanTiltPositionSpace[0].XRange.Max
XMIN = ptz_configuration_options.Spaces.AbsolutePanTiltPositionSpace[0].XRange.Min
YMAX = ptz_configuration_options.Spaces.AbsolutePanTiltPositionSpace[0].YRange.Max
YMIN = ptz_configuration_options.Spaces.AbsolutePanTiltPositionSpace[0].YRange.Min
XNOW = ptz.GetStatus({'ProfileToken': media_profile.token}).Position.PanTilt.x
YNOW = ptz.GetStatus({'ProfileToken': media_profile.token}).Position.PanTilt.y
Velocity = ptz_configuration_options.Spaces.PanTiltSpeedSpace[0].XRange.Max
Zoom = ptz.GetStatus({'ProfileToken': media_profile.token}).Position.Zoom.x
def readin():
"""Reading from stdin and displaying menu"""
    global positionrequest, ptz, active  # 'active' is reassigned below, so it must be declared global as well
selection = sys.stdin.readline().strip("\n")
lov=[ x for x in selection.split(" ") if x != ""]
if lov:
if lov[0].lower() in ["u","up"]:
move_up(ptz,positionrequest)
elif lov[0].lower() in ["d","do","dow","down"]:
move_down(ptz,positionrequest)
elif lov[0].lower() in ["l","le","lef","left"]:
move_left(ptz,positionrequest)
elif lov[0].lower() in ["r","ri","rig","righ","right"]:
move_right(ptz,positionrequest)
elif lov[0].lower() in ["ul"]:
move_upleft(ptz,positionrequest)
elif lov[0].lower() in ["ur"]:
move_upright(ptz,positionrequest)
elif lov[0].lower() in ["dl"]:
move_downleft(ptz,positionrequest)
elif lov[0].lower() in ["dr"]:
move_downright(ptz,positionrequest)
elif lov[0].lower() in ["s","st","sto","stop"]:
ptz.Stop({'ProfileToken': positionrequest.ProfileToken})
active = False
else:
print("What are you asking?\tI only know, 'up','down','left','right', 'ul' (up left), \n\t\t\t'ur' (up right), 'dl' (down left), 'dr' (down right) and 'stop'")
print("")
print("Your command: ", end='',flush=True)
# Test Define
# def move(ptz, request):
# request.Position.PanTilt.y = -1
# request.Position.PanTilt.x = 0
# do_move(ptz,request)
if __name__ == '__main__':
setup_move()
# Get_Status()
# Zoom_out(ptz,positionrequest)
# Get_Status()
# move(ptz,positionrequest)
while True:
if active == True:
time.sleep(1)
active = False
else:
Get_Status()
            move_up(ptz, positionrequest)
| 30.774074 | 171 | 0.636298 | [ "MIT" ] | intflow/python-onvif-zeep | examples/AbsoluteMove.py | 8,309 | Python |
'''
Preprocessor for Foliant documentation authoring tool.
Calls Elasticsearch API to generate an index based on Markdown content.
'''
import re
import json
from os import getenv
from pathlib import Path
from urllib import request
from urllib.error import HTTPError
from markdown import markdown
from bs4 import BeautifulSoup
from foliant.preprocessors.base import BasePreprocessor
class Preprocessor(BasePreprocessor):
defaults = {
'es_url': 'http://127.0.0.1:9200/',
'index_name': '',
'index_copy_name': '',
'index_properties': {},
'actions': [
'delete',
'create'
],
'use_chapters': True,
'format': 'plaintext',
'escape_html': True,
'url_transform': [
{'\/?index\.md$': '/'},
{'\.md$': '/'},
{'^([^\/]+)': '/\g<1>'}
],
'require_env': False,
'targets': []
}
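    # A sketch of how these options might be set from a project's foliant.yml (the
    # index name and URL below are placeholders, not values taken from any real config):
    #
    # preprocessors:
    #     - elasticsearch:
    #         es_url: http://127.0.0.1:9200/
    #         index_name: my-docs
    #         actions:
    #             - delete
    #             - create
    #         use_chapters: true
    #         format: plaintext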
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.logger = self.logger.getChild('elasticsearch')
self.logger.debug(f'Preprocessor inited: {self.__dict__}')
def _get_url(self, markdown_file_path: str) -> str:
url = str(markdown_file_path.relative_to(self.working_dir))
url_transformation_rules = self.options['url_transform']
if not isinstance(url_transformation_rules, list):
url_transformation_rules = [url_transformation_rules]
for url_transformation_rule in url_transformation_rules:
for pattern, replacement in url_transformation_rule.items():
url = re.sub(pattern, replacement, url)
return url
def _get_title(self, markdown_content: str) -> str or None:
headings_found = re.search(
r'^\#{1,6}\s+(.+?)(?:\s+\{\#\S+\})?\s*$',
markdown_content,
flags=re.MULTILINE
)
if headings_found:
return headings_found.group(1)
return None
def _get_chapters_paths(self) -> list:
def _recursive_process_chapters(chapters_subset):
if isinstance(chapters_subset, dict):
processed_chapters_subset = {}
for key, value in chapters_subset.items():
processed_chapters_subset[key] = _recursive_process_chapters(value)
elif isinstance(chapters_subset, list):
processed_chapters_subset = []
for item in chapters_subset:
processed_chapters_subset.append(_recursive_process_chapters(item))
elif isinstance(chapters_subset, str):
if chapters_subset.endswith('.md'):
chapters_paths.append(self.working_dir / chapters_subset)
processed_chapters_subset = chapters_subset
else:
processed_chapters_subset = chapters_subset
return processed_chapters_subset
chapters_paths = []
_recursive_process_chapters(self.config['chapters'])
self.logger.debug(f'Chapters files paths: {chapters_paths}')
return chapters_paths
def _http_request(
self,
request_url: str,
request_method: str = 'GET',
request_headers: dict or None = None,
request_data: bytes or None = None
) -> dict:
http_request = request.Request(request_url, method=request_method)
if request_headers:
http_request.headers = request_headers
if request_data:
http_request.data = request_data
try:
with request.urlopen(http_request) as http_response:
response_status = http_response.getcode()
response_headers = http_response.info()
response_data = http_response.read()
except HTTPError as http_response_not_ok:
response_status = http_response_not_ok.getcode()
response_headers = http_response_not_ok.info()
response_data = http_response_not_ok.read()
return {
'status': response_status,
'headers': response_headers,
'data': response_data
}
def _escape_html(self, content: str) -> str:
return content.replace('&', '&').replace('<', '<').replace('>', '>').replace('"', '"')
def _create_index(self, index_name: str) -> None:
if self.options['index_properties']:
create_request_url = f'{self.options["es_url"].rstrip("/")}/{index_name}/'
self.logger.debug(
'Calling Elasticsearch API to create an index with specified properties, ' +
f'URL: {create_request_url}'
)
create_response = self._http_request(
create_request_url,
'PUT',
{
'Content-Type': 'application/json; charset=utf-8'
},
json.dumps(self.options['index_properties'], ensure_ascii=False).encode('utf-8')
)
create_response_data = json.loads(create_response['data'].decode('utf-8'))
self.logger.debug(f'Response received, status: {create_response["status"]}')
self.logger.debug(f'Response headers: {create_response["headers"]}')
self.logger.debug(f'Response data: {create_response_data}')
if create_response['status'] == 200 and create_response_data.get('acknowledged', None) is True:
self.logger.debug('Index created')
elif create_response['status'] == 400 and create_response_data.get(
'error', {}
).get(
'type', ''
) == 'resource_already_exists_exception':
self.logger.debug('Index already exists')
else:
error_message = 'Failed to create an index'
self.logger.error(f'{error_message}')
raise RuntimeError(f'{error_message}')
else:
self.logger.debug('An index without specific properties will be created')
if self.options['use_chapters']:
self.logger.debug('Only files mentioned in chapters will be indexed')
markdown_files_paths = self._get_chapters_paths()
else:
self.logger.debug('All files of the project will be indexed')
markdown_files_paths = self.working_dir.rglob('*.md')
data_for_indexing = ''
for markdown_file_path in markdown_files_paths:
self.logger.debug(f'Processing the file: {markdown_file_path}')
with open(markdown_file_path, encoding='utf8') as markdown_file:
markdown_content = markdown_file.read()
if markdown_content:
url = self._get_url(markdown_file_path)
title = self._get_title(markdown_content)
if self.options['format'] == 'html' or self.options['format'] == 'plaintext':
self.logger.debug(f'Converting source Markdown content to: {self.options["format"]}')
content = markdown(markdown_content)
if self.options['format'] == 'plaintext':
soup = BeautifulSoup(content, 'lxml')
for non_text_node in soup(['style', 'script']):
non_text_node.extract()
content = soup.get_text()
if self.options['escape_html']:
self.logger.debug('Escaping HTML syntax')
if title:
title = self._escape_html(title)
content = self._escape_html(content)
else:
self.logger.debug('Leaving source Markdown content unchanged')
content = markdown_content
self.logger.debug(f'Adding the page, URL: {url}, title: {title}')
data_for_indexing += '{"index": {}}\n' + json.dumps(
{
'url': url,
'title': title,
'content': content
},
ensure_ascii=False
) + '\n'
else:
self.logger.debug('It seems that the file has no content')
self.logger.debug(f'Data for indexing: {data_for_indexing}')
update_request_url = f'{self.options["es_url"].rstrip("/")}/{index_name}/_bulk?refresh'
self.logger.debug(f'Calling Elasticsearch API to add the content to the index, URL: {update_request_url}')
update_response = self._http_request(
update_request_url,
'POST',
{
'Content-Type': 'application/json; charset=utf-8'
},
data_for_indexing.encode('utf-8')
)
update_response_data = json.loads(update_response['data'].decode('utf-8'))
self.logger.debug(f'Response received, status: {update_response["status"]}')
self.logger.debug(f'Response headers: {update_response["headers"]}')
self.logger.debug(f'Response data: {update_response_data}')
if update_response['status'] != 200 or update_response_data.get('errors', True):
error_message = 'Failed to add content to the index'
self.logger.error(f'{error_message}')
raise RuntimeError(f'{error_message}')
return None
def _delete_index(self, index_name: str) -> None:
delete_request_url = f'{self.options["es_url"].rstrip("/")}/{index_name}/'
self.logger.debug(f'Calling Elasticsearch API to delete the index, URL: {delete_request_url}')
delete_response = self._http_request(
delete_request_url,
'DELETE'
)
delete_response_data = json.loads(delete_response['data'].decode('utf-8'))
self.logger.debug(f'Response received, status: {delete_response["status"]}')
self.logger.debug(f'Response headers: {delete_response["headers"]}')
self.logger.debug(f'Response data: {delete_response_data}')
if delete_response['status'] == 200 and delete_response_data.get('acknowledged', None) is True:
self.logger.debug('Index deleted')
elif delete_response['status'] == 404 and delete_response_data.get(
'error', {}
).get(
'type', ''
) == 'index_not_found_exception':
self.logger.debug('Index does not exist')
else:
error_message = 'Failed to delete the index'
self.logger.error(f'{error_message}')
raise RuntimeError(f'{error_message}')
return None
def _update_index_setting(self, index_name: str, settings_to_update: dict) -> None:
update_request_url = f'{self.options["es_url"].rstrip("/")}/{index_name}/_settings/'
self.logger.debug(f'Calling Elasticsearch API to update the index settings, URL: {update_request_url}')
update_response = self._http_request(
update_request_url,
'PUT',
{
'Content-Type': 'application/json; charset=utf-8'
},
json.dumps(
settings_to_update,
ensure_ascii=False
).encode('utf-8')
)
update_response_data = json.loads(update_response['data'].decode('utf-8'))
self.logger.debug(f'Response received, status: {update_response["status"]}')
self.logger.debug(f'Response headers: {update_response["headers"]}')
self.logger.debug(f'Response data: {update_response_data}')
if update_response['status'] == 200 and update_response_data.get('acknowledged', None) is True:
self.logger.debug('Index settings updated')
else:
error_message = 'Failed to update the index settings'
self.logger.error(f'{error_message}')
raise RuntimeError(f'{error_message}')
return None
def _clone_index(self, index_name: str, index_copy_name: str) -> None:
clone_request_url = f'{self.options["es_url"].rstrip("/")}/{index_name}/_clone/{index_copy_name}/'
self.logger.debug(f'Calling Elasticsearch API to clone the index, URL: {clone_request_url}')
clone_response = self._http_request(
clone_request_url,
'POST'
)
clone_response_data = json.loads(clone_response['data'].decode('utf-8'))
self.logger.debug(f'Response received, status: {clone_response["status"]}')
self.logger.debug(f'Response headers: {clone_response["headers"]}')
self.logger.debug(f'Response data: {clone_response_data}')
if clone_response['status'] == 200 and clone_response_data.get('acknowledged', None) is True:
self.logger.debug('Index cloned')
else:
error_message = 'Failed to clone the index'
self.logger.error(f'{error_message}')
raise RuntimeError(f'{error_message}')
return None
def _copy_index(self, index_name: str, index_copy_name: str) -> None:
if not index_copy_name:
index_copy_name = index_name + '_copy'
self.logger.debug(f'Copying the index {index_name} to {index_copy_name}')
self.logger.debug(f'First, marking the index {index_name} as read-only')
self._update_index_setting(
index_name,
{
'settings': {
'index.blocks.write': True
}
}
)
self.logger.debug(f'Second, deleting the index {index_copy_name}, if exists')
self._delete_index(index_copy_name)
self.logger.debug(f'Third, cloning the index {index_name} as {index_copy_name}')
self._clone_index(index_name, index_copy_name)
self.logger.debug(f'Fourth, unmarking the index {index_name} as read-only')
self._update_index_setting(
index_name,
{
'settings': {
'index.blocks.write': False
}
}
)
self.logger.debug(f'Fifth, also unmarking the index {index_copy_name} as read-only')
self._update_index_setting(
index_copy_name,
{
'settings': {
'index.blocks.write': False
}
}
)
return None
def apply(self):
self.logger.info('Applying preprocessor')
envvar = 'FOLIANT_ELASTICSEARCH'
if not self.options['require_env'] or getenv(envvar) is not None:
self.logger.debug(
f'Allowed targets: {self.options["targets"]}, ' +
f'current target: {self.context["target"]}'
)
if not self.options['targets'] or self.context['target'] in self.options['targets']:
actions = self.options['actions']
if not isinstance(self.options['actions'], list):
actions = [actions]
for action in actions:
self.logger.debug(f'Applying action: {action}')
if action == 'create':
self._create_index(self.options['index_name'])
elif action == 'delete':
self._delete_index(self.options['index_name'])
elif action == 'copy':
self._copy_index(self.options['index_name'], self.options['index_copy_name'])
else:
self.logger.debug('Unknown action, skipping')
else:
self.logger.debug(f'Environment variable {envvar} is not set, skipping')
self.logger.info('Preprocessor applied')
| 35.238307 | 114 | 0.579446 | [ "MIT" ] | foliant-docs/foliantcontrib.elasticsearch | foliant/preprocessors/elasticsearch.py | 15,822 | Python |
from django.apps import AppConfig
class TableroConfig(AppConfig):
default_auto_field = 'django.db.models.BigAutoField'
name = 'tablero'
| 20.857143 | 56 | 0.760274 | [ "MIT" ] | AutodidactaMx/cocid_python | Modulo_4/semana 2/miercoles/servicioweb/tablero/apps.py | 146 | Python |
__version__ = '3.0.6'
__buildinfo__ = { 'branch' : 'BRANCH_NOT_SET' , 'last_commit' : 'COMMIT_NOT_SET' }
| 21.4 | 82 | 0.672897 | [ "Apache-2.0" ] | arshiyaaggarwal/hubble | hubblestack/__init__.py | 107 | Python |
"""
Settings for different models.
"""
import attr
@attr.s # pylint: disable=too-many-instance-attributes,too-few-public-methods
class BaseSettings:
"""
Base configuration settings.
"""
val_split = attr.ib(default=0.0)
test_split = attr.ib(default=0.15)
detect_type = attr.ib(default="dual")
verbose = attr.ib(default=0)
print_summary = attr.ib(default=False, metadata={"deprecated": True})
plot_model = attr.ib(default=False, metadata={"deprecated": True})
plot_dpi = attr.ib(default=400, metadata={"deprecated": True})
output_path = attr.ib(default="./output")
@attr.s # pylint: disable=too-many-instance-attributes,too-few-public-methods
class NeuralSettings(BaseSettings):
"""
Neural model settings.
"""
language = attr.ib(default="")
series = attr.ib(default="")
batch_size = attr.ib(default=32)
skip_step = attr.ib(default=5)
token_maxlen = attr.ib(default=30)
model_type = attr.ib(default="recurrent") # recurrent
fraction = attr.ib(default=0.995) # For Native model.
prediction_policy = attr.ib(default="zero") # zero, accuracy, fscore
fscore_beta = attr.ib(default=1.0)
@attr.s # pylint: disable=too-many-instance-attributes,too-few-public-methods
class EntropiesSettings(NeuralSettings):
"""
Entropy model settings.
"""
# While not strictly a child of NeuralSettings, it seems more convenient.
tf_verbose = attr.ib(default=0)
basis = attr.ib(default="all")
@attr.s # pylint: disable=too-many-instance-attributes,too-few-public-methods
class RecurrentSettings(EntropiesSettings):
"""
Recurrent model settings.
"""
# Architecture parameters
embedding_len = attr.ib(default=32)
rnn_output_len = attr.ib(default=32)
rnn_cell_type = attr.ib(default="GRU") # GRU, LSTM
rnn_levels = attr.ib(default=1) # 1, 2
# Dropout and regulation parameters
embedding_dropout = attr.ib(default=0.0)
recurrent_l2 = attr.ib(default=0.001)
rnn_activity_l2 = attr.ib(default=0.0)
recurrent_dropout = attr.ib(default=0.0)
rnn_output_dropout = attr.ib(default=0.2)
merge_embedding_dropout = attr.ib(default=0.2)
# Model fitting parameters
epochs = attr.ib(default=45)
learning_rate = attr.ib(default=0.01)
learning_rate_decay = attr.ib(default=0.95) # Adjust for batch size, data len.
restore_best_weights = attr.ib(default=True)
@attr.s # pylint: disable=too-many-instance-attributes,too-few-public-methods
class MarkovSettings(BaseSettings):
"""
Markov model settings.
"""
model = attr.ib(default="kni")
order = attr.ib(default=3)
p = attr.ib(default=0.995) # pylint: disable=invalid-name
smoothing = attr.ib(default=0.3)
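if __name__ == "__main__":
    # Minimal usage sketch (illustrative values, not recommended defaults): the
    # settings classes are plain attrs containers, so any field can be overridden
    # by keyword argument, and untouched fields keep the defaults defined above.
    example = RecurrentSettings(language="english", epochs=30, rnn_cell_type="LSTM")
    print(example.learning_rate)  # -> 0.01, the default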
| 30.054348 | 83 | 0.687161 | [ "Apache-2.0" ] | lingpy/pybor | src/pybor/config.py | 2,765 | Python |
#!/usr/bin/python
#
# linearize-hashes.py: List blocks in a linear, no-fork version of the chain.
#
# Copyright (c) 2013-2014 The Bitcoin developers
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
from __future__ import print_function
import json
import struct
import re
import base64
import httplib
import sys
settings = {}
class BitcoinRPC:
def __init__(self, host, port, username, password):
authpair = "%s:%s" % (username, password)
self.authhdr = "Basic %s" % (base64.b64encode(authpair))
self.conn = httplib.HTTPConnection(host, port, False, 30)
def execute(self, obj):
self.conn.request('POST', '/', json.dumps(obj),
{ 'Authorization' : self.authhdr,
'Content-type' : 'application/json' })
resp = self.conn.getresponse()
if resp is None:
print("JSON-RPC: no response", file=sys.stderr)
return None
body = resp.read()
resp_obj = json.loads(body)
return resp_obj
@staticmethod
def build_request(idx, method, params):
obj = { 'version' : '1.1',
'method' : method,
'id' : idx }
if params is None:
obj['params'] = []
else:
obj['params'] = params
return obj
@staticmethod
def response_is_error(resp_obj):
return 'error' in resp_obj and resp_obj['error'] is not None
def get_block_hashes(settings, max_blocks_per_call=10000):
rpc = BitcoinRPC(settings['host'], settings['port'],
settings['rpcuser'], settings['rpcpassword'])
height = settings['min_height']
while height < settings['max_height']+1:
num_blocks = min(settings['max_height']+1-height, max_blocks_per_call)
batch = []
for x in range(num_blocks):
batch.append(rpc.build_request(x, 'getblockhash', [height + x]))
reply = rpc.execute(batch)
for x,resp_obj in enumerate(reply):
if rpc.response_is_error(resp_obj):
print('JSON-RPC: error at height', height+x, ': ', resp_obj['error'], file=sys.stderr)
exit(1)
assert(resp_obj['id'] == x) # assume replies are in-sequence
print(resp_obj['result'])
height += num_blocks
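# A sketch of the expected CONFIG-FILE contents (plain key=value lines; the parser
# below skips '#' comments, and any key left out falls back to the defaults set in
# __main__; the credentials shown are placeholders):
#
#   host=127.0.0.1
#   port=32585
#   rpcuser=someuser
#   rpcpassword=somepassword
#   min_height=0
#   max_height=313000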
if __name__ == '__main__':
if len(sys.argv) != 2:
print("Usage: linearize-hashes.py CONFIG-FILE")
sys.exit(1)
f = open(sys.argv[1])
for line in f:
# skip comment lines
m = re.search('^\s*#', line)
if m:
continue
# parse key=value lines
m = re.search('^(\w+)\s*=\s*(\S.*)$', line)
if m is None:
continue
settings[m.group(1)] = m.group(2)
f.close()
if 'host' not in settings:
settings['host'] = '127.0.0.1'
if 'port' not in settings:
settings['port'] = 32585
if 'min_height' not in settings:
settings['min_height'] = 0
if 'max_height' not in settings:
settings['max_height'] = 313000
if 'rpcuser' not in settings or 'rpcpassword' not in settings:
        print("Missing username and/or password in cfg file", file=sys.stderr)
sys.exit(1)
settings['port'] = int(settings['port'])
settings['min_height'] = int(settings['min_height'])
settings['max_height'] = int(settings['max_height'])
get_block_hashes(settings)
| 26.640351 | 90 | 0.682581 | [ "MIT" ] | Bittransfer/Bittransfer-master | contrib/linearize/linearize-hashes.py | 3,037 | Python |
from rest_framework import parsers, renderers, status
from rest_framework.authtoken.serializers import AuthTokenSerializer
from rest_framework.compat import coreapi, coreschema
from rest_framework.generics import DestroyAPIView, RetrieveAPIView
from rest_framework.permissions import IsAdminUser, IsAuthenticated
from rest_framework.response import Response
from rest_framework.schemas import ManualSchema
from rest_framework.views import APIView
from django.contrib.auth import get_user_model
from django.contrib.auth import login as auth_login
from django.contrib.auth import logout as auth_logout
from django.contrib.auth import views as auth_views
from django.contrib.auth.views import LoginView as AuthLoginView
from django.contrib.auth.views import LogoutView as AuthLogoutView
from django.contrib.sites.shortcuts import get_current_site
from django.core import signing
from django.http import HttpResponseRedirect
from django.shortcuts import redirect
from django.template.loader import render_to_string
from django.urls import reverse_lazy
from django.utils.decorators import method_decorator
from django.views.decorators.cache import never_cache
from django.views.decorators.csrf import csrf_protect
from django.views.decorators.debug import sensitive_post_parameters
from django.views.generic import FormView, TemplateView
import auditor
import conf
from api.users.forms import RegistrationForm
from api.users.utils import login_user, logout_user
from api.utils.views.post import PostAPIView
from db.models.tokens import Token
from event_manager.events.superuser import SUPERUSER_ROLE_GRANTED, SUPERUSER_ROLE_REVOKED
from event_manager.events.user import USER_ACTIVATED, USER_DELETED
from schemas.user import UserConfig
from signals import users as users_signals
class ObtainAuthToken(APIView):
throttle_classes = ()
permission_classes = ()
parser_classes = (parsers.FormParser, parsers.MultiPartParser, parsers.JSONParser,)
renderer_classes = (renderers.JSONRenderer,)
serializer_class = AuthTokenSerializer
if coreapi is not None and coreschema is not None:
schema = ManualSchema(
fields=[
coreapi.Field(
name="username",
required=True,
location='form',
schema=coreschema.String(
title="Username",
description="Valid username for authentication",
),
),
coreapi.Field(
name="password",
required=True,
location='form',
schema=coreschema.String(
title="Password",
description="Valid password for authentication",
),
),
],
encoding="application/json",
)
def post(self, request, *args, **kwargs):
serializer = self.serializer_class(data=request.data,
context={'request': request})
serializer.is_valid(raise_exception=True)
user = serializer.validated_data['user']
token, _ = Token.objects.get_or_create(user=user)
return Response({'token': token.key})
obtain_auth_token = ObtainAuthToken.as_view()
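# A minimal wiring sketch for the view above (the URL path and urlpatterns layout
# here are assumptions for illustration, not this project's actual routing):
#
#   from django.urls import path
#   urlpatterns = [
#       path('api/v1/auth/token/', obtain_auth_token),
#   ]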
class AuthTokenLogin(ObtainAuthToken):
"""Login user and return user's token."""
def post(self, request, *args, **kwargs):
serializer = self.serializer_class(data=request.data,
context={'request': request})
serializer.is_valid(raise_exception=True)
user = serializer.validated_data['user']
token, created = Token.objects.get_or_create(user=user)
if not created and token.is_expired:
token.refresh()
response = Response({'token': token.key})
if request.data.get('login'):
auth_login(self.request, user)
response.set_cookie('token', value=token.key)
response.set_cookie('user', value=user.username)
return response
class AuthTokenLogout(APIView):
throttle_classes = ()
permission_classes = ()
def get(self, request, *args, **kwargs):
auth_logout(request)
response = Response()
response.delete_cookie('token')
response.delete_cookie('user')
return response
class RefreshSessionView(APIView):
permission_classes = (IsAuthenticated,)
def post(self, request, *args, **kwargs):
auth_login(self.request, request.user)
return Response()
class LoginView(AuthLoginView):
@method_decorator(sensitive_post_parameters())
@method_decorator(csrf_protect)
@method_decorator(never_cache)
def dispatch(self, request, *args, **kwargs):
if not request.user.is_anonymous:
return HttpResponseRedirect('/')
response = super().dispatch(request, *args, **kwargs)
login_user(request=request, response=response, user=request.user, login=False)
return response
class LogoutView(AuthLogoutView):
@method_decorator(never_cache)
def dispatch(self, request, *args, **kwargs):
response = super().dispatch(request, *args, **kwargs)
logout_user(request=request, response=response, logout=False)
return response
class RegistrationView(FormView):
"""Register a new (inactive) user account, generate an activation key and email it to the user.
This is different from the model-based activation workflow in that
the activation key is the username, signed using Django's
TimestampSigner, with HMAC verification on activation.
"""
form_class = RegistrationForm
template_name = 'users/register.html'
email_body_template = 'users/activation_email.txt'
email_subject_template = 'users/activation_email_subject.txt'
success_url = 'users:registration_complete'
key_salt = 'users.tokens.RegistrationView'
def form_valid(self, form):
self.register(form)
return redirect(self.get_success_url())
def register(self, form):
new_user = self.create_inactive_user(form)
users_signals.user_registered.send(
sender=self.__class__,
user=new_user,
request=self.request)
return new_user
def create_inactive_user(self, form):
"""
Create the inactive user account and send an email containing
activation instructions.
"""
new_user = form.save(commit=False)
new_user.is_active = False
new_user.save()
self.send_activation_email(new_user)
return new_user
def get_activation_key(self, user):
"""
Generate the activation key which will be emailed to the user.
"""
return signing.dumps(
obj=getattr(user, user.USERNAME_FIELD),
salt=self.key_salt
)
def get_email_context(self, activation_key):
"""
Build the template context used for the activation email.
"""
return {
'activation_key': activation_key,
'expiration_days': conf.get('ACCOUNT_ACTIVATION_DAYS'),
'site': get_current_site(self.request)
}
def send_activation_email(self, user):
"""
Send the activation email. The activation key is the username,
signed using TimestampSigner.
"""
activation_key = self.get_activation_key(user)
context = self.get_email_context(activation_key)
context.update({
'user': user
})
subject = render_to_string(self.email_subject_template,
context)
# Force subject to a single line to avoid header-injection
# issues.
subject = ''.join(subject.splitlines())
message = render_to_string(self.email_body_template,
context)
user.email_user(subject, message, conf.get('DEFAULT_FROM_EMAIL'))
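# Sketch of the activation-key round trip used by RegistrationView above and
# ActivationView below (plain django.core.signing; the username and the 3-day
# window are illustrative, the real window comes from ACCOUNT_ACTIVATION_DAYS):
#
#   key = signing.dumps("some-username", salt=RegistrationView.key_salt)
#   signing.loads(key, salt=RegistrationView.key_salt, max_age=3 * 86400)  # -> "some-username"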
class SimpleRegistrationView(RegistrationView):
    """Registration and validation through a superuser."""
form_class = RegistrationForm
template_name = 'users/register.html'
def create_inactive_user(self, form):
"""Create the inactive user account and wait for validation from superuser"""
new_user = form.save(commit=False)
new_user.is_active = False
new_user.save()
return new_user
def get(self, request, *args, **kwargs):
if not request.user.is_anonymous:
return HttpResponseRedirect('/')
return super().get(request, *args, **kwargs)
class ActivationView(TemplateView):
"""
Given a valid activation key, activate the user's
account. Otherwise, show an error message stating the account
couldn't be activated.
"""
template_name = 'users/activate.html'
success_url = 'users:registration_activation_complete'
key_salt = 'users.tokens.RegistrationView'
def activate(self, *args, **kwargs):
# This is safe even if, somehow, there's no activation key,
# because unsign() will raise BadSignature rather than
# TypeError on a value of None.
username = self.validate_key(kwargs.get('activation_key'))
if username is not None:
user = self.get_user(username)
if user is not None:
user.is_active = True
user.save()
return user
return False
def validate_key(self, activation_key):
"""
Verify that the activation key is valid and within the
permitted activation time window, returning the username if
valid or ``None`` if not.
"""
try:
username = signing.loads(
activation_key,
salt=self.key_salt,
max_age=conf.get('ACCOUNT_ACTIVATION_DAYS') * 86400
)
return username
# SignatureExpired is a subclass of BadSignature, so this will
# catch either one.
except signing.BadSignature:
return None
def get_user(self, username):
"""
Given the verified username, look up and return the
corresponding user account if it exists, or ``None`` if it
doesn't.
"""
User = get_user_model() # noqa
try:
user = User.objects.get(**{
User.USERNAME_FIELD: username,
'is_active': False
})
return user
except User.DoesNotExist:
return None
def get(self, request, *args, **kwargs):
"""The base activation logic; subclasses should leave this method
alone and implement activate(), which is called from this method.
"""
activated_user = self.activate(*args, **kwargs)
if activated_user:
users_signals.user_activated.send(
sender=self.__class__,
user=activated_user,
request=request
)
return redirect(self.success_url)
return super().get(request, *args, **kwargs)
class PasswordResetView(auth_views.PasswordResetView):
template_name = 'users/password_reset.html'
subject_template_name = 'users/password_reset_subject.txt'
email_template_name = 'users/password_reset_body.txt'
success_url = reverse_lazy('users:password_reset_done')
class TokenView(TemplateView):
template_name = 'users/token.html'
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
token, _ = Token.objects.get_or_create(user=self.request.user)
context['token'] = token.key
return context
class UserView(RetrieveAPIView):
"""Get user details."""
def retrieve(self, request, *args, **kwargs):
user = request.user
return Response(UserConfig.obj_to_dict(user))
class ActivateView(PostAPIView):
"""Activate user."""
queryset = get_user_model().objects.filter()
permission_classes = (IsAuthenticated, IsAdminUser,)
lookup_field = 'username'
def post(self, request, *args, **kwargs):
user = self.get_object()
user.is_active = True
user.save()
auditor.record(event_type=USER_ACTIVATED,
instance=user,
actor_id=self.request.user.id,
actor_name=self.request.user.username)
return Response(status=status.HTTP_200_OK)
class DeleteView(DestroyAPIView):
"""Delete user."""
queryset = get_user_model()
permission_classes = (IsAuthenticated, IsAdminUser,)
lookup_field = 'username'
def destroy(self, request, *args, **kwargs):
instance = self.get_object()
auditor.record(event_type=USER_DELETED,
instance=instance,
actor_id=self.request.user.id,
actor_name=self.request.user.username)
self.perform_destroy(instance)
return Response(status=status.HTTP_204_NO_CONTENT)
class GrantSuperuserView(PostAPIView):
"""Grant a user the superuser role."""
queryset = get_user_model()
permission_classes = (IsAuthenticated, IsAdminUser,)
lookup_field = 'username'
def post(self, request, *args, **kwargs):
user = self.get_object()
user.is_staff = True
user.is_superuser = True
user.save()
auditor.record(event_type=SUPERUSER_ROLE_GRANTED,
instance=user,
actor_id=self.request.user.id,
actor_name=self.request.user.username)
return Response(status=status.HTTP_200_OK)
class RevokeSuperuserView(PostAPIView):
"""Revoke the superuser role from user."""
queryset = get_user_model()
permission_classes = (IsAuthenticated, IsAdminUser,)
lookup_field = 'username'
def post(self, request, *args, **kwargs):
user = self.get_object()
user.is_staff = False
user.is_superuser = False
user.save()
auditor.record(event_type=SUPERUSER_ROLE_REVOKED,
instance=user,
actor_id=self.request.user.id,
actor_name=self.request.user.username)
return Response(status=status.HTTP_200_OK)
| 35.065854 | 99 | 0.647632 | [ "MPL-2.0", "MPL-2.0-no-copyleft-exception" ] | AntoineToubhans/polyaxon | polyaxon/api/users/views.py | 14,377 | Python |
# encoding=utf8
"""Implementations of Cosine mixture functions."""
from numpy import cos, pi
from NiaPy.benchmarks.benchmark import Benchmark
__all__ = ['CosineMixture']
class CosineMixture(Benchmark):
r"""Implementations of Cosine mixture function.
Date: 2018
Author: Klemen Berkovič
License: MIT
Function:
**Cosine Mixture Function**
:math:`f(\textbf{x}) = - 0.1 \sum_{i = 1}^D \cos (5 \pi x_i) - \sum_{i = 1}^D x_i^2`
**Input domain:**
The function can be defined on any input domain but it is usually
evaluated on the hypercube :math:`x_i ∈ [-1, 1]`, for all :math:`i = 1, 2,..., D`.
	**Global maximum:**
:math:`f(x^*) = -0.1 D`, at :math:`x^* = (0.0,...,0.0)`
LaTeX formats:
Inline:
$f(\textbf{x}) = - 0.1 \sum_{i = 1}^D \cos (5 \pi x_i) - \sum_{i = 1}^D x_i^2$
Equation:
\begin{equation} f(\textbf{x}) = - 0.1 \sum_{i = 1}^D \cos (5 \pi x_i) - \sum_{i = 1}^D x_i^2 \end{equation}
Domain:
$-1 \leq x_i \leq 1$
Reference:
http://infinity77.net/global_optimization/test_functions_nd_C.html#go_benchmark.CosineMixture
"""
Name = ['CosineMixture']
def __init__(self, Lower=-1.0, Upper=1.0):
r"""Initialize of Cosine mixture benchmark.
Args:
Lower (Optional[float]): Lower bound of problem.
Upper (Optional[float]): Upper bound of problem.
See Also:
:func:`NiaPy.benchmarks.Benchmark.__init__`
"""
Benchmark.__init__(self, Lower, Upper)
@staticmethod
def latex_code():
r"""Return the latex code of the problem.
Returns:
str: Latex code
"""
return r'''$f(\textbf{x}) = - 0.1 \sum_{i = 1}^D \cos (5 \pi x_i) - \sum_{i = 1}^D x_i^2$'''
def function(self):
r"""Return benchmark evaluation function.
Returns:
Callable[[int, Union[int, float, List[int, float], numpy.ndarray]], float]: Fitness function
"""
def f(D, X):
r"""Fitness function.
Args:
D (int): Dimensionality of the problem
sol (Union[int, float, List[int, float], numpy.ndarray]): Solution to check.
Returns:
float: Fitness value for the solution.
"""
v1, v2 = 0.0, 0.0
for i in range(D): v1, v2 = v1 + cos(5 * pi * X[i]), v2 + X[i] ** 2
return -0.1 * v1 - v2
return f
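# Minimal usage sketch (names match the class above; not an official NiaPy example):
#
#   bench = CosineMixture(Lower=-1.0, Upper=1.0)
#   fitness = bench.function()
#   fitness(2, [0.0, 0.0])  # -> -0.2, i.e. -0.1 * D at x* = (0, ..., 0)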
# vim: tabstop=3 noexpandtab shiftwidth=3 softtabstop=3
| 25.022472 | 112 | 0.634037 | [ "MIT" ] | lucijabrezocnik/NiaPy | NiaPy/benchmarks/cosinemixture.py | 2,230 | Python |
from dearpygui.core import *
from dearpygui.simple import *
from math import cos, sin
# callbacks
def colormap_callback(sender, data):
value = get_value("Colormaps")
set_color_map("Plot", value)
def plot_callback(sender, data):
clear_plot("Plot")
data1x = []
data1y = []
for i in range(0, 100):
data1x.append(3.14 * i / 180)
data1y.append(cos(3 * 3.14 * i / 180))
data2x = []
data2y = []
for i in range(0, 100):
data2x.append(3.14 * i / 180)
data2y.append(sin(2 * 3.14 * i / 180))
add_line_series("Plot", "Cos", data1x, data1y, weight=2)
add_shade_series("Plot", "Cos", data1x, data1y, y2=[0.0]*100, weight=2, fill=[255, 0, 0, 100], color=[255, 0, 0, 100])
add_scatter_series("Plot", "Sin", data2x, data2y)
with window("Main Window"):
with tab_bar("PlotTabBar"):
with tab("Plot Widget"):
add_text("Tips")
add_text("Double click plot to scale to data", bullet=True)
add_text("Right click and drag to zoom to an area", bullet=True)
add_text("Double right click to open settings", bullet=True)
add_text("Toggle data sets on the legend to hide them", bullet=True)
add_text("Click and drag in the plot area to pan", bullet=True)
add_text("Scroll mouse wheel in the plot area to zoom", bullet=True)
add_text("Click and drag on an axis to just pan that dimension", bullet=True)
add_text("Scroll mouse wheel on an axis to just scale that dimension", bullet=True)
add_button("Plot data", callback=plot_callback)
add_listbox("Colormaps", items=["Default", "Dark", "Pastel", "Paired", "Viridis",
"Plasma", "Hot", "Cool", "Pink", "Jet"],
width=500, num_items=3, callback=colormap_callback)
add_plot("Plot", height=-1)
with tab("Simple Plots"):
add_simple_plot("Simpleplot1", value=[0.3, 0.9, 2.5, 8.9], height=300)
add_simple_plot("Simpleplot2", value=[0.3, 0.9, 2.5, 8.9], overlay="Overlaying",
height=180, histogram=True)
start_dearpygui(primary_window="Main Window")
| 42.981132 | 122 | 0.585601 | [ "MIT" ] | DynamicCai/Dynamic-Explorer | test/plot_example.py | 2,278 | Python |
# Read PV metadata and timeseries data
# Based on code in https://github.com/openclimatefix/pvoutput
# E.g. https://nbviewer.jupyter.org/github/openclimatefix/pvoutput/blob/master/examples/analyse_PV_data_for_9th_Aug_2019.ipynb
import cartopy.crs as ccrs
import numpy as np
import pandas as pd
import xarray as xr
METADATA_FILENAME = "data/PV/PVOutput.org/UK_PV_metadata.csv"
PV_STATS_FILENAME = "data/PV/PVOutput.org/UK_PV_stats.csv"
TIMESERIES_FILENAME = "data/PV/PVOutput.org/UK_PV_timeseries_batch.nc"
START_DATE = "2019-08-09"
END_DATE = "2019-08-09"
def load_pv_systems(
metadata_filename: str = METADATA_FILENAME,
stats_filename: str = PV_STATS_FILENAME,
timeseries_filename: str = TIMESERIES_FILENAME,
) -> xr.Dataset:
"""Load metadata about PV systems"""
# Load metadata
pv_metadata = pd.read_csv(metadata_filename, index_col="system_id")
# Load stats
pv_stats = pd.read_csv(
stats_filename,
index_col="system_id",
parse_dates=["actual_date_from", "actual_date_to", "record_efficiency_date"],
)
# Join
pv_systems = pv_metadata.join(
pv_stats[["actual_date_from", "actual_date_to", "outputs"]], how="left"
)
# Filter out systems with only a few outputs, and with no location
pv_systems_filtered = pv_systems.query(
"status_interval_minutes <= 60 and outputs > 100"
)
pv_systems_filtered = pv_systems_filtered.dropna(subset=["latitude", "longitude"])
# Restrict to systems that have timeseries data
system_ids = _get_system_ids_dataframe_from_timeseries(timeseries_filename)
pv_systems_filtered = pv_systems_filtered.join(system_ids, how="inner")
# Retain salient columns
pv_systems_filtered = pv_systems_filtered[["system_name", "latitude", "longitude"]]
# Convert to xarray
ds = xr.Dataset.from_dataframe(pv_systems_filtered)
# Convert latitude/longitude to easting/northing
ds = _transform_pv_systems(ds)
return ds
def _get_system_ids_dataframe_from_timeseries(
timeseries_filename: str = TIMESERIES_FILENAME,
) -> pd.DataFrame:
"""Get all the PV system IDs from the timeseries file"""
ds = xr.open_dataset(timeseries_filename)
system_ids = [int(x) for x in list(ds.data_vars.keys())]
df = pd.DataFrame({"system_id": system_ids})
df = df.set_index("system_id")
return df
def _transform_pv_systems(pv_systems: xr.Dataset) -> xr.Dataset:
"""Transform the system locations into the same coordinate system used by UKV"""
system_latitudes, system_longitudes = (
pv_systems["latitude"].values,
pv_systems["longitude"].values,
)
wgs84 = ccrs.Geodetic()
ukv_crs = ccrs.OSGB(approx=False)
locs = ukv_crs.transform_points(
src_crs=wgs84,
x=np.asanyarray(system_longitudes),
y=np.asanyarray(system_latitudes),
)[:, :-1]
new_coords = {
"easting": (["system_id"], locs[:, 0].astype("int32")),
"northing": (["system_id"], locs[:, 1].astype("int32")),
}
return pv_systems.assign_coords(new_coords)
# This is unused, but a useful check
def _transform_pv_systems_pyproj(pv_systems: xr.Dataset) -> xr.Dataset:
"""Transform the system locations into the same coordinate system used by UKV, using pyproj"""
import pyproj
system_latitudes, system_longitudes = (
pv_systems["latitude"].values,
pv_systems["longitude"].values,
)
transformer = pyproj.Transformer.from_crs("epsg:4326", "epsg:27700", always_xy=True)
locs = transformer.transform(
np.asanyarray(system_longitudes), np.asanyarray(system_latitudes)
)
print(locs)
new_coords = {
"easting": (["system_id"], locs[0]),
"northing": (["system_id"], locs[1]),
}
return pv_systems.assign_coords(new_coords)
def load_pv_timeseries(
start_date: str,
end_date: str,
metadata_filename: str = METADATA_FILENAME,
stats_filename: str = PV_STATS_FILENAME,
timeseries_filename: str = TIMESERIES_FILENAME,
) -> xr.Dataset:
"""Load the PV timeseries as an xarray dataset, restricted to a given time range, and including location metadata."""
ds = xr.open_dataset(timeseries_filename)
# Subset to given time range
subset = ds.sel(datetime=slice(start_date, end_date))
# Drop systems with no readings during this time
# I couldn't see how to do this with xarray, see https://stackoverflow.com/questions/52553925/python-xarray-remove-coordinates-with-all-missing-variables
df = subset.to_dataframe()
df = df.dropna(axis=1, how="all")
# Restrict to systems that are in the intersection of those in PV metadata and PV timeseries
pv_df = load_pv_systems(
metadata_filename, stats_filename, timeseries_filename
).to_dataframe()
pv_metadata_system_ids = pv_df.index.tolist() # indexed by system_id
timeseries_system_ids = [int(system_id) for system_id in df.columns.tolist()]
system_ids = list(
set(pv_metadata_system_ids).intersection(set(timeseries_system_ids))
)
system_id_columns = [str(system_id) for system_id in system_ids]
df = df[system_id_columns]
# Reshape table into tall and narrow form - this avoids one data variable per system in xarray
df["datetime"] = df.index
df = pd.melt(df, id_vars=["datetime"], var_name="system_id", value_name="pv_yield")
df = df.astype({"system_id": "int64"})
df = df.set_index(["system_id", "datetime"])
# Convert back to xarray
ds = xr.Dataset.from_dataframe(df)
# Add lat/long and easting/northing coordinates by doing a pandas lookup for each system
new_coords = {
"latitude": (
["system_id"],
pv_df.lookup(system_ids, ["latitude"] * len(system_ids)),
),
"longitude": (
["system_id"],
pv_df.lookup(system_ids, ["longitude"] * len(system_ids)),
),
"easting": (
["system_id"],
pv_df.lookup(system_ids, ["easting"] * len(system_ids)),
),
"northing": (
["system_id"],
pv_df.lookup(system_ids, ["northing"] * len(system_ids)),
),
}
ds = ds.assign_coords(new_coords)
return ds
if __name__ == "__main__":
pv_timeseries = load_pv_timeseries(START_DATE, END_DATE)
print(pv_timeseries)
pv_timeseries.to_netcdf("data/tmp/pv_timeseries.nc")
| 33.757895 | 157 | 0.68787 | [ "MIT" ] | openclimatefix/predict_pv_yield_nwp | predict_pv_yield_nwp/pv.py | 6,414 | Python |
"""
noxfile
~~~~~~~
Nox configuration script
"""
# pylint: disable=resource-leakage,3rd-party-module-not-gated
import datetime
import glob
import os
import shutil
import sys
import tempfile
# fmt: off
if __name__ == "__main__":
sys.stderr.write(
"Do not execute this file directly. Use nox instead, it will know how to handle this file\n"
)
sys.stderr.flush()
exit(1)
# fmt: on
import nox # isort:skip
from nox.command import CommandFailed # isort:skip
IS_PY3 = sys.version_info > (2,)
# Be verbose when runing under a CI context
CI_RUN = (
os.environ.get("JENKINS_URL")
or os.environ.get("CI")
or os.environ.get("DRONE") is not None
)
PIP_INSTALL_SILENT = CI_RUN is False
SKIP_REQUIREMENTS_INSTALL = "SKIP_REQUIREMENTS_INSTALL" in os.environ
EXTRA_REQUIREMENTS_INSTALL = os.environ.get("EXTRA_REQUIREMENTS_INSTALL")
# Global Path Definitions
REPO_ROOT = os.path.abspath(os.path.dirname(__file__))
SITECUSTOMIZE_DIR = os.path.join(REPO_ROOT, "tests", "support", "coverage")
IS_DARWIN = sys.platform.lower().startswith("darwin")
IS_WINDOWS = sys.platform.lower().startswith("win")
IS_FREEBSD = sys.platform.lower().startswith("freebsd")
# Python versions to run against
_PYTHON_VERSIONS = ("3", "3.5", "3.6", "3.7", "3.8", "3.9")
# Nox options
# Reuse existing virtualenvs
nox.options.reuse_existing_virtualenvs = True
# Don't fail on missing interpreters
nox.options.error_on_missing_interpreters = False
# Change current directory to REPO_ROOT
os.chdir(REPO_ROOT)
RUNTESTS_LOGFILE = os.path.join(
"artifacts",
"logs",
"runtests-{}.log".format(datetime.datetime.now().strftime("%Y%m%d%H%M%S.%f")),
)
# Prevent Python from writing bytecode
os.environ["PYTHONDONTWRITEBYTECODE"] = "1"
def find_session_runner(session, name, **kwargs):
for s, _ in session._runner.manifest.list_all_sessions():
if name not in s.signatures:
continue
for signature in s.signatures:
for key, value in kwargs.items():
param = "{}={!r}".format(key, value)
if IS_PY3:
                    # Under Python 2, the repr of unicode strings is always "u" prefixed, i.e. u'a string'.
param = param.replace("u'", "'")
if param not in signature:
break
else:
return s
continue
session.error(
"Could not find a nox session by the name {!r} with the following keyword arguments: {!r}".format(
name, kwargs
)
)
def _create_ci_directories():
for dirname in ("logs", "coverage", "xml-unittests-output"):
path = os.path.join("artifacts", dirname)
if not os.path.exists(path):
os.makedirs(path)
def _get_session_python_version_info(session):
try:
version_info = session._runner._real_python_version_info
except AttributeError:
old_install_only_value = session._runner.global_config.install_only
try:
# Force install only to be false for the following chunk of code
# For additional information as to why see:
# https://github.com/theacodes/nox/pull/181
session._runner.global_config.install_only = False
session_py_version = session.run(
"python",
"-c",
'import sys; sys.stdout.write("{}.{}.{}".format(*sys.version_info))',
silent=True,
log=False,
)
version_info = tuple(
int(part) for part in session_py_version.split(".") if part.isdigit()
)
session._runner._real_python_version_info = version_info
finally:
session._runner.global_config.install_only = old_install_only_value
return version_info
def _get_session_python_site_packages_dir(session):
try:
site_packages_dir = session._runner._site_packages_dir
except AttributeError:
old_install_only_value = session._runner.global_config.install_only
try:
# Force install only to be false for the following chunk of code
# For additional information as to why see:
# https://github.com/theacodes/nox/pull/181
session._runner.global_config.install_only = False
site_packages_dir = session.run(
"python",
"-c",
"import sys; from distutils.sysconfig import get_python_lib; sys.stdout.write(get_python_lib())",
silent=True,
log=False,
)
session._runner._site_packages_dir = site_packages_dir
finally:
session._runner.global_config.install_only = old_install_only_value
return site_packages_dir
def _get_pydir(session):
version_info = _get_session_python_version_info(session)
if version_info < (3, 5):
session.error("Only Python >= 3.5 is supported")
if IS_WINDOWS and version_info < (3, 6):
session.error("Only Python >= 3.6 is supported on Windows")
return "py{}.{}".format(*version_info)
def _install_system_packages(session):
"""
Because some python packages are provided by the distribution and cannot
be pip installed, and because we don't want the whole system python packages
on our virtualenvs, we copy the required system python packages into
the virtualenv
"""
version_info = _get_session_python_version_info(session)
py_version_keys = ["{}".format(*version_info), "{}.{}".format(*version_info)]
session_site_packages_dir = _get_session_python_site_packages_dir(session)
session_site_packages_dir = os.path.relpath(session_site_packages_dir, REPO_ROOT)
for py_version in py_version_keys:
dist_packages_path = "/usr/lib/python{}/dist-packages".format(py_version)
if not os.path.isdir(dist_packages_path):
continue
for aptpkg in glob.glob(os.path.join(dist_packages_path, "*apt*")):
src = os.path.realpath(aptpkg)
dst = os.path.join(session_site_packages_dir, os.path.basename(src))
if os.path.exists(dst):
session.log("Not overwritting already existing %s with %s", dst, src)
continue
session.log("Copying %s into %s", src, dst)
if os.path.isdir(src):
shutil.copytree(src, dst)
else:
shutil.copyfile(src, dst)
def _get_pip_requirements_file(session, transport, crypto=None, requirements_type="ci"):
assert requirements_type in ("ci", "pkg")
pydir = _get_pydir(session)
if IS_WINDOWS:
if crypto is None:
_requirements_file = os.path.join(
"requirements",
"static",
requirements_type,
pydir,
"{}-windows.txt".format(transport),
)
if os.path.exists(_requirements_file):
return _requirements_file
_requirements_file = os.path.join(
"requirements", "static", requirements_type, pydir, "windows.txt"
)
if os.path.exists(_requirements_file):
return _requirements_file
_requirements_file = os.path.join(
"requirements", "static", requirements_type, pydir, "windows-crypto.txt"
)
if os.path.exists(_requirements_file):
return _requirements_file
elif IS_DARWIN:
if crypto is None:
_requirements_file = os.path.join(
"requirements",
"static",
requirements_type,
pydir,
"{}-darwin.txt".format(transport),
)
if os.path.exists(_requirements_file):
return _requirements_file
_requirements_file = os.path.join(
"requirements", "static", requirements_type, pydir, "darwin.txt"
)
if os.path.exists(_requirements_file):
return _requirements_file
_requirements_file = os.path.join(
"requirements", "static", requirements_type, pydir, "darwin-crypto.txt"
)
if os.path.exists(_requirements_file):
return _requirements_file
elif IS_FREEBSD:
if crypto is None:
_requirements_file = os.path.join(
"requirements",
"static",
requirements_type,
pydir,
"{}-freebsd.txt".format(transport),
)
if os.path.exists(_requirements_file):
return _requirements_file
_requirements_file = os.path.join(
"requirements", "static", requirements_type, pydir, "freebsd.txt"
)
if os.path.exists(_requirements_file):
return _requirements_file
_requirements_file = os.path.join(
"requirements", "static", requirements_type, pydir, "freebsd-crypto.txt"
)
if os.path.exists(_requirements_file):
return _requirements_file
else:
_install_system_packages(session)
if crypto is None:
_requirements_file = os.path.join(
"requirements",
"static",
requirements_type,
pydir,
"{}-linux.txt".format(transport),
)
if os.path.exists(_requirements_file):
return _requirements_file
_requirements_file = os.path.join(
"requirements", "static", requirements_type, pydir, "linux.txt"
)
if os.path.exists(_requirements_file):
return _requirements_file
_requirements_file = os.path.join(
"requirements", "static", requirements_type, pydir, "linux-crypto.txt"
)
if os.path.exists(_requirements_file):
return _requirements_file
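# Resolution sketch for the helper above, assuming a Linux CI run with pydir "py3.7" and
# transport "zeromq" (both values are illustrative). With crypto=None it tries, in order,
#   requirements/static/ci/py3.7/zeromq-linux.txt   (transport-specific file)
#   requirements/static/ci/py3.7/linux.txt          (platform default)
# and only falls back to requirements/static/ci/py3.7/linux-crypto.txt when a crypto
# backend is requested or neither of the first two files exists.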
def _upgrade_pip_setuptools_and_wheel(session):
if SKIP_REQUIREMENTS_INSTALL:
session.log(
"Skipping Python Requirements because SKIP_REQUIREMENTS_INSTALL was found in the environ"
)
return False
install_command = [
"python",
"-m",
"pip",
"install",
"--progress-bar=off",
"-U",
"pip>=20.2.4,<21.2",
"setuptools!=50.*,!=51.*,!=52.*",
"wheel",
]
session.run(*install_command, silent=PIP_INSTALL_SILENT)
return True
def _install_requirements(
session, transport, *extra_requirements, requirements_type="ci"
):
if not _upgrade_pip_setuptools_and_wheel(session):
return
# Install requirements
requirements_file = _get_pip_requirements_file(
session, transport, requirements_type=requirements_type
)
install_command = ["--progress-bar=off", "-r", requirements_file]
session.install(*install_command, silent=PIP_INSTALL_SILENT)
if extra_requirements:
install_command = ["--progress-bar=off"]
install_command += list(extra_requirements)
session.install(*install_command, silent=PIP_INSTALL_SILENT)
if EXTRA_REQUIREMENTS_INSTALL:
session.log(
"Installing the following extra requirements because the"
" EXTRA_REQUIREMENTS_INSTALL environment variable was set: %s",
EXTRA_REQUIREMENTS_INSTALL,
)
        # We pass --constraint in this step so that, if any of these extra dependencies
        # shares a requirement we're already using, the locked version is kept.
install_command = ["--progress-bar=off", "--constraint", requirements_file]
install_command += EXTRA_REQUIREMENTS_INSTALL.split()
session.install(*install_command, silent=PIP_INSTALL_SILENT)
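# Hedged example of the EXTRA_REQUIREMENTS_INSTALL escape hatch handled above (the package
# name is made up for illustration):
#     EXTRA_REQUIREMENTS_INSTALL="flaky" nox -e "pytest-3.7(coverage=False)"
# installs the extra package on top of the locked requirements, while --constraint keeps
# any shared dependencies at the versions pinned in the requirements file.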
def _run_with_coverage(session, *test_cmd, env=None):
if SKIP_REQUIREMENTS_INSTALL is False:
session.install(
"--progress-bar=off", "coverage==5.2", silent=PIP_INSTALL_SILENT
)
session.run("coverage", "erase")
python_path_env_var = os.environ.get("PYTHONPATH") or None
if python_path_env_var is None:
python_path_env_var = SITECUSTOMIZE_DIR
else:
python_path_entries = python_path_env_var.split(os.pathsep)
if SITECUSTOMIZE_DIR in python_path_entries:
python_path_entries.remove(SITECUSTOMIZE_DIR)
python_path_entries.insert(0, SITECUSTOMIZE_DIR)
python_path_env_var = os.pathsep.join(python_path_entries)
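    # Putting SITECUSTOMIZE_DIR first on PYTHONPATH ensures the bundled sitecustomize
    # module is importable in every spawned Python process, which is what lets
    # COVERAGE_PROCESS_START (set below) start coverage measurement in subprocesses too.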
if env is None:
env = {}
env.update(
{
# The updated python path so that sitecustomize is importable
"PYTHONPATH": python_path_env_var,
# The full path to the .coverage data file. Makes sure we always write
# them to the same directory
"COVERAGE_FILE": os.path.abspath(os.path.join(REPO_ROOT, ".coverage")),
# Instruct sub processes to also run under coverage
"COVERAGE_PROCESS_START": os.path.join(REPO_ROOT, ".coveragerc"),
}
)
try:
session.run(*test_cmd, env=env)
finally:
# Always combine and generate the XML coverage report
try:
session.run("coverage", "combine")
except CommandFailed:
# Sometimes some of the coverage files are corrupt which would trigger a CommandFailed
# exception
pass
# Generate report for salt code coverage
session.run(
"coverage",
"xml",
"-o",
os.path.join("artifacts", "coverage", "salt.xml"),
"--omit=tests/*",
"--include=salt/*",
)
# Generate report for tests code coverage
session.run(
"coverage",
"xml",
"-o",
os.path.join("artifacts", "coverage", "tests.xml"),
"--omit=salt/*",
"--include=tests/*",
)
# Move the coverage DB to artifacts/coverage in order for it to be archived by CI
shutil.move(".coverage", os.path.join("artifacts", "coverage", ".coverage"))
def _runtests(session):
session.error(
"""\n\nruntests.py support has been removed from Salt. Please try `nox -e '{0}'` """
"""or `nox -e '{0}' -- --help` to know more about the supported CLI flags.\n"""
"For more information, please check "
"https://docs.saltproject.io/en/latest/topics/development/tests/index.html#running-the-tests\n..".format(
session._runner.global_config.sessions[0].replace("runtests", "pytest")
)
)
@nox.session(python=_PYTHON_VERSIONS, name="runtests-parametrized")
@nox.parametrize("coverage", [False, True])
@nox.parametrize("transport", ["zeromq", "tcp"])
@nox.parametrize("crypto", [None, "m2crypto", "pycryptodome"])
def runtests_parametrized(session, coverage, transport, crypto):
"""
DO NOT CALL THIS NOX SESSION DIRECTLY
"""
_runtests(session)
@nox.session(python=_PYTHON_VERSIONS)
@nox.parametrize("coverage", [False, True])
def runtests(session, coverage):
"""
runtests.py session with zeromq transport and default crypto
"""
_runtests(session)
@nox.session(python=_PYTHON_VERSIONS, name="runtests-tcp")
@nox.parametrize("coverage", [False, True])
def runtests_tcp(session, coverage):
"""
runtests.py session with TCP transport and default crypto
"""
_runtests(session)
@nox.session(python=_PYTHON_VERSIONS, name="runtests-zeromq")
@nox.parametrize("coverage", [False, True])
def runtests_zeromq(session, coverage):
"""
runtests.py session with zeromq transport and default crypto
"""
_runtests(session)
@nox.session(python=_PYTHON_VERSIONS, name="runtests-m2crypto")
@nox.parametrize("coverage", [False, True])
def runtests_m2crypto(session, coverage):
"""
runtests.py session with zeromq transport and m2crypto
"""
_runtests(session)
@nox.session(python=_PYTHON_VERSIONS, name="runtests-tcp-m2crypto")
@nox.parametrize("coverage", [False, True])
def runtests_tcp_m2crypto(session, coverage):
"""
runtests.py session with TCP transport and m2crypto
"""
_runtests(session)
@nox.session(python=_PYTHON_VERSIONS, name="runtests-zeromq-m2crypto")
@nox.parametrize("coverage", [False, True])
def runtests_zeromq_m2crypto(session, coverage):
"""
runtests.py session with zeromq transport and m2crypto
"""
_runtests(session)
@nox.session(python=_PYTHON_VERSIONS, name="runtests-pycryptodome")
@nox.parametrize("coverage", [False, True])
def runtests_pycryptodome(session, coverage):
"""
runtests.py session with zeromq transport and pycryptodome
"""
_runtests(session)
@nox.session(python=_PYTHON_VERSIONS, name="runtests-tcp-pycryptodome")
@nox.parametrize("coverage", [False, True])
def runtests_tcp_pycryptodome(session, coverage):
"""
runtests.py session with TCP transport and pycryptodome
"""
_runtests(session)
@nox.session(python=_PYTHON_VERSIONS, name="runtests-zeromq-pycryptodome")
@nox.parametrize("coverage", [False, True])
def runtests_zeromq_pycryptodome(session, coverage):
"""
runtests.py session with zeromq transport and pycryptodome
"""
_runtests(session)
@nox.session(python=_PYTHON_VERSIONS, name="runtests-cloud")
@nox.parametrize("coverage", [False, True])
def runtests_cloud(session, coverage):
"""
runtests.py cloud tests session
"""
_runtests(session)
@nox.session(python=_PYTHON_VERSIONS, name="runtests-tornado")
@nox.parametrize("coverage", [False, True])
def runtests_tornado(session, coverage):
"""
runtests.py tornado tests session
"""
_runtests(session)
@nox.session(python=_PYTHON_VERSIONS, name="pytest-parametrized")
@nox.parametrize("coverage", [False, True])
@nox.parametrize("transport", ["zeromq", "tcp"])
@nox.parametrize("crypto", [None, "m2crypto", "pycryptodome"])
def pytest_parametrized(session, coverage, transport, crypto):
"""
DO NOT CALL THIS NOX SESSION DIRECTLY
"""
# Install requirements
_install_requirements(session, transport)
if crypto:
session.run(
"pip",
"uninstall",
"-y",
"m2crypto",
"pycrypto",
"pycryptodome",
"pycryptodomex",
silent=True,
)
install_command = [
"--progress-bar=off",
"--constraint",
_get_pip_requirements_file(session, transport, crypto=True),
]
install_command.append(crypto)
session.install(*install_command, silent=PIP_INSTALL_SILENT)
cmd_args = [
"--rootdir",
REPO_ROOT,
"--log-file={}".format(RUNTESTS_LOGFILE),
"--log-file-level=debug",
"--show-capture=no",
"-ra",
"-s",
"--transport={}".format(transport),
] + session.posargs
_pytest(session, coverage, cmd_args)
@nox.session(python=_PYTHON_VERSIONS)
@nox.parametrize("coverage", [False, True])
def pytest(session, coverage):
"""
pytest session with zeromq transport and default crypto
"""
session.notify(
find_session_runner(
session,
"pytest-parametrized-{}".format(session.python),
coverage=coverage,
crypto=None,
transport="zeromq",
)
)
@nox.session(python=_PYTHON_VERSIONS, name="pytest-tcp")
@nox.parametrize("coverage", [False, True])
def pytest_tcp(session, coverage):
"""
pytest session with TCP transport and default crypto
"""
session.notify(
find_session_runner(
session,
"pytest-parametrized-{}".format(session.python),
coverage=coverage,
crypto=None,
transport="tcp",
)
)
@nox.session(python=_PYTHON_VERSIONS, name="pytest-zeromq")
@nox.parametrize("coverage", [False, True])
def pytest_zeromq(session, coverage):
"""
pytest session with zeromq transport and default crypto
"""
session.notify(
find_session_runner(
session,
"pytest-parametrized-{}".format(session.python),
coverage=coverage,
crypto=None,
transport="zeromq",
)
)
@nox.session(python=_PYTHON_VERSIONS, name="pytest-m2crypto")
@nox.parametrize("coverage", [False, True])
def pytest_m2crypto(session, coverage):
"""
pytest session with zeromq transport and m2crypto
"""
session.notify(
find_session_runner(
session,
"pytest-parametrized-{}".format(session.python),
coverage=coverage,
crypto="m2crypto",
transport="zeromq",
)
)
@nox.session(python=_PYTHON_VERSIONS, name="pytest-tcp-m2crypto")
@nox.parametrize("coverage", [False, True])
def pytest_tcp_m2crypto(session, coverage):
"""
pytest session with TCP transport and m2crypto
"""
session.notify(
find_session_runner(
session,
"pytest-parametrized-{}".format(session.python),
coverage=coverage,
crypto="m2crypto",
transport="tcp",
)
)
@nox.session(python=_PYTHON_VERSIONS, name="pytest-zeromq-m2crypto")
@nox.parametrize("coverage", [False, True])
def pytest_zeromq_m2crypto(session, coverage):
"""
pytest session with zeromq transport and m2crypto
"""
session.notify(
find_session_runner(
session,
"pytest-parametrized-{}".format(session.python),
coverage=coverage,
crypto="m2crypto",
transport="zeromq",
)
)
@nox.session(python=_PYTHON_VERSIONS, name="pytest-pycryptodome")
@nox.parametrize("coverage", [False, True])
def pytest_pycryptodome(session, coverage):
"""
pytest session with zeromq transport and pycryptodome
"""
session.notify(
find_session_runner(
session,
"pytest-parametrized-{}".format(session.python),
coverage=coverage,
crypto="pycryptodome",
transport="zeromq",
)
)
@nox.session(python=_PYTHON_VERSIONS, name="pytest-tcp-pycryptodome")
@nox.parametrize("coverage", [False, True])
def pytest_tcp_pycryptodome(session, coverage):
"""
pytest session with TCP transport and pycryptodome
"""
session.notify(
find_session_runner(
session,
"pytest-parametrized-{}".format(session.python),
coverage=coverage,
crypto="pycryptodome",
transport="tcp",
)
)
@nox.session(python=_PYTHON_VERSIONS, name="pytest-zeromq-pycryptodome")
@nox.parametrize("coverage", [False, True])
def pytest_zeromq_pycryptodome(session, coverage):
"""
pytest session with zeromq transport and pycryptodome
"""
session.notify(
find_session_runner(
session,
"pytest-parametrized-{}".format(session.python),
coverage=coverage,
crypto="pycryptodome",
transport="zeromq",
)
)
@nox.session(python=_PYTHON_VERSIONS, name="pytest-cloud")
@nox.parametrize("coverage", [False, True])
def pytest_cloud(session, coverage):
"""
pytest cloud tests session
"""
# Install requirements
if _upgrade_pip_setuptools_and_wheel(session):
_install_requirements(session, "zeromq")
requirements_file = os.path.join(
"requirements", "static", "ci", _get_pydir(session), "cloud.txt"
)
install_command = ["--progress-bar=off", "-r", requirements_file]
session.install(*install_command, silent=PIP_INSTALL_SILENT)
cmd_args = [
"--rootdir",
REPO_ROOT,
"--log-file={}".format(RUNTESTS_LOGFILE),
"--log-file-level=debug",
"--show-capture=no",
"-ra",
"-s",
"--run-expensive",
"-k",
"cloud",
] + session.posargs
_pytest(session, coverage, cmd_args)
@nox.session(python=_PYTHON_VERSIONS, name="pytest-tornado")
@nox.parametrize("coverage", [False, True])
def pytest_tornado(session, coverage):
"""
pytest tornado tests session
"""
# Install requirements
if _upgrade_pip_setuptools_and_wheel(session):
_install_requirements(session, "zeromq")
session.install(
"--progress-bar=off", "tornado==5.0.2", silent=PIP_INSTALL_SILENT
)
session.install(
"--progress-bar=off", "pyzmq==17.0.0", silent=PIP_INSTALL_SILENT
)
cmd_args = [
"--rootdir",
REPO_ROOT,
"--log-file={}".format(RUNTESTS_LOGFILE),
"--log-file-level=debug",
"--show-capture=no",
"-ra",
"-s",
] + session.posargs
_pytest(session, coverage, cmd_args)
def _pytest(session, coverage, cmd_args):
# Create required artifacts directories
_create_ci_directories()
env = {"CI_RUN": "1" if CI_RUN else "0"}
if IS_DARWIN:
# Don't nuke our multiprocessing efforts objc!
# https://stackoverflow.com/questions/50168647/multiprocessing-causes-python-to-crash-and-gives-an-error-may-have-been-in-progr
env["OBJC_DISABLE_INITIALIZE_FORK_SAFETY"] = "YES"
if CI_RUN:
# We'll print out the collected tests on CI runs.
# This will show a full list of what tests are going to run, in the right order, which, in case
# of a test suite hang, helps us pinpoint which test is hanging
session.run(
"python", "-m", "pytest", *(cmd_args + ["--collect-only", "-qqq"]), env=env
)
try:
if coverage is True:
_run_with_coverage(
session,
"python",
"-m",
"coverage",
"run",
"-m",
"pytest",
"--showlocals",
*cmd_args,
env=env
)
else:
session.run("python", "-m", "pytest", *cmd_args, env=env)
except CommandFailed: # pylint: disable=try-except-raise
# Not rerunning failed tests for now
raise
# pylint: disable=unreachable
# Re-run failed tests
session.log("Re-running failed tests")
for idx, parg in enumerate(cmd_args):
if parg.startswith("--junitxml="):
cmd_args[idx] = parg.replace(".xml", "-rerun-failed.xml")
cmd_args.append("--lf")
if coverage is True:
_run_with_coverage(
session,
"python",
"-m",
"coverage",
"run",
"-m",
"pytest",
"--showlocals",
*cmd_args
)
else:
session.run("python", "-m", "pytest", *cmd_args, env=env)
# pylint: enable=unreachable
class Tee:
"""
Python class to mimic linux tee behaviour
"""
def __init__(self, first, second):
self._first = first
self._second = second
    def write(self, b):
        wrote = self._first.write(b)
        self._first.flush()
        self._second.write(b)
        self._second.flush()
        return wrote
def fileno(self):
return self._first.fileno()
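# The Tee helper above is used by _lint() below so that pylint's output is written both to
# the console and to a temporary file (later dumped to PYLINT_REPORT), much like piping the
# command through the Unix `tee` utility.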
def _lint(session, rcfile, flags, paths, tee_output=True):
if _upgrade_pip_setuptools_and_wheel(session):
_install_requirements(session, "zeromq")
requirements_file = os.path.join(
"requirements", "static", "ci", _get_pydir(session), "lint.txt"
)
install_command = ["--progress-bar=off", "-r", requirements_file]
session.install(*install_command, silent=PIP_INSTALL_SILENT)
if tee_output:
session.run("pylint", "--version")
pylint_report_path = os.environ.get("PYLINT_REPORT")
cmd_args = ["pylint", "--rcfile={}".format(rcfile)] + list(flags) + list(paths)
cmd_kwargs = {"env": {"PYTHONUNBUFFERED": "1"}}
if tee_output:
stdout = tempfile.TemporaryFile(mode="w+b")
cmd_kwargs["stdout"] = Tee(stdout, sys.__stdout__)
lint_failed = False
try:
session.run(*cmd_args, **cmd_kwargs)
except CommandFailed:
lint_failed = True
raise
finally:
if tee_output:
stdout.seek(0)
contents = stdout.read()
if contents:
if IS_PY3:
contents = contents.decode("utf-8")
else:
contents = contents.encode("utf-8")
sys.stdout.write(contents)
sys.stdout.flush()
if pylint_report_path:
# Write report
with open(pylint_report_path, "w") as wfh:
wfh.write(contents)
session.log("Report file written to %r", pylint_report_path)
stdout.close()
def _lint_pre_commit(session, rcfile, flags, paths):
if "VIRTUAL_ENV" not in os.environ:
session.error(
"This should be running from within a virtualenv and "
"'VIRTUAL_ENV' was not found as an environment variable."
)
if "pre-commit" not in os.environ["VIRTUAL_ENV"]:
session.error(
"This should be running from within a pre-commit virtualenv and "
"'VIRTUAL_ENV'({}) does not appear to be a pre-commit virtualenv.".format(
os.environ["VIRTUAL_ENV"]
)
)
from nox.virtualenv import VirtualEnv
# Let's patch nox to make it run inside the pre-commit virtualenv
try:
session._runner.venv = VirtualEnv( # pylint: disable=unexpected-keyword-arg
os.environ["VIRTUAL_ENV"],
interpreter=session._runner.func.python,
reuse_existing=True,
venv=True,
)
except TypeError:
# This is still nox-py2
session._runner.venv = VirtualEnv(
os.environ["VIRTUAL_ENV"],
interpreter=session._runner.func.python,
reuse_existing=True,
)
_lint(session, rcfile, flags, paths, tee_output=False)
@nox.session(python="3")
def lint(session):
"""
    Run PyLint against Salt and its test suite. Set PYLINT_REPORT to a path to capture output.
"""
session.notify("lint-salt-{}".format(session.python))
session.notify("lint-tests-{}".format(session.python))
@nox.session(python="3", name="lint-salt")
def lint_salt(session):
"""
Run PyLint against Salt. Set PYLINT_REPORT to a path to capture output.
"""
flags = ["--disable=I"]
if session.posargs:
paths = session.posargs
else:
paths = ["setup.py", "noxfile.py", "salt/", "tasks/"]
_lint(session, ".pylintrc", flags, paths)
@nox.session(python="3", name="lint-tests")
def lint_tests(session):
"""
    Run PyLint against Salt and its test suite. Set PYLINT_REPORT to a path to capture output.
"""
flags = ["--disable=I"]
if session.posargs:
paths = session.posargs
else:
paths = ["tests/"]
_lint(session, ".pylintrc", flags, paths)
@nox.session(python=False, name="lint-salt-pre-commit")
def lint_salt_pre_commit(session):
"""
Run PyLint against Salt. Set PYLINT_REPORT to a path to capture output.
"""
flags = ["--disable=I"]
if session.posargs:
paths = session.posargs
else:
paths = ["setup.py", "noxfile.py", "salt/"]
_lint_pre_commit(session, ".pylintrc", flags, paths)
@nox.session(python=False, name="lint-tests-pre-commit")
def lint_tests_pre_commit(session):
"""
    Run PyLint against Salt and its test suite. Set PYLINT_REPORT to a path to capture output.
"""
flags = ["--disable=I"]
if session.posargs:
paths = session.posargs
else:
paths = ["tests/"]
_lint_pre_commit(session, ".pylintrc", flags, paths)
@nox.session(python="3")
@nox.parametrize("clean", [False, True])
@nox.parametrize("update", [False, True])
@nox.parametrize("compress", [False, True])
def docs(session, compress, update, clean):
"""
Build Salt's Documentation
"""
session.notify("docs-html-{}(compress={})".format(session.python, compress))
session.notify(
find_session_runner(
session,
"docs-man-{}".format(session.python),
compress=compress,
update=update,
clean=clean,
)
)
@nox.session(name="docs-html", python="3")
@nox.parametrize("clean", [False, True])
@nox.parametrize("compress", [False, True])
def docs_html(session, compress, clean):
"""
Build Salt's HTML Documentation
"""
if _upgrade_pip_setuptools_and_wheel(session):
requirements_file = os.path.join(
"requirements", "static", "ci", _get_pydir(session), "docs.txt"
)
install_command = ["--progress-bar=off", "-r", requirements_file]
session.install(*install_command, silent=PIP_INSTALL_SILENT)
os.chdir("doc/")
if clean:
session.run("make", "clean", external=True)
session.run("make", "html", "SPHINXOPTS=-W", external=True)
if compress:
session.run("tar", "-cJvf", "html-archive.tar.xz", "_build/html", external=True)
os.chdir("..")
@nox.session(name="docs-man", python="3")
@nox.parametrize("clean", [False, True])
@nox.parametrize("update", [False, True])
@nox.parametrize("compress", [False, True])
def docs_man(session, compress, update, clean):
"""
Build Salt's Manpages Documentation
"""
if _upgrade_pip_setuptools_and_wheel(session):
requirements_file = os.path.join(
"requirements", "static", "ci", _get_pydir(session), "docs.txt"
)
install_command = ["--progress-bar=off", "-r", requirements_file]
session.install(*install_command, silent=PIP_INSTALL_SILENT)
os.chdir("doc/")
if clean:
session.run("make", "clean", external=True)
session.run("make", "man", "SPHINXOPTS=-W", external=True)
if update:
session.run("rm", "-rf", "man/", external=True)
session.run("cp", "-Rp", "_build/man", "man/", external=True)
if compress:
session.run("tar", "-cJvf", "man-archive.tar.xz", "_build/man", external=True)
os.chdir("..")
@nox.session(name="invoke", python="3")
def invoke(session):
"""
Run invoke tasks
"""
if _upgrade_pip_setuptools_and_wheel(session):
requirements_file = os.path.join(
"requirements", "static", "ci", _get_pydir(session), "invoke.txt"
)
install_command = ["--progress-bar=off", "-r", requirements_file]
session.install(*install_command, silent=PIP_INSTALL_SILENT)
cmd = ["inv"]
files = []
    # Unfortunately, invoke doesn't support the nargs functionality like argparse does.
# Let's make it behave properly
for idx, posarg in enumerate(session.posargs):
if idx == 0:
cmd.append(posarg)
continue
if posarg.startswith("--"):
cmd.append(posarg)
continue
files.append(posarg)
if files:
cmd.append("--files={}".format(" ".join(files)))
session.run(*cmd)
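# A sketch of the argument rewriting above (task and file names are hypothetical):
#     nox -e invoke -- docs.check --ignore-warnings foo.rst bar.rst
# is turned into
#     inv docs.check --ignore-warnings --files='foo.rst bar.rst'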
@nox.session(name="changelog", python="3")
@nox.parametrize("draft", [False, True])
def changelog(session, draft):
"""
Generate salt's changelog
"""
if _upgrade_pip_setuptools_and_wheel(session):
requirements_file = os.path.join(
"requirements", "static", "ci", _get_pydir(session), "changelog.txt"
)
install_command = ["--progress-bar=off", "-r", requirements_file]
session.install(*install_command, silent=PIP_INSTALL_SILENT)
town_cmd = ["towncrier", "--version={}".format(session.posargs[0])]
if draft:
town_cmd.append("--draft")
session.run(*town_cmd)
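# Hedged usage sketch (the version number is made up): running
#     nox -e "changelog(draft=True)" -- 3004
# ends up invoking
#     towncrier --version=3004 --draft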
| 32.522212 | 135 | 0.614574 | [
"Apache-2.0"
] | 99-lives/salt | noxfile.py | 35,872 | Python |
schema = {
"type": "object",
"properties": {
"name": {"type": "string"},
"species": {"type": "string"},
"breed": {"type": "string"},
"age": {"type": "number"},
"store": {"type": "string"},
"price": {"type": "string"},
"received_date": {"type": "string", "pattern": "^[0-9]{4}-[0-9]{2}-[0-9]{2}T[0-9]{2}:[0-9]{2}:[0-9]{2}Z$"},
},
"required": ["name", "species", "breed", "age", "store", "price", "received_date"]
}
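# Illustrative only: a document that would satisfy the schema above (all values are made
# up), validated here with the third-party `jsonschema` package:
#
#     import jsonschema
#     jsonschema.validate(
#         {
#             "name": "Rex", "species": "dog", "breed": "beagle", "age": 3,
#             "store": "downtown", "price": "250.00",
#             "received_date": "2021-04-01T12:00:00Z",
#         },
#         schema,
#     )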
| 34.714286 | 115 | 0.442387 | [
"Apache-2.0"
] | xod442/pets-api | pet/schema.py | 486 | Python |
# coding:utf8
from .api import *
from .helpers import get_stock_codes, update_stock_codes
__version__ = "0.5.14"
__author__ = "shidenggui"
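# Hedged usage note: consumers typically drive the package through the API re-exported
# above (for example `easyquotation.use("sina")` as shown in the project README), while
# get_stock_codes()/update_stock_codes() maintain the locally cached stock code list.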
| 20 | 56 | 0.771429 | [
"MIT"
] | Gavin-HZ/easyquotation | easyquotation/__init__.py | 140 | Python |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import contextlib
import copy
import enum
import json
import logging
import math
import multiprocessing as mp
import time
from typing import Any, Dict, List, NamedTuple, Optional, Union
import torch
import torch.nn as nn
from classy_vision.dataset import ClassyDataset, build_dataset
from classy_vision.dataset.transforms.mixup import MixupTransform
from classy_vision.generic.distributed_util import (
all_reduce_mean,
barrier,
init_distributed_data_parallel_model,
is_distributed_training_run,
)
from classy_vision.generic.util import (
Timer,
copy_model_to_gpu,
load_and_broadcast_checkpoint,
master_params,
recursive_copy_to_gpu,
split_batchnorm_params,
update_classy_state,
)
from classy_vision.generic.util import get_torch_version
from classy_vision.hooks import CheckpointHook, ClassyHook, build_hooks
from classy_vision.losses import ClassyLoss, build_loss
from classy_vision.meters import ClassyMeter, build_meters
from classy_vision.models import ClassyModel, build_model
from classy_vision.optim import (
ClassyOptimizer,
build_optimizer,
build_optimizer_schedulers,
)
from classy_vision.optim.zero import ZeRO
from torch.distributed import broadcast
from . import register_task
from .classy_task import ClassyTask
try:
import apex
apex_available = True
except ImportError:
apex_available = False
try:
from torch.cuda.amp import GradScaler as TorchGradScaler
except ImportError:
pass
try:
from fairscale.optim.grad_scaler import ShardedGradScaler
fairscale_available = True
except ImportError:
fairscale_available = False
class AmpType(enum.Enum):
# Automatic Mixed Precision supported types
APEX = enum.auto()
PYTORCH = enum.auto()
class BroadcastBuffersMode(enum.Enum):
DISABLED = enum.auto()
# Enable DistributedDataParallel's broadcast_buffers option, synchronizing
# model buffers every forward pass.
FORWARD_PASS = enum.auto()
# Similar to FORWARD_PASS, but only synchronizes model buffers once
# per epoch, between train and test phases. If your motivation for
# synchronizing buffers is for buffers to be consistent during eval, use
# this instead of FORWARD_PASS to reduce training overhead.
BEFORE_EVAL = enum.auto()
class BatchNormSyncMode(enum.Enum):
DISABLED = enum.auto() # No Synchronized Batch Normalization
PYTORCH = enum.auto() # Use torch.nn.SyncBatchNorm
APEX = enum.auto() # Use apex.parallel.SyncBatchNorm, needs apex to be installed
class LastBatchInfo(NamedTuple):
loss: torch.Tensor
output: torch.Tensor
target: torch.Tensor
sample: Dict[str, Any]
step_data: Dict[str, Any]
@register_task("classification_task")
class ClassificationTask(ClassyTask):
"""Basic classification training task.
    This task encapsulates all of the components and steps needed to
train a classifier using a :class:`classy_vision.trainer.ClassyTrainer`.
Assumes a train / test phase per each epoch and that the datasets
have the same API as the map-style Dataset class in
`torch.utils.data.dataset <https://pytorch.org/docs/stable/data.html
#torch.utils.data.Dataset>`_ (in particular, this task makes use of
the len). If you are using an `IterableDataset <https://pytorch.org/docs/
stable/data.html#torch.utils.data.IterableDataset>`_ then a custom task
may be appropriate.
:var loss: Loss (see :class:`classy_vision.losses.ClassyLoss`) function used
for computing the loss in each forward pass
:var datasets: Mapping from a ``phase_type`` in ["train", "test']
to dataset used for training (or testing)
:var meters: List of meters (see :class:`classy_vision.meters.ClassyMeter`)
to calculate during training
:var num_epochs: Number of epochs (passes over dataset) to train
:var test_only: Used to only run the test phase
:var base_model: Model to be trained, unwrapped in DDP or DP wrappers
:var optimizer: Optimizer used in train step
:var optimizer_schedulers: Dictionary. Key is the name of the optimizer
option (e.g. lr), value is a ClassyParamScheduler
:var checkpoint: Serializable dict which represents state in training
:var phases: List of phase specific information, e.g. if phase is
train / test.
:var hooks: List of hooks to apply during training
:var train: Phase type, if true it means we are training,
false means testing
:var distributed_model: Base model, but wrapped in DDP (DistributedDataParallel)
:var phase_idx: Current phase id, first phase is 0, if task has not started
training then returns -1
:var train_phase_idx: Only counts train phases
:var num_updates: Number of total parameter updates applied to model
by the optimizer
:var data_iterator: Iterator which can be used to obtain batches
:var losses: Loss curve
:var perf_log: list of training speed measurements, to be logged
:var clip_grad_norm: maximum gradient norm (default None)
:var simulated_global_batchsize: batch size simulated via gradient accumulation
:var optimizer_period: apply optimizer after this many steps; derived from
simulated_global_batchsize, default 1.
"""
def __init__(self):
"""Constructs a ClassificationTask"""
super().__init__()
self.base_loss = None
self.datasets = {}
self.meters = []
self.num_epochs = 1
self.test_phase_period = 1
self.train_phases_per_epoch = 0
self.test_only = False
self.base_model = None
self.optimizer = None
self.optimizer_schedulers = {}
self.checkpoint_dict = None
self.checkpoint_path = None
self.phases = []
self.hooks = []
self.train = True
self.distributed_model = None
self.distributed_loss = None
self.phase_idx = -1
self.train_phase_idx = -1
self.num_updates = 0
self.dataloader = None
self.data_iterator = None
self.losses = []
self.broadcast_buffers_mode: BroadcastBuffersMode = (
BroadcastBuffersMode.BEFORE_EVAL
)
self.amp_args = None
self.amp_type = None
self.amp_grad_scaler = None
self.mixup_transform = None
self.perf_log = []
self.last_batch = None
self.batch_norm_sync_mode = BatchNormSyncMode.DISABLED
self.find_unused_parameters = False
self.use_gpu = torch.cuda.is_available()
self.dataloader_mp_context = "spawn"
self.bn_weight_decay = False
self._train_only = True
self.clip_grad_norm = None
self.simulated_global_batchsize = None
self.optimizer_period = 1
self.ddp_bucket_cap_mb = 25
self.use_sharded_ddp = False
self.fp16_grad_compress = False
def set_use_sharded_ddp(self, use_sharded_ddp: bool):
self.use_sharded_ddp = use_sharded_ddp
if self.use_sharded_ddp:
logging.info("Using Sharded DDP")
return self
def set_use_gpu(self, use_gpu: bool):
self.use_gpu = use_gpu
assert (
not self.use_gpu or torch.cuda.is_available()
), "CUDA required to train on GPUs"
return self
def set_clip_grad_norm(self, clip_grad_norm: Optional[float]):
"""Sets maximum gradient norm.
None means gradient clipping is disabled. Defaults to None."""
self.clip_grad_norm = clip_grad_norm
if clip_grad_norm is None:
logging.info("Disabled gradient norm clipping.")
else:
logging.info(
f"Enabled gradient norm clipping with threshold: {clip_grad_norm}"
)
return self
def set_simulated_global_batchsize(self, simulated_global_batchsize: Optional[int]):
"""Sets a simulated batch size by gradient accumulation.
Gradient accumulation adds up gradients from multiple minibatches and
steps the optimizer every N train_steps, where N is optimizer_period.
When enabled, the very last train_steps might end up not updating the
model, depending on the number of total steps. None means gradient
accumulation is disabled. Defaults to None."""
self.simulated_global_batchsize = simulated_global_batchsize
return self
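    # Worked example for the accumulation logic wired up in prepare() (numbers are
    # hypothetical): with 8 replicas and a per-replica batch size of 32 the global batch
    # size is 256, so simulated_global_batchsize=1024 yields optimizer_period=4 and the
    # optimizer only steps once every 4 train_steps.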
def set_checkpoint(self, checkpoint_path: str):
"""Sets checkpoint on task.
Args:
checkpoint_path: The path to load the checkpoint from. Can be a file or a
directory. See :func:`load_checkpoint` for more information.
"""
self.checkpoint_path = checkpoint_path
return self
def _set_checkpoint_dict(self, checkpoint_dict: Dict[str, Any]):
"""Sets the checkpoint dict in the task. Only used for testing.
Args:
checkpoint_dict: A serializable dict representing current task state
"""
self.checkpoint_dict = checkpoint_dict
return self
def set_num_epochs(self, num_epochs: Union[int, float]):
"""Set number of epochs to be run.
Args:
num_epochs: Number of epochs to run task
"""
self.num_epochs = num_epochs
return self
def set_test_phase_period(self, test_phase_period: int):
"""Set the period of test phase.
Args:
test_phase_period: The period of test phase
"""
self.test_phase_period = test_phase_period
return self
def set_dataset(self, dataset: ClassyDataset, phase_type: str):
"""Set dataset for phase type on task
Args:
dataset: ClassyDataset for returning samples.
phase_type: str must be one of "train" or "test"
"""
assert phase_type in [
"train",
"test",
], "phase_type must be in ['train', 'test']"
self.datasets[phase_type] = dataset
if phase_type == "train":
self.train_phases_per_epoch = getattr(dataset, "phases_per_epoch", 1)
else:
self._train_only = False
return self
def set_dataloader_mp_context(self, dataloader_mp_context: Optional[str]):
"""Set the multiprocessing context used by the dataloader.
The context can be either 'spawn', 'fork', 'forkserver' or None (uses the
default context). See
https://docs.python.org/3/library/multiprocessing.html#multiprocessing.get_context
for more details."""
self.dataloader_mp_context = dataloader_mp_context
return self
def set_optimizer(self, optimizer: ClassyOptimizer):
"""Set optimizer for task
Args:
optimizer: optimizer for task
"""
self.optimizer = optimizer
return self
def set_loss(self, loss: ClassyLoss):
"""Set loss function for task
Args:
loss: loss for task
"""
self.base_loss = loss
return self
def set_meters(self, meters: List["ClassyMeter"]):
"""Set meters for task
Args:
meters: list of meters to compute during training
"""
self.meters = meters
return self
def set_distributed_options(
self,
broadcast_buffers_mode: BroadcastBuffersMode = BroadcastBuffersMode.BEFORE_EVAL,
batch_norm_sync_mode: BatchNormSyncMode = BatchNormSyncMode.DISABLED,
batch_norm_sync_group_size: int = 0,
find_unused_parameters: bool = False,
bucket_cap_mb: int = 25,
fp16_grad_compress: bool = False,
):
"""Set distributed options.
Args:
broadcast_buffers_mode: Broadcast buffers mode. See
:class:`BroadcastBuffersMode` for options.
batch_norm_sync_mode: Batch normalization synchronization mode. See
:class:`BatchNormSyncMode` for options.
batch_norm_sync_group_size: Group size to use for synchronized batch norm.
0 means that the stats are synchronized across all replicas. For
efficient synchronization, set it to the number of GPUs in a node (
usually 8).
find_unused_parameters: See
:class:`torch.nn.parallel.DistributedDataParallel` for information.
bucket_cap_mb: See
:class:`torch.nn.parallel.DistributedDataParallel` for information.
Raises:
RuntimeError: If batch_norm_sync_mode is `BatchNormSyncMode.APEX` and apex
is not installed.
"""
self.broadcast_buffers_mode = broadcast_buffers_mode
if batch_norm_sync_group_size > 0:
if not batch_norm_sync_mode == BatchNormSyncMode.APEX:
# this should ideally work with PyTorch Sync BN as well, but it
# fails while initializing DDP for some reason.
raise ValueError(
"batch_norm_sync_group_size can be > 0 only when "
"Apex Synchronized Batch Normalization is being used."
)
self.batch_norm_sync_group_size = batch_norm_sync_group_size
if batch_norm_sync_mode == BatchNormSyncMode.DISABLED:
logging.info("Synchronized Batch Normalization is disabled")
else:
if batch_norm_sync_mode == BatchNormSyncMode.APEX and not apex_available:
raise RuntimeError("apex is not installed")
msg = f"Using Synchronized Batch Normalization using {batch_norm_sync_mode}"
if self.batch_norm_sync_group_size > 0:
msg += f" and group size {batch_norm_sync_group_size}"
logging.info(msg)
self.batch_norm_sync_mode = batch_norm_sync_mode
if find_unused_parameters:
logging.info("Enabling find_unused_parameters in DDP")
self.find_unused_parameters = find_unused_parameters
self.ddp_bucket_cap_mb = bucket_cap_mb
if fp16_grad_compress:
if get_torch_version() < [1, 8, 0]:
raise RuntimeError(
"FP16 grad compression is only supported since PyTorch 1.8"
)
logging.info("Enabling FP16 grad compression")
self.fp16_grad_compress = fp16_grad_compress
return self
def set_hooks(self, hooks: List["ClassyHook"]):
"""Set hooks for task
Args:
hooks: List of hooks to apply during training
"""
from classy_vision.hooks import ClassyHook
assert isinstance(hooks, list)
assert all(isinstance(hook, ClassyHook) for hook in hooks)
assert len({hook.name() for hook in hooks}) == len(
hooks
), "Cannot have repeated hooks of the same class"
        # TODO (zyan3): we move checkpoint hooks to the end of the list because some hooks
        # may change the state of the model, and we want to save the changed state in the
        # checkpoint. This is a temporary fix.
non_checkpoint_hooks = [
hook for hook in hooks if not isinstance(hook, CheckpointHook)
]
checkpoint_hooks = [hook for hook in hooks if isinstance(hook, CheckpointHook)]
hooks = non_checkpoint_hooks + checkpoint_hooks
self.hooks = hooks
return self
def set_model(self, model: ClassyModel):
"""Set model for task
Args:
model: Model to be trained
"""
self.base_model = model
return self
def set_test_only(self, test_only: bool):
"""Set test only flag
Args:
test_only: If true, only test phases will be run
"""
self.test_only = test_only
return self
def set_bn_weight_decay(self, bn_weight_decay: bool):
assert type(bn_weight_decay) == bool
self.bn_weight_decay = bn_weight_decay
return self
def set_amp_args(self, amp_args: Optional[Dict[str, Any]]):
"""Disable / enable apex.amp and set the automatic mixed precision parameters.
apex.amp can be utilized for mixed / half precision training.
Args:
amp_args: Dictionary containing arguments to be passed to
amp.initialize. Set to None to disable amp. To enable mixed
precision training, pass amp_args={"opt_level": "O1"} here.
See https://nvidia.github.io/apex/amp.html for more info.
Raises:
RuntimeError: If opt_level is not None and apex is not installed.
Warning: apex needs to be installed to utilize this feature.
"""
self.amp_args = amp_args
if amp_args is None:
logging.info("AMP disabled")
else:
# Check that the requested AMP type is known
try:
self.amp_type = AmpType[self.amp_args["amp_type"].upper()]
except KeyError:
logging.info("AMP type not specified, defaulting to Apex")
self.amp_type = AmpType.APEX
# Check for CUDA availability, required for both Apex and Pytorch AMP
if not torch.cuda.is_available():
raise RuntimeError(
"AMP is required but CUDA is not supported, cannot enable AMP"
)
# Check for Apex availability
if self.amp_type == AmpType.APEX and not apex_available:
raise RuntimeError(
"Apex AMP is required but Apex is not installed, cannot enable AMP"
)
if self.use_sharded_ddp:
if self.amp_type == AmpType.APEX:
raise RuntimeError(
"ShardedDDP has been requested, which is incompatible with Apex AMP"
)
if not fairscale_available:
raise RuntimeError(
"ShardedDDP has been requested, but fairscale is not installed in the current environment"
)
# Set Torch AMP grad scaler, used to prevent gradient underflow
elif self.amp_type == AmpType.PYTORCH:
if self.use_sharded_ddp:
logging.info("Using ShardedGradScaler to manage Pytorch AMP")
self.amp_grad_scaler = ShardedGradScaler()
else:
self.amp_grad_scaler = TorchGradScaler()
logging.info(f"AMP enabled with args {amp_args}")
return self
def set_mixup_transform(self, mixup_transform: Optional["MixupTransform"]):
"""Disable / enable mixup transform for data augmentation
        Args:
mixup_transform: a callable object which performs mixup data augmentation
"""
self.mixup_transform = mixup_transform
if mixup_transform is None:
logging.info("mixup disabled")
else:
logging.info("mixup enabled")
return self
def set_optimizer_schedulers(self, schedulers):
self.optimizer_schedulers = schedulers
return self
@classmethod
def from_config(cls, config: Dict[str, Any]) -> "ClassificationTask":
"""Instantiates a ClassificationTask from a configuration.
Args:
config: A configuration for a ClassificationTask.
See :func:`__init__` for parameters expected in the config.
Returns:
A ClassificationTask instance.
"""
test_only = config.get("test_only", False)
if not test_only:
# TODO Make distinction between epochs and phases in optimizer clear
train_phases_per_epoch = config["dataset"]["train"].get(
"phases_per_epoch", 1
)
optimizer_config = config["optimizer"]
optimizer_config["num_epochs"] = (
config["num_epochs"] * train_phases_per_epoch
)
optimizer = build_optimizer(optimizer_config)
param_schedulers = build_optimizer_schedulers(optimizer_config)
datasets = {}
phase_types = ["train", "test"]
for phase_type in phase_types:
if phase_type in config["dataset"]:
datasets[phase_type] = build_dataset(config["dataset"][phase_type])
loss = build_loss(config["loss"])
amp_args = config.get("amp_args")
meters = build_meters(config.get("meters", {}))
model = build_model(config["model"])
mixup_transform = None
if config.get("mixup") is not None:
assert "alpha" in config["mixup"], "key alpha is missing in mixup dict"
mixup_transform = MixupTransform(
config["mixup"]["alpha"], config["mixup"].get("num_classes")
)
# hooks config is optional
hooks_config = config.get("hooks")
hooks = []
if hooks_config is not None:
hooks = build_hooks(hooks_config)
distributed_config = config.get("distributed", {})
distributed_options = {
"broadcast_buffers_mode": BroadcastBuffersMode[
distributed_config.get("broadcast_buffers", "before_eval").upper()
],
"batch_norm_sync_mode": BatchNormSyncMode[
distributed_config.get("batch_norm_sync_mode", "disabled").upper()
],
"batch_norm_sync_group_size": distributed_config.get(
"batch_norm_sync_group_size", 0
),
"find_unused_parameters": distributed_config.get(
"find_unused_parameters", False
),
"bucket_cap_mb": distributed_config.get("bucket_cap_mb", 25),
"fp16_grad_compress": distributed_config.get("fp16_grad_compress", False),
}
task = (
cls()
.set_num_epochs(config["num_epochs"])
.set_test_phase_period(config.get("test_phase_period", 1))
.set_loss(loss)
.set_test_only(test_only)
.set_model(model)
.set_meters(meters)
.set_amp_args(amp_args)
.set_mixup_transform(mixup_transform)
.set_distributed_options(**distributed_options)
.set_hooks(hooks)
.set_bn_weight_decay(config.get("bn_weight_decay", False))
.set_clip_grad_norm(config.get("clip_grad_norm"))
.set_simulated_global_batchsize(config.get("simulated_global_batchsize"))
.set_use_sharded_ddp(config.get("use_sharded_ddp", False))
)
if not test_only:
task.set_optimizer(optimizer)
task.set_optimizer_schedulers(param_schedulers)
use_gpu = config.get("use_gpu")
if use_gpu is not None:
task.set_use_gpu(use_gpu)
for phase_type in datasets:
task.set_dataset(datasets[phase_type], phase_type)
# NOTE: this is a private member and only meant to be used for
# logging/debugging purposes. See __repr__ implementation
task._config = config
return task
@property
def num_batches_per_phase(self):
"""Returns number of batches in current phase iterator"""
return len(self.data_iterator)
@property
def model(self):
"""Returns model used in training (can be wrapped with DDP)"""
return (
self.distributed_model if is_distributed_training_run() else self.base_model
)
@property
def loss(self):
"""Returns loss used in training (can be wrapped with DDP)"""
return self.distributed_loss if self.distributed_loss else self.base_loss
@property
def phase_type(self):
"""Returns current phase type. String with value "train" or "test" """
return "train" if self.train else "test"
@property
def eval_phase_idx(self):
"""Returns current evaluation phase"""
return self.phase_idx - self.train_phase_idx - 1
def get_total_training_phases(self):
"""
Returns the total number of "train" phases in the task
"""
num_training_phases = 0
for phase in self.phases:
if phase["train"] is True:
num_training_phases += 1
return num_training_phases
def get_total_test_phases(self):
"""
Returns the total number of "test" phases in the task
"""
num_test_phases = 0
for phase in self.phases:
if phase["train"] is False:
num_test_phases += 1
return num_test_phases
def _build_phases(self):
"""Returns list of phases from config.
These phases will look like:
{
train: is this a train or test phase?
optimizer: optimizer settings
}
- If this is a test only run, then only test phases will be
generated
- If this is a training run with both train and test datasets, then x phases =
x train phases + x test phases, interleaved. If test_phase_period > 1, test
phases are only added after test_phase_period train phases. The last phase is
always a test phase.
- If this is a training run with only a train dataset, then x phases = x train
phases.
"""
if not self.test_only:
phases = [
{"train": True}
for _ in range(math.ceil(self.train_phases_per_epoch * self.num_epochs))
]
if self._train_only:
return phases
final_phases = []
for i, phase in enumerate(phases):
final_phases.append(phase)
if (i + 1) % self.test_phase_period == 0:
final_phases.append({"train": False})
if final_phases[-1]["train"]:
final_phases.append({"train": False})
return final_phases
return [{"train": False} for _ in range(self.num_epochs)]
def build_dataloader_from_dataset(self, dataset, **kwargs):
"""Builds a dataloader from the provided dataset
Args:
dataset: A ClassyDataset
kwargs: Additional kwargs to pass during dataloader construction for
derived classes
"""
return dataset.iterator(
phase_type=self.phase_type,
current_phase_id=self.train_phase_idx if self.train else 0,
pin_memory=self.use_gpu and torch.cuda.device_count() > 1,
multiprocessing_context=mp.get_context(self.dataloader_mp_context),
**kwargs,
)
def build_dataloaders_for_current_phase(self):
"""Builds dataloader(s) for the current phase.
Deriving classes can override this method to support custom behavior, like
supporting multiple dataloaders in parallel.
"""
self.dataloader = self.build_dataloader_from_dataset(
self.datasets[self.phase_type]
)
def prepare_optimizer(self, optimizer, model, loss=None):
bn_params, other_params = split_batchnorm_params(model)
if loss is not None:
bn_params_loss, params_loss = split_batchnorm_params(loss)
bn_params = bn_params + bn_params_loss
other_params = other_params + params_loss
bn_schedulers = self.optimizer_schedulers.copy()
if not self.bn_weight_decay:
bn_schedulers["weight_decay"] = 0
param_groups = [{"params": other_params, **self.optimizer_schedulers}]
if len(bn_params) > 0:
param_groups.append({"params": bn_params, **bn_schedulers})
self.optimizer.set_param_groups(param_groups)
def prepare(self):
"""Prepares task for training, populates all derived attributes """
self.phases = self._build_phases()
self.train = False if self.test_only else self.train
if self.batch_norm_sync_mode == BatchNormSyncMode.PYTORCH:
self.base_model = nn.SyncBatchNorm.convert_sync_batchnorm(self.base_model)
elif self.batch_norm_sync_mode == BatchNormSyncMode.APEX:
sync_bn_process_group = apex.parallel.create_syncbn_process_group(
self.batch_norm_sync_group_size
)
self.base_model = apex.parallel.convert_syncbn_model(
self.base_model, process_group=sync_bn_process_group
)
# move the model and loss to the right device
if self.use_gpu:
self.base_model, self.base_loss = copy_model_to_gpu(
self.base_model, self.base_loss
)
else:
self.base_loss.cpu()
self.base_model.cpu()
if self.optimizer is not None:
self.prepare_optimizer(
optimizer=self.optimizer, model=self.base_model, loss=self.base_loss
)
if self.amp_args is not None:
if self.amp_type == AmpType.APEX:
# Initialize apex.amp. This updates the model and the PyTorch optimizer (
# if training, which is wrapped by the ClassyOptimizer in self.optimizer).
# Please note this must happen before loading the checkpoint, cause
# there's amp state to be restored.
if self.optimizer is None:
self.base_model = apex.amp.initialize(
self.base_model, optimizers=None, **self.amp_args
)
else:
self.base_model, self.optimizer.optimizer = apex.amp.initialize(
self.base_model, self.optimizer.optimizer, **self.amp_args
)
if self.simulated_global_batchsize is not None:
if self.simulated_global_batchsize % self.get_global_batchsize() != 0:
raise ValueError(
f"Global batch size ({self.get_global_batchsize()}) must divide "
f"simulated_global_batchsize ({self.simulated_global_batchsize})"
)
else:
self.simulated_global_batchsize = self.get_global_batchsize()
self.optimizer_period = (
self.simulated_global_batchsize // self.get_global_batchsize()
)
if self.optimizer_period > 1:
logging.info(
f"Using gradient accumulation with a period of {self.optimizer_period}"
)
if self.checkpoint_path:
self.checkpoint_dict = load_and_broadcast_checkpoint(self.checkpoint_path)
classy_state_dict = (
None
if self.checkpoint_dict is None
else self.checkpoint_dict["classy_state_dict"]
)
if classy_state_dict is not None:
state_load_success = update_classy_state(self, classy_state_dict)
assert (
state_load_success
), "Update classy state from checkpoint was unsuccessful."
self.init_distributed_data_parallel_model()
def init_distributed_data_parallel_model(self):
"""
Initialize
`torch.nn.parallel.distributed.DistributedDataParallel <https://pytorch.org/
docs/stable/nn.html#distributeddataparallel>`_.
Needed for distributed training. This is where a model should be wrapped by DDP.
"""
if not is_distributed_training_run():
return
assert (
self.distributed_model is None
), "init_ddp_non_elastic must only be called once"
broadcast_buffers = (
self.broadcast_buffers_mode == BroadcastBuffersMode.FORWARD_PASS
)
if self.use_sharded_ddp:
if not isinstance(self.optimizer, ZeRO):
raise ValueError(
"ShardedDataParallel engine should only be used in conjunction with ZeRO optimizer"
)
from fairscale.nn.data_parallel import ShardedDataParallel
# Replace the original DDP wrap by the shard-aware ShardedDDP
self.distributed_model = ShardedDataParallel(
module=self.base_model,
sharded_optimizer=self.optimizer.optimizer,
broadcast_buffers=broadcast_buffers,
)
else:
self.distributed_model = init_distributed_data_parallel_model(
self.base_model,
broadcast_buffers=broadcast_buffers,
find_unused_parameters=self.find_unused_parameters,
bucket_cap_mb=self.ddp_bucket_cap_mb,
)
if self.fp16_grad_compress:
from torch.distributed.algorithms import ddp_comm_hooks
# FP16 hook is stateless and only takes a process group as the state.
# We use the default process group so we set the state to None.
process_group = None
self.distributed_model.register_comm_hook(
process_group,
ddp_comm_hooks.default_hooks.fp16_compress_hook,
)
if (
isinstance(self.base_loss, ClassyLoss)
and self.base_loss.has_learned_parameters()
):
logging.info("Initializing distributed loss")
self.distributed_loss = init_distributed_data_parallel_model(
self.base_loss,
broadcast_buffers=broadcast_buffers,
find_unused_parameters=self.find_unused_parameters,
bucket_cap_mb=self.ddp_bucket_cap_mb,
)
@property
def where(self):
"""Returns the proportion of training that has completed. If in test
only mode, returns proportion of testing completed
Returned value is a float in the range [0, 1)
"""
current_step = self.num_updates / self.get_global_batchsize()
num_phases = (
self.get_total_test_phases()
if self.test_only
else self.get_total_training_phases()
)
if self.num_batches_per_phase <= 0:
raise RuntimeError("No batches to read. Is the dataset empty?")
num_steps = num_phases * self.num_batches_per_phase
where = current_step / num_steps
return where
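    # Worked example (numbers are hypothetical): with a global batch size of 32,
    # num_updates=3200 gives current_step=100; 10 training phases of 100 batches each give
    # num_steps=1000, so `where` is 0.1, i.e. 10% of the way through training.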
def get_classy_state(self, deep_copy: bool = False):
"""Returns serialiable state of task
Args:
deep_copy: If true, does a deep copy of state before returning.
"""
optimizer_state = {}
if self.optimizer is not None:
optimizer_state = self.optimizer.get_classy_state()
classy_state_dict = {
"train": self.train,
"base_model": self.base_model.get_classy_state(),
"meters": [meter.get_classy_state() for meter in self.meters],
"optimizer": optimizer_state,
"phase_idx": self.phase_idx,
"train_phase_idx": self.train_phase_idx,
"num_updates": self.num_updates,
"losses": self.losses,
"hooks": {hook.name(): hook.get_classy_state() for hook in self.hooks},
"loss": {},
}
if "train" in self.datasets and self._is_checkpointable_dataset(
self.datasets["train"]
):
classy_state_dict["train_dataset_iterator"] = self.datasets[
"train"
].get_classy_state()
if isinstance(self.base_loss, ClassyLoss):
classy_state_dict["loss"] = self.base_loss.get_classy_state()
if self.amp_args is not None:
if self.amp_type == AmpType.APEX:
classy_state_dict["amp"] = apex.amp.state_dict()
elif self.amp_grad_scaler is not None:
classy_state_dict["amp"] = self.amp_grad_scaler.state_dict()
if deep_copy:
classy_state_dict = copy.deepcopy(classy_state_dict)
return classy_state_dict
def set_classy_state(self, state):
"""Set task state
Args:
state: Dict containing state of a task
"""
# some settings are different in test only
self.train = False if self.test_only else state["train"]
if not self.test_only:
self.phase_idx = state["phase_idx"]
self.num_updates = state["num_updates"]
self.train_phase_idx = state["train_phase_idx"]
self.losses = state["losses"]
for meter, meter_state in zip(self.meters, state["meters"]):
meter.set_classy_state(meter_state)
self.base_model.set_classy_state(state["base_model"])
if self.optimizer is not None:
self.optimizer.set_classy_state(state["optimizer"])
if state.get("loss") and isinstance(self.base_loss, ClassyLoss):
self.base_loss.set_classy_state(state["loss"])
if "amp" in state:
if self.amp_type == AmpType.APEX:
apex.amp.load_state_dict(state["amp"])
else:
self.amp_grad_scaler.load_state_dict(state["amp"])
for hook in self.hooks:
# we still want to be able to run when new hooks are added or old
# hooks are removed
if hook.name() in state["hooks"]:
hook.set_classy_state(state["hooks"][hook.name()])
else:
logging.warning(f"No state found for hook: {hook.name()}")
if "train" in self.datasets and self._is_checkpointable_dataset(
self.datasets["train"]
):
self.datasets["train"].set_classy_state(state.get("train_dataset_iterator"))
@staticmethod
def _is_checkpointable_dataset(dataset):
return hasattr(dataset, "get_classy_state") and hasattr(
dataset, "set_classy_state"
)
def eval_step(self):
self.last_batch = None
# Process next sample
with Timer() as timer:
sample = next(self.data_iterator)
        assert isinstance(sample, dict) and "input" in sample and "target" in sample, (
            f"Returned sample [{sample}] is not a map with 'input' and 'target' keys"
        )
target = sample["target"]
if self.use_gpu:
sample = recursive_copy_to_gpu(sample, non_blocking=True)
# Optional Pytorch AMP context
torch_amp_context = (
torch.cuda.amp.autocast()
if self.amp_type == AmpType.PYTORCH
else contextlib.suppress()
)
with torch.no_grad(), torch_amp_context:
output = self.model(sample["input"])
local_loss = self.compute_loss(output, sample)
loss = local_loss.detach().clone()
self.check_inf_nan(loss)
self.losses.append(loss.data.cpu().item() * target.size(0))
self.update_meters(output, sample)
# Move some data to the task so hooks get a chance to access it
self.last_batch = LastBatchInfo(
loss=loss,
output=output,
target=target,
sample=sample,
step_data={"sample_fetch_time": timer.elapsed_time},
)
def check_inf_nan(self, loss):
if loss == float("inf") or loss == float("-inf") or loss != loss:
raise FloatingPointError(f"Loss is infinity or NaN: {loss}")
def _should_do_step(self):
"""Tells if we will be performing an optimizer step.
Returns True always if there is no gradient accumulation. With gradient
accumulation returns True only when the gradients will be synchronized and we
will be performing an optimizer step.
"""
update_idx = self.num_updates // self.get_global_batchsize()
return (update_idx % self.optimizer_period) == self.optimizer_period - 1
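    # Worked example (illustrative numbers only, not read from any config): with a
    # global batch size of 32 and optimizer_period = 4, num_updates grows by 32 per
    # train_step, so update_idx takes the values 0, 1, 2, 3, ... and
    # _should_do_step() is True only when update_idx % 4 == 3, i.e. on every fourth
    # batch, after three batches of gradients have been accumulated.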
def train_step(self):
"""Train step to be executed in train loop."""
self.last_batch = None
# Process next sample
with Timer() as timer:
sample = next(self.data_iterator)
assert isinstance(sample, dict) and "input" in sample and "target" in sample, (
f"Returned sample [{sample}] is not a map with 'input' and"
+ "'target' keys"
)
# Copy sample to GPU
target = sample["target"]
if self.use_gpu:
sample = recursive_copy_to_gpu(sample, non_blocking=True)
if self.mixup_transform is not None:
sample = self.mixup_transform(sample)
# Optional Pytorch AMP context
torch_amp_context = (
torch.cuda.amp.autocast()
if self.amp_type == AmpType.PYTORCH
else contextlib.suppress()
)
# only sync with DDP when we need to perform an optimizer step
# an optimizer step can be skipped if gradient accumulation is enabled
do_step = self._should_do_step()
ctx_mgr_model = (
self.distributed_model.no_sync()
if self.distributed_model is not None and not do_step
else contextlib.suppress()
)
ctx_mgr_loss = (
self.distributed_loss.no_sync()
if self.distributed_loss is not None and not do_step
else contextlib.suppress()
)
with ctx_mgr_model, ctx_mgr_loss:
# Forward pass
with torch.enable_grad(), torch_amp_context:
output = self.model(sample["input"])
local_loss = self.compute_loss(output, sample)
loss = local_loss.detach().clone()
self.losses.append(loss.data.cpu().item() * target.size(0))
self.update_meters(output, sample)
# Backwards pass + optimizer step
self.run_optimizer(local_loss)
self.num_updates += self.get_global_batchsize()
# Move some data to the task so hooks get a chance to access it
self.last_batch = LastBatchInfo(
loss=loss,
output=output,
target=target,
sample=sample,
step_data={"sample_fetch_time": timer.elapsed_time},
)
def compute_loss(self, model_output, sample):
return self.loss(model_output, sample["target"])
def run_optimizer(self, loss):
"""Runs backwards pass and update the optimizer"""
self.check_inf_nan(loss)
# Gradient accumulation logic. We always set optimizer_period, even
# if gradient accumulation is disabled. Assumes all batches have the
# same size
update_idx = self.num_updates // self.get_global_batchsize()
do_zero_grad = (update_idx % self.optimizer_period) == 0
do_step = self._should_do_step()
if do_zero_grad:
self.optimizer.zero_grad()
if self.amp_type == AmpType.APEX:
with apex.amp.scale_loss(loss, self.optimizer.optimizer) as scaled_loss:
scaled_loss.backward()
elif self.amp_type == AmpType.PYTORCH:
self.amp_grad_scaler.scale(loss).backward()
else:
loss.backward()
if do_step:
# Handle gradient accumulation related gradient rescaling
if self.optimizer_period != 1:
self._rescale_gradients(1 / self.optimizer_period)
# Clipping must happen after grad accumulation
if self.clip_grad_norm is not None:
self._clip_gradients(self.clip_grad_norm)
if self.amp_type == AmpType.PYTORCH:
# If using mixed precision, handle underflow-related scaling
# See https://pytorch.org/docs/stable/amp.html#gradient-scaling
# for context
self.amp_grad_scaler.step(self.optimizer, where=self.where)
self.amp_grad_scaler.update()
else:
self.optimizer.step(where=self.where)
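    # For reference, the torch.cuda.amp calls used above follow the standard
    # scale -> backward -> step -> update sequence; a generic sketch with a plain
    # torch optimizer (hypothetical names, outside this task's wrappers) looks like:
    #     scaler = torch.cuda.amp.GradScaler()
    #     with torch.cuda.amp.autocast():
    #         loss = criterion(model(inputs), targets)
    #     scaler.scale(loss).backward()
    #     scaler.step(optimizer)
    #     scaler.update()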
def _rescale_gradients(self, scale):
for param in master_params(self.optimizer):
if param.grad is not None:
param.grad.data.mul_(scale)
def _clip_gradients(self, max_norm):
nn.utils.clip_grad_norm_(master_params(self.optimizer), max_norm)
def update_meters(self, model_output, sample):
target = sample["target"].detach().cpu()
model_output = model_output.detach().cpu()
# Update meters
for meter in self.meters:
meter.update(model_output, target, is_train=self.train)
def synchronize_losses(self):
"""Average the losses across the different replicas"""
# Average losses across nodes
losses_tensor = torch.tensor(self.losses)
synchronized_losses_tensor = all_reduce_mean(losses_tensor)
self.losses = synchronized_losses_tensor.tolist()
def advance_phase(self):
"""Performs bookkeeping / task updates between phases
Increments phase idx, resets meters, resets loss history,
resets counters, shuffles dataset, rebuilds iterators, and
sets the train / test state for phase.
"""
logging.debug("Advancing phase")
# Reset meters for next phase / epoch
for meter in self.meters:
meter.reset()
# Reset loss history for next epoch
self.losses = []
# Setup new phase
self.phase_idx += 1
phase = self.phases[self.phase_idx]
self.train = True if phase["train"] else False
if self.train:
self.train_phase_idx += 1
# Re-build dataloader & re-create iterator anytime membership changes.
self.build_dataloaders_for_current_phase()
self.create_data_iterators()
# Set up pytorch module in train vs eval mode, update optimizer.
self._set_model_train_mode()
def done_training(self):
"""Stop condition for training"""
return self.phase_idx + 1 >= len(self.phases)
def create_data_iterators(self):
"""Creates data iterator(s) for the current phase."""
# Delete iterator explicitly so that all dataloader processes
# are cleaned up.
del self.data_iterator
self.data_iterator = iter(self.dataloader)
def _set_model_train_mode(self):
"""Set train mode for model"""
phase = self.phases[self.phase_idx]
self.base_model.train(phase["train"])
self.base_loss.train(phase["train"])
if (
self.broadcast_buffers_mode == BroadcastBuffersMode.BEFORE_EVAL
and not self.train
):
self._broadcast_buffers()
def _broadcast_buffers(self):
"""Explicitly synchronize buffers across all devices."""
if self.distributed_model is None:
return
buffers = list(self.base_model.buffers())
if len(buffers) > 0:
logging.info("Synchronizing buffers before evaluation.")
for buffer in buffers:
broadcast(buffer, 0, group=self.distributed_model.process_group)
# TODO: Functions below should be better abstracted into the dataloader
# abstraction
def get_batchsize_per_replica(self):
"""Return local replica's batchsize for dataset (e.g. batchsize per GPU)"""
return self.datasets[self.phase_type].get_batchsize_per_replica()
def get_global_batchsize(self):
"""Return global batchsize across all trainers"""
return self.datasets[self.phase_type].get_global_batchsize()
def on_start(self):
for hook in self.hooks:
hook.on_start(self)
def on_phase_start(self):
self.phase_start_time_total = time.perf_counter()
self.advance_phase()
for hook in self.hooks:
hook.on_phase_start(self)
self.phase_start_time_train = time.perf_counter()
def on_phase_end(self):
self.log_phase_end("train")
if self.train:
self.optimizer.on_epoch(where=self.where)
logging.debug("Syncing losses on phase end...")
self.synchronize_losses()
logging.debug("...losses synced")
logging.debug("Syncing meters on phase end...")
for meter in self.meters:
meter.sync_state()
logging.debug("...meters synced")
barrier()
for hook in self.hooks:
hook.on_phase_end(self)
self.perf_log = []
self.log_phase_end("total")
def on_end(self):
for hook in self.hooks:
hook.on_end(self)
def log_phase_end(self, tag):
if not self.train:
return
start_time = (
self.phase_start_time_train
if tag == "train"
else self.phase_start_time_total
)
phase_duration = time.perf_counter() - start_time
im_per_sec = (
self.get_global_batchsize() * self.num_batches_per_phase
) / phase_duration
self.perf_log.append(
{
"tag": tag,
"phase_idx": self.train_phase_idx,
"epoch_duration": phase_duration,
"im_per_sec": im_per_sec,
}
)
def __repr__(self):
if hasattr(self, "_config"):
config = json.dumps(self._config, indent=4)
return f"{super().__repr__()} initialized with config:\n{config}"
return super().__repr__()
| 36.825149 | 114 | 0.626573 | [
"MIT"
] | hahaxun/ClassyVision | classy_vision/tasks/classification_task.py | 49,493 | Python |
"""The tests for day17."""
from days import day17
from ddt import ddt, data, unpack
import unittest
import helpers
@ddt
class MyTestCase(unittest.TestCase): # noqa D101
@data(
[[
'x=495, y=2..7',
'y=7, x=495..501',
'x=501, y=3..7',
'x=498, y=2..4',
'x=506, y=1..2',
'x=498, y=10..13',
'x=504, y=10..13',
'y=13, x=498..504'], '57'])
@unpack
def test_example_a(self, test_input, expected): # noqa D102
result = day17.part_a(test_input)
self.assertEqual(result, expected)
def test_answer_part_a(self): # noqa D102
result = day17.part_a(helpers.get_file_contents('day17.txt'))
self.assertEqual(result, '38021')
@data(
[[
'x=495, y=2..7',
'y=7, x=495..501',
'x=501, y=3..7',
'x=498, y=2..4',
'x=506, y=1..2',
'x=498, y=10..13',
'x=504, y=10..13',
'y=13, x=498..504'], '29'])
@unpack
def test_example_b(self, test_input, expected): # noqa D102
result = day17.part_b(test_input)
self.assertEqual(result, expected)
def test_answer_part_b(self): # noqa D102
result = day17.part_b(helpers.get_file_contents('day17.txt'))
self.assertEqual(result, '32069')
| 28.808511 | 69 | 0.521418 | [
"MIT"
] | frangiz/AdventOfCode2018 | test/test_day17.py | 1,354 | Python |
"""
pyart.lazydict
==============
A dictionary-like class supporting lazy loading of specified keys.
.. autosummary::
:toctree: generated/
:template: dev_template.rst
LazyLoadDict
"""
try:
# Python 3
from collections.abc import MutableMapping
except ImportError:
# Python 2.7, will be removed in next release after Py-ART Impressionism.
from collections import MutableMapping
import itertools
class LazyLoadDict(MutableMapping):
"""
A dictionary-like class supporting lazy loading of specified keys.
Keys which are lazy loaded are specified using the set_lazy method.
The callable object which produces the specified key is provided as the
second argument to this method. This object gets called when the value
of the key is loaded. After this initial call the results is cached
in the traditional dictionary which is used for supplemental access to
this key.
Testing for keys in this dictionary using the "key in d" syntax will
result in the loading of a lazy key, use "key in d.keys()" to prevent
this evaluation.
    The comparison methods, __cmp__, __ge__, __gt__, __le__, __lt__, __ne__,
    and the view methods, viewitems, viewkeys, viewvalues, are not implemented.
    Neither is the fromkeys method.
Parameters
----------
dic : dict
Dictionary containing key, value pairs which will be stored and
        evaluated traditionally. This dictionary is referenced, not copied, into
        the LazyLoadDict, and hence changes to this dictionary may change the
        original. If this behavior is not desired, copy dic before
        initialization.
Examples
--------
>>> d = LazyLoadDict({'key1': 'value1', 'key2': 'value2'})
>>> d.keys()
['key2', 'key1']
>>> lazy_func = lambda : 999
>>> d.set_lazy('lazykey1', lazy_func)
>>> d.keys()
['key2', 'key1', 'lazykey1']
>>> d['lazykey1']
999
"""
def __init__(self, dic):
""" initalize. """
self._dic = dic
self._lazyload = {}
# abstract methods
def __setitem__(self, key, value):
""" Set a key which will not be stored and evaluated traditionally. """
self._dic[key] = value
if key in self._lazyload:
del self._lazyload[key]
def __getitem__(self, key):
""" Get the value of a key, evaluating a lazy key if needed. """
if key in self._lazyload:
value = self._lazyload[key]()
self._dic[key] = value
del self._lazyload[key]
return self._dic[key]
def __delitem__(self, key):
""" Remove a lazy or traditional key from the dictionary. """
if key in self._lazyload:
del self._lazyload[key]
else:
del self._dic[key]
def __iter__(self):
""" Iterate over all lazy and traditional keys. """
return itertools.chain(self._dic.copy(), self._lazyload.copy())
def __len__(self):
""" Return the number of traditional and lazy keys. """
return len(self._dic) + len(self._lazyload)
# additional class to mimic dict behavior
def __str__(self):
""" Return a string representation of the object. """
        if len(self._dic) == 0 or len(self._lazyload) == 0:
            separator = ''
        else:
            separator = ', '
        lazy_reprs = [(repr(k), repr(v)) for k, v in self._lazyload.items()]
        lazy_strs = ['%s: LazyLoad(%s)' % r for r in lazy_reprs]
        lazy_str = ", ".join(lazy_strs) + '}'
        return str(self._dic)[:-1] + separator + lazy_str
def has_key(self, key):
""" True if dictionary has key, else False. """
return key in self
def copy(self):
"""
Return a copy of the dictionary.
Lazy keys are not evaluated in the original or copied dictionary.
"""
dic = self.__class__(self._dic.copy())
# load all lazy keys into the copy
for key, value_callable in self._lazyload.items():
dic.set_lazy(key, value_callable)
return dic
# lazy dictionary specific methods
def set_lazy(self, key, value_callable):
""" Set a lazy key to load from a callable object. """
if key in self._dic:
del self._dic[key]
self._lazyload[key] = value_callable
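if __name__ == '__main__':
    # Minimal usage sketch (not part of Py-ART proper): the lazy key is only
    # evaluated on first access and the result is cached afterwards.
    d = LazyLoadDict({'key1': 'value1'})
    d.set_lazy('lazykey1', lambda: 999)
    print('lazykey1' in d.keys())   # True, without calling the lambda
    print(d['lazykey1'])            # 999, lambda evaluated and cached here
    print(d)                        # now a plain entry, no LazyLoad() remains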
| 32.440299 | 79 | 0.624109 | [
"MIT"
] | 1271756664/study | pycwr/configure/pyart_lazydict.py | 4,347 | Python |
from apis.creat_account.api_account_setAlias import account_setAlias
from apis.creat_account.api_create_account import create_account, create_account_100
from apis.creat_account.api_get_addresslist import get_address_list
from apis.transfer.blockmgr_sendRawTransaction import sendRawTransaction
from apis.transfer.time_of_account_1 import transation_120_account_1
from apis.transfer_inquiry.api_chain_getBalance import check_transfer_balance, transfer_balance, \
getBalance_of_all_address_list, chain_getBalance
from apis.transfer.api_chain_transaction import transaction_one, random_transaction
from apis.transfer_inquiry.trace_getRawTransaction import getRawTransaction, getTransaction, decodeTrasnaction, \
getReceiveTransactionByAd, rebuild, getSendTransactionByAddr
from apis.vote_message.account_voteCredit import voteCredit
from apis.vote_message.chain_getCreditDetails import getVoteCreditDetails
from apis.交易池中的交易状态及交易池中的交易流转过程.blockmgr_getPoolTransactions import getPoolTransactions
from apis.交易池中的交易状态及交易池中的交易流转过程.blockmgr_getTransactionCount import getTransactionCount
from apis.交易池中的交易状态及交易池中的交易流转过程.blockmgr_getTxInPool import blockmgrGetTxInPool
api_route = {
"create_account": create_account,
"create_account_100": create_account_100,
"get_address_list": get_address_list,
"account_setAlias": account_setAlias,
"transaction_one": transaction_one,
"random_transaction": random_transaction,
"chain_getBalance": chain_getBalance,
"getBalance_of_all_address_list": getBalance_of_all_address_list,
# "creat_one_wallet_account": creat_one_wallet_account,
"transation_120_account_1": transation_120_account_1,
"transfer_balance": transfer_balance,
"check_transfer_balance": check_transfer_balance,
"getRawTransaction": getRawTransaction,
"getTransaction": getTransaction,
"decodeTrasnaction": decodeTrasnaction,
"getSendTransactionByAddr": getSendTransactionByAddr,
"getReceiveTransactionByAd": getReceiveTransactionByAd,
"rebuild": rebuild,
"blockmgrGetTxInPool": blockmgrGetTxInPool,
"getPoolTransactions": getPoolTransactions,
"getTransactionCount": getTransactionCount,
"blockmgr_sendRawTransaction": sendRawTransaction,
"account_voteCredit": voteCredit,
"chain_getVoteCreditDetails": getVoteCreditDetails,
}
# API dispatcher: looks up a registered case by name and runs it
def runCase(case_name):
"""
    :param case_name: Name of a case registered in api_route
    :return: Return value of the looked-up function; note the call format xxx(case_name)()
"""
return api_route.get(case_name)()
if __name__ == '__main__':
print(runCase("create_account_100"))
print(runCase("create_account"))
| 40.852459 | 113 | 0.854334 | [
"Apache-2.0"
] | DerWalundDieKatze/Yumekui | apis/router.py | 2,632 | Python |
import db
import sys
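# Usage sketch (assumes the sibling db module exposes a live MySQL-style
# `connection` object): python drop_database.py database_one database_two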
if db.connection.is_connected():
    for database_name in sys.argv[1:]:
cursor = db.connection.cursor()
cursor.execute("DROP DATABASE {}".format(database_name))
print(" > Database {} has been dropped!".format(database_name)) | 35.375 | 71 | 0.681979 | [
"MIT"
] | naufalfachrian/py-database-manager | drop_database.py | 283 | Python |
from django.contrib import messages
from django.http import QueryDict
from django.shortcuts import render, redirect
from django.http import HttpResponseRedirect, HttpResponse, Http404
from django.contrib.admin.views.decorators import staff_member_required
from django.template.defaulttags import register
from common.exporter import find_all_exporters
from common.utility import get_image_as_http_response
from common.importer import find_all_importers
from common.search_filters import SearchFilter
from common.label import get_complete_label_name
from django.urls import reverse
from django.core.paginator import Paginator, PageNotAnInteger, EmptyPage
import os
from .forms import *
from .models import *
from common.user import is_annotater
def get_task_statistics(tasks, user):
for task in tasks:
# Check if user has processed any
task.started = ImageAnnotation.objects.filter(task=task, user=user).count() > 0
task.finished = task.number_of_annotated_images == task.total_number_of_images
def index(request):
context = {}
if is_annotater(request.user):
# Show only tasks assigned to this user
tasks = Task.objects.filter(user=request.user)
get_task_statistics(tasks, request.user)
context['tasks'] = tasks
return render(request, 'annotationweb/index_annotater.html', context)
else:
# Admin page
# Classification tasks
tasks = Task.objects.all()
get_task_statistics(tasks, request.user)
context['tasks'] = tasks
return render(request, 'annotationweb/index_admin.html', context)
@staff_member_required
def export(request, task_id):
try:
task = Task.objects.get(pk=task_id)
except Task.DoesNotExist:
raise Http404('Task does not exist')
if request.method == 'POST':
exporter_index = int(request.POST['exporter'])
return redirect('export_options', task_id=task.id, exporter_index=exporter_index)
else:
available_exporters = find_all_exporters(task.type)
# If only 1 exporter exists for this type, use that one
if len(available_exporters) == 1:
return redirect('export_options', task_id=task.id, exporter_index=0)
else:
return render(request, 'annotationweb/choose_exporter.html', {'exporters': available_exporters, 'task': task})
@staff_member_required
def export_options(request, task_id, exporter_index):
try:
task = Task.objects.get(pk=task_id)
except Task.DoesNotExist:
raise Http404('Task does not exist')
available_exporters = find_all_exporters(task.type)
exporter = available_exporters[int(exporter_index)]()
exporter.task = task
if request.method == 'POST':
form = exporter.get_form(data=request.POST)
if form.is_valid():
success, message = exporter.export(form)
if success:
messages.success(request, 'Export finished: ' + message)
else:
messages.error(request, 'Export failed: ' + message)
return redirect('index')
else:
# Get unbound form
form = exporter.get_form()
return render(request, 'annotationweb/export_options.html', {'form': form, 'exporter_index': exporter_index, 'task': task})
@staff_member_required
def import_data(request, dataset_id):
try:
dataset = Dataset.objects.get(pk=dataset_id)
except Dataset.DoesNotExist:
raise Http404('Dataset does not exist')
if request.method == 'POST':
importer_index = int(request.POST['importer'])
return redirect('import_options', dataset_id=dataset.id, importer_index=importer_index)
else:
available_importers = find_all_importers()
return render(request, 'annotationweb/choose_importer.html', {'importers': available_importers, 'dataset': dataset})
@staff_member_required
def import_options(request, dataset_id, importer_index):
try:
dataset = Dataset.objects.get(pk=dataset_id)
except Dataset.DoesNotExist:
raise Http404('Dataset does not exist')
available_importers = find_all_importers()
importer = available_importers[int(importer_index)]()
importer.dataset = dataset
if request.method == 'POST':
form = importer.get_form(data=request.POST)
if form.is_valid():
success, message = importer.import_data(form)
if success:
messages.success(request, 'Import finished: ' + message)
else:
messages.error(request, 'Import failed: ' + message)
return redirect('index')
else:
# Get unbound form
form = importer.get_form()
return render(request, 'annotationweb/import_options.html', {'form': form, 'importer_index': importer_index, 'dataset': dataset})
def show_image(request, image_id, task_id):
try:
task = Task.objects.get(pk=task_id)
image = ImageSequence.objects.get(pk=image_id)
frame = int(image.nr_of_frames/2)
filename = image.format.replace('#', str(frame))
except Task.DoesNotExist:
raise Http404('Task does not exist')
except ImageSequence.DoesNotExist:
raise Http404('Image does not exist')
return get_image_as_http_response(filename, task.post_processing_method)
@staff_member_required
def new_task(request):
if request.method == 'POST':
form = TaskForm(request.POST)
if form.is_valid():
form.save()
return redirect('index')
else:
form = TaskForm()
context = {'form': form}
return render(request, 'annotationweb/new_task.html', context)
@staff_member_required
def new_label(request):
if request.method == 'POST':
form = LabelForm(request.POST)
if form.is_valid():
form.save()
return redirect('index')
else:
form = LabelForm()
context = {'form': form}
return render(request, 'annotationweb/new_label.html', context)
@staff_member_required
def delete_task(request, task_id):
# TODO do cleanup after deleting task?
try:
task = Task.objects.get(pk=task_id)
except Task.DoesNotExist:
        raise Http404('Task not found')
if request.method == 'POST':
if request.POST['choice'] == 'Yes':
task.delete()
messages.success(request, 'The task ' + task.name + ' was deleted.')
return redirect('index')
else:
return render(request, 'annotationweb/delete_task.html', {'task': task})
@staff_member_required
def datasets(request):
# Show all datasets
context = {}
context['datasets'] = Dataset.objects.all()
return render(request, 'annotationweb/datasets.html', context)
@staff_member_required
def new_dataset(request):
if request.method == 'POST':
form = DatasetForm(request.POST)
if form.is_valid():
form.save()
messages.success(request, 'New dataset created')
return redirect('datasets')
else:
form = DatasetForm()
return render(request, 'annotationweb/new_dataset.html', {'form': form})
@staff_member_required
def delete_dataset(request, dataset_id):
try:
dataset = Dataset.objects.get(pk=dataset_id)
except Dataset.DoesNotExist:
        raise Http404('Dataset not found')
if request.method == 'POST':
if request.POST['choice'] == 'Yes':
dataset.delete()
messages.success(request, 'Dataset ' + dataset.name + ' was deleted.')
return redirect('datasets')
else:
return render(request, 'annotationweb/delete_dataset.html', {'dataset': dataset})
def get_start_and_total_frames(file_format):
# Find start_number and total number of frames automatically
i = 0
# Start frame can either be 0 or 1
start_frame = None
nr_of_frames = 0
while True:
exists = False
if os.path.isfile(file_format.replace('#', str(i))):
exists = True
nr_of_frames += 1
if start_frame is None:
if exists:
start_frame = i
elif i > 1:
break
else:
if not exists:
break
i += 1
return start_frame, nr_of_frames
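# Example (hypothetical paths): with file_format '/data/us_#.png' and frames
# us_1.png .. us_100.png on disk, the loop above yields (1, 100); if neither
# index 0 nor index 1 exists it returns (None, 0).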
@staff_member_required
def add_image_sequence(request, subject_id):
try:
subject = Subject.objects.get(pk=subject_id)
except Subject.DoesNotExist:
raise Http404('Subject does not exist')
if request.method == 'POST':
form = ImageSequenceForm(request.POST)
if form.is_valid():
new_image_sequence = form.save(commit=False) # Create new model, but don't save to DB
start_frame, total_nr_of_frames = get_start_and_total_frames(new_image_sequence.format)
print(start_frame, total_nr_of_frames)
if start_frame is None:
messages.error(request, 'No data existed with the provided filename format.')
else:
new_image_sequence.nr_of_frames = total_nr_of_frames
new_image_sequence.start_frame_nr = start_frame
new_image_sequence.subject = subject
new_image_sequence.save() # Save to db
messages.success(request, 'Sequence successfully added')
return redirect('dataset_details', subject.dataset.id)
else:
form = ImageSequenceForm()
return render(request, 'annotationweb/add_image_sequence.html', {'form': form, 'subject': subject})
@staff_member_required
def select_key_frames(request, task_id, image_id):
try:
image_sequence = ImageSequence.objects.get(pk=image_id)
task = Task.objects.get(pk=task_id)
except ImageSequence.DoesNotExist:
raise Http404('Image sequence does not exist')
except Task.DoesNotExist:
raise Http404('Task does not exist')
if request.method == 'POST':
frame_list = request.POST.getlist('frames')
if len(frame_list) == 0:
messages.error(request, 'You must select at least 1 frame')
else:
# Add annotation object if not exists
try:
annotation = ImageAnnotation.objects.get(image_id=image_id, task_id=task_id)
except ImageAnnotation.DoesNotExist:
annotation = ImageAnnotation()
annotation.image_id = image_id
annotation.task_id = task_id
annotation.rejected = False
annotation.user = request.user
annotation.finished = False
annotation.save()
# Add frames to db
for frame_nr in frame_list:
# Add new key frames if not exists
print(frame_nr)
try:
key_frame = KeyFrameAnnotation.objects.get(image_annotation=annotation, frame_nr=frame_nr)
# Already exists, do nothing
except KeyFrameAnnotation.DoesNotExist:
# Does not exist, add it
key_frame = KeyFrameAnnotation()
key_frame.image_annotation = annotation
key_frame.frame_nr = frame_nr
key_frame.save()
if annotation.finished:
# New frame, mark annotation as unfinished
annotation.finished = False
annotation.save()
# Delete frames that were not added
to_delete = KeyFrameAnnotation.objects.filter(image_annotation=annotation).exclude(frame_nr__in=frame_list)
deleted_count = len(to_delete)
to_delete.delete()
messages.success(request, 'The ' + str(len(frame_list)) + ' key frames were stored. ' + str(deleted_count) + ' key frames were deleted.')
return redirect('task', task_id)
else:
frames = KeyFrameAnnotation.objects.filter(image_annotation__image=image_sequence, image_annotation__task=task)
return render(request, 'annotationweb/add_key_frames.html', {'image_sequence': image_sequence, 'task': task, 'frames': frames})
def show_frame(request, image_sequence_id, frame_nr, task_id):
# Get image sequence the key frame belongs to
try:
task = Task.objects.get(pk=task_id)
image_sequence = ImageSequence.objects.get(pk=image_sequence_id)
except Task.DoesNotExist:
raise Http404('Task does not exist')
except ImageSequence.DoesNotExist:
raise Http404('Image sequence does not exist')
filename = image_sequence.format.replace('#', str(frame_nr))
return get_image_as_http_response(filename, task.post_processing_method)
@staff_member_required()
def dataset_details(request, dataset_id):
try:
dataset = Dataset.objects.get(pk=dataset_id)
except Dataset.DoesNotExist:
        raise Http404('The dataset does not exist')
return render(request, 'annotationweb/dataset_details.html', {'dataset': dataset})
@staff_member_required()
def new_subject(request, dataset_id):
try:
dataset = Dataset.objects.get(pk=dataset_id)
except Dataset.DoesNotExist:
        raise Http404('The dataset does not exist')
if request.method == 'POST':
form = SubjectForm(request.POST)
if form.is_valid():
subject = form.save(commit=False)
subject.dataset = dataset
subject.save()
messages.success(request, 'Subject added')
return redirect('dataset_details', dataset.id)
else:
form = SubjectForm()
return render(request, 'annotationweb/new_subject.html', {'dataset': dataset, 'form': form})
@staff_member_required()
def delete_subject(request, subject_id):
try:
subject = Subject.objects.get(pk=subject_id)
except Subject.DoesNotExist:
        raise Http404('The subject does not exist')
if request.method == 'POST':
if request.POST['choice'] == 'Yes':
subject.delete()
messages.success(request, 'The subject ' + subject.name + ' was deleted.')
return redirect('dataset_details', subject.dataset.id)
else:
return render(request, 'annotationweb/delete_subject.html', {'subject': subject})
@staff_member_required()
def subject_details(request, subject_id):
try:
subject = Subject.objects.get(pk=subject_id)
except Subject.DoesNotExist:
        raise Http404('The subject does not exist')
return render(request, 'annotationweb/subject_details.html', {'subject': subject})
@staff_member_required()
def delete_sequence(request, sequence_id):
try:
sequence = ImageSequence.objects.get(pk=sequence_id)
except ImageSequence.DoesNotExist:
        raise Http404('The sequence does not exist')
if request.method == 'POST':
if request.POST['choice'] == 'Yes':
sequence.delete()
            messages.success(request, 'The sequence ' + sequence.format + ' was deleted.')
return redirect('subject_details', sequence.subject.id)
else:
return render(request, 'annotationweb/delete_sequence.html', {'sequence': sequence})
def task_description(request, task_id):
try:
task = Task.objects.get(pk=task_id)
except Task.DoesNotExist:
        raise Http404('The Task does not exist')
if task.type == task.CLASSIFICATION:
url = reverse('classification:label_image', args=[task_id])
elif task.type == task.BOUNDING_BOX:
url = reverse('boundingbox:process_image', args=[task_id])
elif task.type == task.LANDMARK:
url = reverse('landmark:process_image', args=[task_id])
elif task.type == task.CARDIAC_SEGMENTATION:
url = reverse('cardiac:segment_image', args=[task_id])
elif task.type == task.SPLINE_SEGMENTATION:
url = reverse('spline_segmentation:segment_image', args=[task_id])
else:
raise NotImplementedError()
return render(request, 'annotationweb/task_description.html', {'task': task, 'continue_url': url})
@register.simple_tag
def url_replace(request, field, value):
dict_ = request.GET.copy()
dict_[field] = value
return dict_.urlencode()
@register.simple_tag
def complete_label(label):
return get_complete_label_name(label)
@register.filter(name='times')
def times(number):
return range(number)
def reset_filters(request, task_id):
try:
task = Task.objects.get(pk=task_id)
except Task.DoesNotExist:
        raise Http404('The Task does not exist')
search_filters = SearchFilter(request, task)
search_filters.delete()
return redirect('task', task_id)
def task(request, task_id):
# Image list site
try:
task = Task.objects.get(pk=task_id)
except Task.DoesNotExist:
        raise Http404('The Task does not exist')
search_filters = SearchFilter(request, task)
if request.method == 'POST':
form = search_filters.create_form(data=request.POST)
else:
form = search_filters.create_form()
queryset = ImageSequence.objects.all()
# Get all processed images for given task
sort_by = search_filters.get_value('sort_by')
subjects_selected = search_filters.get_value('subject')
users_selected = search_filters.get_value('user')
image_quality = search_filters.get_value('image_quality')
metadata = search_filters.get_value('metadata')
if len(metadata) > 0:
metadata_dict = {}
for item in metadata:
parts = item.split(': ')
if len(parts) != 2:
raise Exception('Error: must be 2 parts')
name = parts[0]
value = parts[1]
if name in metadata_dict.keys():
metadata_dict[name].append(value)
else:
metadata_dict[name] = [value]
for name, values in metadata_dict.items():
queryset = queryset.filter(
imagemetadata__name=name,
imagemetadata__value__in=values
)
if sort_by == ImageListForm.SORT_IMAGE_ID:
queryset = queryset.filter(
subject__dataset__task=task,
subject__in=subjects_selected
)
elif sort_by == ImageListForm.SORT_NOT_ANNOTATED_IMAGE_ID:
queryset = queryset.filter(
subject__dataset__task=task,
subject__in=subjects_selected
).exclude(imageannotation__task=task, imageannotation__finished=True)
else:
if task.type == Task.CLASSIFICATION:
labels_selected = search_filters.get_value('label')
queryset = queryset.filter(
imageannotation__image_quality__in=image_quality,
imageannotation__task=task,
imageannotation__finished=True,
imageannotation__user__in=users_selected,
imageannotation__keyframeannotation__imagelabel__in=labels_selected,
subject__in=subjects_selected,
)
else:
queryset = queryset.filter(
imageannotation__image_quality__in=image_quality,
imageannotation__task=task,
imageannotation__finished=True,
imageannotation__user__in=users_selected,
subject__in=subjects_selected
)
if sort_by == ImageListForm.SORT_DATE_DESC:
queryset = queryset.order_by('-imageannotation__date')
else:
queryset = queryset.order_by('imageannotation__date')
paginator = Paginator(queryset, 12)
page = request.GET.get('page')
try:
images = paginator.page(page)
except PageNotAnInteger:
# If page is not an integer, deliver first page.
images = paginator.page(1)
except EmptyPage:
# If page is out of range (e.g. 9999), deliver last page of results.
images = paginator.page(paginator.num_pages)
for image in images:
# Get annotation
try:
image.annotation = ImageAnnotation.objects.get(image=image, task=task)
image.annotation_frames = KeyFrameAnnotation.objects.filter(image_annotation=image.annotation)
except:
pass
return_url = reverse('task', kwargs={'task_id': task_id})
if page is not None:
return_url += '?page=' + str(page)
request.session['return_to_url'] = return_url
return render(request, 'annotationweb/task.html', {'images': images, 'task': task, 'form': form})
def get_redirection(task):
if task.type == Task.CLASSIFICATION:
return 'classification:label_image'
elif task.type == Task.BOUNDING_BOX:
return 'boundingbox:process_image'
elif task.type == Task.LANDMARK:
return 'landmark:process_image'
elif task.type == Task.CARDIAC_SEGMENTATION:
return 'cardiac:segment_image'
elif task.type == Task.SPLINE_SEGMENTATION:
return 'spline_segmentation:segment_image'
# @register.simple_tag
# def urlencode_dict(dict):
# print(dict)
# url = ''
# if len(dict) > 0:
# first = True
# for key, value_list in dict.items():
# print(value_list)
# if type(value_list) is not list:
# value_list = [value_list]
# for value in value_list:
# if first:
# url += '?'
# first = False
# else:
# url += '&'
#
# url += key + '=' + str(value)
#
# return mark_safe(url)
def annotate_next_image(request, task_id):
# Find the task type and redirect
try:
task = Task.objects.get(pk=task_id)
except Task.DoesNotExist:
        raise Http404('The Task does not exist')
url = reverse(get_redirection(task), kwargs={'task_id': task.id})
return redirect(url + '?' + request.GET.urlencode())
def annotate_image(request, task_id, image_id):
# Find the task type and redirect
try:
task = Task.objects.get(pk=task_id)
except Task.DoesNotExist:
        raise Http404('The Task does not exist')
url = reverse(get_redirection(task), kwargs={'task_id': task.id, 'image_id': image_id})
return redirect(url + '?' + request.GET.urlencode())
| 34.849765 | 149 | 0.648121 | [
"MIT"
] | andreped/annotationweb | annotationweb/views.py | 22,269 | Python |
import copy
import torch
from torch import nn
import numpy as np
from tokens import *
def tokenize(corpus, callback=lambda sent: sent.split()):
return [callback(sent) for sent in corpus]
def add_start_stop_tokens(corpus):
return [[START_TOKEN] + sent + [STOP_TOKEN] for sent in corpus]
def padding(corpus, seq_len):
for sent in corpus:
while len(sent) < seq_len:
sent.append(PAD_TOKEN)
while len(sent) > seq_len:
sent.pop()
return corpus
def build_vocab(corpus):
vocab = set()
for sent in corpus:
vocab.update(set(sent))
vocab = list(vocab) + [UNK_TOKEN]
word2idx = {word: idx for idx, word in enumerate(vocab)}
idx2word = {idx: word for idx, word in enumerate(vocab)}
return vocab, word2idx, idx2word
def convert_to_idx(corpus, word2idx):
return [[word2idx.get(word, "<UNK>") for word in sent] for sent in corpus]
# Output Processing
def process_output_corpus(input_seqs, preds, trues):
new_seqs = []
new_preds = []
new_trues = []
for i in range(len(input_seqs)):
new_seq, new_pred, new_true = remove_special_tokens(
input_seqs[i], preds[i], trues[i]
)
new_seqs.append(new_seq)
new_preds.append(new_pred)
new_trues.append(new_true)
return new_seqs, new_preds, new_trues
def remove_special_tokens(input_seq, pred, true):
new_seq = []
new_pred = []
new_true = []
new_seq = input_seq[1:-1]
new_true = true[1:-1]
new_pred = pred[1:]
# if is truncated padding
while len(new_pred) < len(new_seq):
new_pred.append(PAD_TOKEN)
# if is expanded padding
while len(new_pred) > len(new_seq):
new_pred = new_pred[:-1]
return new_seq, new_pred, new_true
def convert_to_token(corpus, idx2token):
return [[idx2token[token_idx] for token_idx in sent] for sent in corpus]
def preprocess_utterances(utterances, utterance_dataset):
# tokenization
utterances = tokenize(utterances)
# add special tokens
utterances = add_start_stop_tokens(utterances)
tokenized_utterances = copy.deepcopy(utterances)
# padding
utterances = padding(utterances, utterance_dataset.seq_len)
word2idx = utterance_dataset.word2idx
utterances = [
[word2idx.get(token, word2idx[UNK_TOKEN]) for token in sent]
for sent in utterances
]
return utterances, tokenized_utterances
def read_glove_vector(glove_vec):
with open(glove_vec, "r", encoding="UTF-8") as f:
words = set()
word_to_vec_map = {}
for line in f:
w_line = line.split()
curr_word = w_line[0]
word_to_vec_map[curr_word] = np.array(w_line[1:], dtype=np.float64)
return word_to_vec_map
# functions for creating the embedding layer
def get_one_hot_matrix(vocab):
one_hot_matrix = np.zeros((len(vocab), len(vocab)))
np.fill_diagonal(one_hot_matrix, 1)
return one_hot_matrix
def get_glove_matrix(glove_map, vocab):
matrix_len = len(vocab)
emb_dim = len(list(glove_map.values())[0])
weights_matrix = np.zeros((matrix_len, emb_dim))
for i, word in enumerate(vocab):
try:
weights_matrix[i] = glove_map[word]
except KeyError:
if word in [PAD_TOKEN, START_TOKEN, STOP_TOKEN]:
weights_matrix[i] = np.zeros((emb_dim,))
else:
weights_matrix[i] = np.random.normal(
scale=0.6, size=(emb_dim,)
)
return weights_matrix
def create_emb_layer(weights_matrix, non_trainable=False):
num_embeddings, embedding_dim = weights_matrix.shape
emb_layer = nn.Embedding(num_embeddings, embedding_dim)
emb_layer.load_state_dict({"weight": torch.tensor(weights_matrix)})
if non_trainable:
emb_layer.weight.requires_grad = False
return emb_layer, num_embeddings, embedding_dim
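if __name__ == "__main__":
    # Minimal sketch of the embedding pipeline with made-up data; the token
    # constants come from tokens.py and the GloVe map is faked here instead of
    # being read from disk with read_glove_vector().
    vocab = [PAD_TOKEN, START_TOKEN, STOP_TOKEN, UNK_TOKEN, "hello", "world"]
    fake_glove = {"hello": np.ones(4), "world": np.full(4, 0.5)}
    weights = get_glove_matrix(fake_glove, vocab)
    emb_layer, num_embeddings, embedding_dim = create_emb_layer(weights, non_trainable=True)
    print(num_embeddings, embedding_dim)          # 6 4
    print(emb_layer(torch.tensor([4, 5])).shape)  # torch.Size([2, 4])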
| 27.65493 | 79 | 0.663356 | [
"MIT"
] | daohuei/ucsc-nlp-unicorn | nlp_243/hw3/utils.py | 3,927 | Python |
import numpy as np
'''
dccol : 1-8
dcpad : 1-10
mcecol: 0,1
mcerow: 0-32
'''
#w,h = 10,8
# def det2mce(detcol,detrow,detpol):
# dccol,dcpad = det2dc(detcol,detrow,detpol)
# if dccol<0 or dcpad<0:
# return -1,-1
# mcecol,mcerow = dc2mce(dccol,dcpad)
# return mcecol,mcerow
def mce2det(mcecol,mcerow):
if mcecol<=17:
detcol=mcecol
else:
detcol=mcecol-18
if mcerow<=17:
detrow=mcerow
detpol='A'
if mcerow>17:
detrow=mcerow-18
detpol='B'
#detcol,detrow,detpol = dc2det(dccol,dcpad)
im = 0 #not sure what this is
return im,detcol,detrow,detpol
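if __name__ == '__main__':
    # Quick sanity check with a couple of made-up MCE coordinates; the mapping
    # above simply folds columns/rows above 17 back and flags them as polarity 'B'.
    print(mce2det(3, 5))    # (0, 3, 5, 'A')
    print(mce2det(20, 25))  # (0, 2, 7, 'B')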
| 17.75 | 45 | 0.679577 | [
"MIT"
] | LorenzoMinutolo/SigViz | ba150_ModuleMapping_fake.py | 568 | Python |
import datetime
from .. import db
tags = db.Table(
'post_tags',
db.Column('post_id', db.Integer, db.ForeignKey('post.id')),
db.Column('tag_id', db.Integer, db.ForeignKey('tag.id'))
)
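# Usage sketch (assumes an application context and a configured db session):
#     post = Post('Hello Flask')
#     post.tags.append(Tag('flask'))
#     db.session.add(post)
#     db.session.commit()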
class Post(db.Model):
id = db.Column(db.Integer(), primary_key=True)
title = db.Column(db.String(255), nullable=False)
text = db.Column(db.Text(), nullable=False)
publish_date = db.Column(db.DateTime(), default=datetime.datetime.now)
user_id = db.Column(db.Integer(), db.ForeignKey('user.id'))
youtube_id = db.Column(db.String(255))
comments = db.relationship('Comment', backref='post', lazy='dynamic')
tags = db.relationship('Tag', secondary=tags, backref=db.backref('posts', lazy='dynamic'))
def __init__(self, title=""):
self.title = title
def __repr__(self):
return "<Post '{}'>".format(self.title)
class Comment(db.Model):
id = db.Column(db.Integer(), primary_key=True)
name = db.Column(db.String(255), nullable=False)
text = db.Column(db.Text(), nullable=False)
date = db.Column(db.DateTime(), default=datetime.datetime.now)
post_id = db.Column(db.Integer(), db.ForeignKey('post.id'))
def __repr__(self):
return "<Comment '{}'>".format(self.text[:15])
class Tag(db.Model):
id = db.Column(db.Integer(), primary_key=True)
title = db.Column(db.String(255), nullable=False, unique=True)
def __init__(self, title=""):
self.title = title
def __repr__(self):
return "<Tag '{}'>".format(self.title)
class Reminder(db.Model):
id = db.Column(db.Integer(), primary_key=True)
date = db.Column(db.DateTime())
email = db.Column(db.String())
text = db.Column(db.Text())
def __repr__(self):
return "<Reminder '{}'>".format(self.text[:20])
| 30.810345 | 94 | 0.641858 | [
"MIT"
] | PacktPublishing/Mastering-Flask-Web-Development | Chapter11/webapp/blog/models.py | 1,787 | Python |
# -*- coding: utf-8 -*-
"""Tests for :mod:`docdata`."""
| 14.25 | 31 | 0.491228 | [
"MIT"
] | cthoyt/docdata | tests/__init__.py | 57 | Python |
from enum import Enum
from .factory import createFromUri
import os
import requests
class Record(object):
"""
    Create a Polarion test record.
    :param polarion: Polarion client object
    :param test_run: Test run instance
    :param polarion_record: The data from Polarion of this test record
:param index: The index of this record in the test run
"""
class ResultType(Enum):
"""
Record result enum
"""
No = None
PASSED = 'passed'
FAILED = 'failed'
BLOCKED = 'blocked'
def __init__(self, polarion, test_run, polarion_record, index):
self._polarion = polarion
self._test_run = test_run
self._polarion_record = polarion_record
self._index = index
self._buildWorkitemFromPolarion()
def _buildWorkitemFromPolarion(self):
# parse all polarion attributes to this class
for attr, value in self._polarion_record.__dict__.items():
for key in value:
setattr(self, key, value[key])
self._testcase = self._polarion_record.testCaseURI
self._testcase_name = self._testcase.split('}')[1]
self._defect = self._polarion_record.defectURI
def _reloadFromPolarion(self):
service = self._polarion.getService('TestManagement')
self._polarion_record = service.getTestCaseRecords(self._test_run.uri, self._testcase)[0]
self._buildWorkitemFromPolarion()
# self._original_polarion_test_run = copy.deepcopy(self._polarion_test_run)
def setTestStepResult(self, step_number, result: ResultType, comment=None):
""""
Set the result of a test step
:param step_number: Step number
        :param result: The result of the test step
:param comment: An optional comment
"""
if self.testStepResults is None:
# get the number of test steps in
service = self._polarion.getService('TestManagement')
test_steps = service.getTestSteps(self.testCaseURI)
number_of_steps = 0
if test_steps.steps is not None:
number_of_steps = len(test_steps.steps.TestStep)
self.testStepResults = self._polarion.ArrayOfTestStepResultType()
for _i in range(number_of_steps):
self.testStepResults.TestStepResult.append(
self._polarion.TestStepResultType())
if step_number < len(self.testStepResults.TestStepResult):
self.testStepResults.TestStepResult[step_number].result = self._polarion.EnumOptionIdType(
id=result.value)
if comment is not None:
self.testStepResults.TestStepResult[step_number].comment = self._polarion.TextType(
content=comment, type='text/html', contentLossy=False)
self.save()
def getResult(self):
"""
Get the test result of this record
:return: The test case result
:rtype: ResultType
"""
if self.result is not None:
return self.ResultType(self.result.id)
return self.ResultType.No
def getComment(self):
"""
Get a comment if available. The comment may contain HTML if edited in Polarion!
:return: Get the comment, may contain HTML
:rtype: string
"""
if self.comment is not None:
return self.comment.content
return None
@property
def testcase_id(self):
"""
The test case name including prefix
"""
return self._testcase_name
def getTestCaseName(self):
"""
Get the test case name including prefix
:return: The name
:rtype: string
"""
return self._testcase_name
def setComment(self, comment):
"""
        Set the comment of this record. The comment is stored locally and is
        sent to Polarion on the next save() or setResult() call.
:param comment: Comment string, may contain HTML
"""
self.comment = self._polarion.TextType(
content=comment, type='text/html', contentLossy=False)
def setResult(self, result: ResultType = ResultType.FAILED, comment=None):
"""
Set the result of this record and save it.
:param result: The result of this record
:param comment: Comment string, may contain HTML
"""
if comment is not None:
self.setComment(comment)
if self.result is not None:
self.result.id = result.value
else:
self.result = self._polarion.EnumOptionIdType(
id=result.value)
self.save()
def getExecutingUser(self):
"""
Gets the executing user if the test was executed
:return: The user
:rtype: User/None
"""
if self.executedByURI is not None:
return createFromUri(self._polarion, None, self.executedByURI)
return None
def hasAttachment(self):
"""
Checks if the Record has attachments
:return: True/False
:rtype: boolean
"""
if self.attachments is not None:
return True
return False
def getAttachment(self, file_name):
"""
Get the attachment data
:param file_name: The attachment file name
:return: list of bytes
:rtype: bytes[]
"""
# find the file
url = None
for attachment in self.attachments.TestRunAttachment:
if attachment.fileName == file_name:
url = attachment.url
if url is not None:
resp = requests.get(url, auth=(self._polarion.user, self._polarion.password))
if resp.ok:
return resp.content
else:
raise Exception(f'Could not download attachment {file_name}')
else:
raise Exception(f'Could not find attachment with name {file_name}')
def saveAttachmentAsFile(self, file_name, file_path):
"""
Save an attachment to file.
:param file_name: The attachment file name
:param file_path: File where to save the attachment
"""
bin = self.getAttachment(file_name)
with open(file_path, "wb") as file:
file.write(bin)
def deleteAttachment(self, file_name):
"""
Delete an attachment.
:param file_name: The attachment file name
"""
service = self._polarion.getService('TestManagement')
service.deleteAttachmentFromTestRecord(self._test_run.uri, self._index, file_name)
self._reloadFromPolarion()
def addAttachment(self, file_path, title):
"""
Upload an attachment
:param file_path: Source file to upload
:param title: The title of the attachment
"""
service = self._polarion.getService('TestManagement')
file_name = os.path.split(file_path)[1]
with open(file_path, "rb") as file_content:
service.addAttachmentToTestRecord(self._test_run.uri, self._index, file_name, title, file_content.read())
self._reloadFromPolarion()
def testStepHasAttachment(self, step_index):
"""
Checks if the a test step has attachments
:param step_index: The test step index
:return: True/False
:rtype: boolean
"""
if self.testStepResults is None:
return False
if self.testStepResults.TestStepResult[step_index].attachments is not None:
return True
return False
def getAttachmentFromTestStep(self, step_index, file_name):
"""
Get the attachment data from a test step
:param step_index: The test step index
:param file_name: The attachment file name
:return: list of bytes
:rtype: bytes[]
"""
# find the file
url = None
for attachment in self.testStepResults.TestStepResult[step_index].attachments.TestRunAttachment:
if attachment.fileName == file_name:
url = attachment.url
if url is not None:
resp = requests.get(url, auth=(self._polarion.user, self._polarion.password))
if resp.ok:
return resp.content
else:
raise Exception(f'Could not download attachment {file_name}')
else:
raise Exception(f'Could not find attachment with name {file_name}')
def saveAttachmentFromTestStepAsFile(self, step_index, file_name, file_path):
"""
Save an attachment to file from a test step
:param step_index: The test step index
:param file_name: The attachment file name
:param file_path: File where to save the attachment
"""
bin = self.getAttachmentFromTestStep(step_index, file_name)
with open(file_path, "wb") as file:
file.write(bin)
def deleteAttachmentFromTestStep(self, step_index, file_name):
"""
Delete an attachment from a test step
:param step_index: The test step index
:param file_name: The attachment file name
"""
service = self._polarion.getService('TestManagement')
service.deleteAttachmentFromTestStep(self._test_run.uri, self._index, step_index, file_name)
self._reloadFromPolarion()
def addAttachmentToTestStep(self, step_index, file_path, title):
"""
Upload an attachment to a test step
:param step_index: The test step index
:param file_path: Source file to upload
:param title: The title of the attachment
"""
service = self._polarion.getService('TestManagement')
file_name = os.path.split(file_path)[1]
with open(file_path, "rb") as file_content:
service.addAttachmentToTestStep(self._test_run.uri, self._index, step_index, file_name, title, file_content.read())
self._reloadFromPolarion()
def save(self):
"""
Saves the current test record
"""
new_item = {}
for attr, value in self.__dict__.items():
if not attr.startswith('_'):
# only add if public value
new_item[attr] = value
service = self._polarion.getService('TestManagement')
service.executeTest(
self._test_run.uri, new_item)
self._reloadFromPolarion()
def __repr__(self):
return f'{self._testcase_name} in {self._test_run.id} ({self.getResult()} on {self.executed})'
def __str__(self):
return f'{self._testcase_name} in {self._test_run.id} ({self.getResult()} on {self.executed})'
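# Usage sketch, assuming a Record instance has already been obtained from a test
# run of a connected Polarion client (connection code not shown; only methods
# defined above are used):
#     record.setTestStepResult(0, Record.ResultType.PASSED, 'step ok')
#     record.setResult(Record.ResultType.PASSED, 'all steps passed')
#     print(record.getResult())               # ResultType.PASSED
#     record.addAttachment('log.txt', 'Execution log')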
| 33.851266 | 127 | 0.618678 | [
"MIT"
] | jesper-raemaekers/python-polarion | polarion/record.py | 10,697 | Python |
#!/usr/bin/env python
#
# Init file for Shotgun event daemon
#
# chkconfig: 345 99 00
# description: Shotgun event daemon
#
### BEGIN INIT INFO
# Provides: shotgunEvent
# Required-Start: $network
# Should-Start: $remote_fs
# Required-Stop: $network
# Should-Stop: $remote_fs
# Default-Start: 2 3 4 5
# Short-Description: Shotgun event daemon
# Description: Shotgun event daemon
### END INIT INFO
"""
For an overview of shotgunEvents, please see raw documentation in the docs
folder or an html compiled version at:
http://shotgunsoftware.github.com/shotgunEvents
"""
from __future__ import print_function
__version__ = "1.0"
__version_info__ = (1, 0)
# Suppress the deprecation warning about imp until we get around to replacing it
import warnings
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=DeprecationWarning)
import imp
import datetime
import logging
import logging.handlers
import os
import pprint
import socket
import sys
import time
import traceback
from six.moves import configparser
import six.moves.cPickle as pickle
from distutils.version import StrictVersion
if sys.platform == "win32":
import win32serviceutil
import win32service
import win32event
import servicemanager
import daemonizer
import shotgun_api3 as sg
from shotgun_api3.lib.sgtimezone import SgTimezone
SG_TIMEZONE = SgTimezone()
CURRENT_PYTHON_VERSION = StrictVersion(sys.version.split()[0])
PYTHON_26 = StrictVersion("2.6")
PYTHON_27 = StrictVersion("2.7")
EMAIL_FORMAT_STRING = """Time: %(asctime)s
Logger: %(name)s
Path: %(pathname)s
Function: %(funcName)s
Line: %(lineno)d
%(message)s"""
def _setFilePathOnLogger(logger, path):
# Remove any previous handler.
_removeHandlersFromLogger(logger, logging.handlers.TimedRotatingFileHandler)
# Add the file handler
handler = logging.handlers.TimedRotatingFileHandler(
path, "midnight", backupCount=10
)
handler.setFormatter(
logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s")
)
logger.addHandler(handler)
def _removeHandlersFromLogger(logger, handlerTypes=None):
"""
Remove all handlers or handlers of a specified type from a logger.
    @param logger: The logger whose handlers should be processed.
@type logger: A logging.Logger object
@param handlerTypes: A type of handler or list/tuple of types of handlers
that should be removed from the logger. If I{None}, all handlers are
removed.
@type handlerTypes: L{None}, a logging.Handler subclass or
I{list}/I{tuple} of logging.Handler subclasses.
"""
for handler in logger.handlers:
if handlerTypes is None or isinstance(handler, handlerTypes):
logger.removeHandler(handler)
def _addMailHandlerToLogger(
logger,
smtpServer,
fromAddr,
toAddrs,
emailSubject,
username=None,
password=None,
secure=None,
):
"""
Configure a logger with a handler that sends emails to specified
addresses.
The format of the email is defined by L{LogFactory.EMAIL_FORMAT_STRING}.
@note: Any SMTPHandler already connected to the logger will be removed.
@param logger: The logger to configure
@type logger: A logging.Logger instance
@param toAddrs: The addresses to send the email to.
@type toAddrs: A list of email addresses that will be passed on to the
SMTPHandler.
"""
if smtpServer and fromAddr and toAddrs and emailSubject:
mailHandler = CustomSMTPHandler(
smtpServer, fromAddr, toAddrs, emailSubject, (username, password), secure
)
mailHandler.setLevel(logging.ERROR)
mailFormatter = logging.Formatter(EMAIL_FORMAT_STRING)
mailHandler.setFormatter(mailFormatter)
logger.addHandler(mailHandler)
class Config(configparser.SafeConfigParser):
def __init__(self, path):
configparser.SafeConfigParser.__init__(self, os.environ)
self.read(path)
def getShotgunURL(self):
return self.get("shotgun", "server")
def getEngineScriptName(self):
return self.get("shotgun", "name")
def getEngineScriptKey(self):
return self.get("shotgun", "key")
def getEngineProxyServer(self):
try:
proxy_server = self.get("shotgun", "proxy_server").strip()
if not proxy_server:
return None
return proxy_server
except configparser.NoOptionError:
return None
def getEventIdFile(self):
return self.get("daemon", "eventIdFile")
def getEnginePIDFile(self):
return self.get("daemon", "pidFile")
def getPluginPaths(self):
return [s.strip() for s in self.get("plugins", "paths").split(",")]
def getSMTPServer(self):
return self.get("emails", "server")
def getSMTPPort(self):
if self.has_option("emails", "port"):
return self.getint("emails", "port")
return 25
def getFromAddr(self):
return self.get("emails", "from")
def getToAddrs(self):
return [s.strip() for s in self.get("emails", "to").split(",")]
def getEmailSubject(self):
return self.get("emails", "subject")
def getEmailUsername(self):
if self.has_option("emails", "username"):
return self.get("emails", "username")
return None
def getEmailPassword(self):
if self.has_option("emails", "password"):
return self.get("emails", "password")
return None
def getSecureSMTP(self):
if self.has_option("emails", "useTLS"):
return self.getboolean("emails", "useTLS") or False
return False
def getLogMode(self):
return self.getint("daemon", "logMode")
def getLogLevel(self):
return self.getint("daemon", "logging")
def getMaxEventBatchSize(self):
if self.has_option("daemon", "max_event_batch_size"):
return self.getint("daemon", "max_event_batch_size")
return 500
def getLogFile(self, filename=None):
if filename is None:
if self.has_option("daemon", "logFile"):
filename = self.get("daemon", "logFile")
else:
raise ConfigError("The config file has no logFile option.")
if self.has_option("daemon", "logPath"):
path = self.get("daemon", "logPath")
if not os.path.exists(path):
os.makedirs(path)
elif not os.path.isdir(path):
raise ConfigError(
"The logPath value in the config should point to a directory."
)
path = os.path.join(path, filename)
else:
path = filename
return path
def getTimingLogFile(self):
if (
not self.has_option("daemon", "timing_log")
or self.get("daemon", "timing_log") != "on"
):
return None
return self.getLogFile() + ".timing"
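# Illustrative config excerpt matching the getters above (section and option
# names come from this file; the values are made up):
#
#     [shotgun]
#     server: https://my-site.shotgunstudio.com
#     name: shotgunEventDaemon
#     key: 0123456789abcdef
#     use_session_uuid: True
#
#     [daemon]
#     pidFile: /var/run/shotgunEventDaemon.pid
#     eventIdFile: /var/log/shotgunEventDaemon.id
#     logPath: /var/log/shotgunEvents
#     logFile: shotgunEventDaemon.log
#     logMode: 0
#     logging: 20
#     max_conn_retries: 5
#     conn_retry_sleep: 60
#     fetch_interval: 5
#
#     [plugins]
#     paths: /usr/local/shotgun/plugins
#
#     [emails]
#     server: smtp.example.com
#     from: daemon@example.com
#     to: admin@example.com
#     subject: Shotgun event daemon error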
class Engine(object):
"""
The engine holds the main loop of event processing.
"""
def __init__(self, configPath):
""" """
self._continue = True
self._eventIdData = {}
# Read/parse the config
self.config = Config(configPath)
# Get config values
self._pluginCollections = [
PluginCollection(self, s) for s in self.config.getPluginPaths()
]
self._sg = sg.Shotgun(
self.config.getShotgunURL(),
self.config.getEngineScriptName(),
self.config.getEngineScriptKey(),
http_proxy=self.config.getEngineProxyServer(),
)
self._max_conn_retries = self.config.getint("daemon", "max_conn_retries")
self._conn_retry_sleep = self.config.getint("daemon", "conn_retry_sleep")
self._fetch_interval = self.config.getint("daemon", "fetch_interval")
self._use_session_uuid = self.config.getboolean("shotgun", "use_session_uuid")
# Setup the loggers for the main engine
if self.config.getLogMode() == 0:
# Set the root logger for file output.
rootLogger = logging.getLogger()
rootLogger.config = self.config
_setFilePathOnLogger(rootLogger, self.config.getLogFile())
print(self.config.getLogFile())
# Set the engine logger for email output.
self.log = logging.getLogger("engine")
self.setEmailsOnLogger(self.log, True)
else:
# Set the engine logger for file and email output.
self.log = logging.getLogger("engine")
self.log.config = self.config
_setFilePathOnLogger(self.log, self.config.getLogFile())
self.setEmailsOnLogger(self.log, True)
self.log.setLevel(self.config.getLogLevel())
# Setup the timing log file
timing_log_filename = self.config.getTimingLogFile()
if timing_log_filename:
self.timing_logger = logging.getLogger("timing")
self.timing_logger.setLevel(self.config.getLogLevel())
_setFilePathOnLogger(self.timing_logger, timing_log_filename)
else:
self.timing_logger = None
super(Engine, self).__init__()
def setEmailsOnLogger(self, logger, emails):
# Configure the logger for email output
_removeHandlersFromLogger(logger, logging.handlers.SMTPHandler)
if emails is False:
return
smtpServer = self.config.getSMTPServer()
smtpPort = self.config.getSMTPPort()
fromAddr = self.config.getFromAddr()
emailSubject = self.config.getEmailSubject()
username = self.config.getEmailUsername()
password = self.config.getEmailPassword()
if self.config.getSecureSMTP():
secure = (None, None)
else:
secure = None
if emails is True:
toAddrs = self.config.getToAddrs()
elif isinstance(emails, (list, tuple)):
toAddrs = emails
else:
msg = "Argument emails should be True to use the default addresses, False to not send any emails or a list of recipient addresses. Got %s."
raise ValueError(msg % type(emails))
_addMailHandlerToLogger(
logger,
(smtpServer, smtpPort),
fromAddr,
toAddrs,
emailSubject,
username,
password,
secure,
)
def start(self):
"""
Start the processing of events.
The last processed id is loaded up from persistent storage on disk and
the main loop is started.
"""
# TODO: Take value from config
socket.setdefaulttimeout(60)
# Notify which version of shotgun api we are using
self.log.info("Using SG Python API version %s" % sg.__version__)
try:
for collection in self._pluginCollections:
collection.load()
self._loadEventIdData()
self._mainLoop()
except KeyboardInterrupt:
self.log.warning("Keyboard interrupt. Cleaning up...")
except Exception as err:
msg = "Crash!!!!! Unexpected error (%s) in main loop.\n\n%s"
            self.log.critical(msg, type(err), traceback.format_exc())
def _loadEventIdData(self):
"""
Load the last processed event id from the disk
If no event has ever been processed or if the eventIdFile has been
deleted from disk, no id will be recoverable. In this case, we will try
contacting Shotgun to get the latest event's id and we'll start
processing from there.
"""
eventIdFile = self.config.getEventIdFile()
if eventIdFile and os.path.exists(eventIdFile):
try:
fh = open(eventIdFile, "rb")
try:
self._eventIdData = pickle.load(fh)
# Provide event id info to the plugin collections. Once
# they've figured out what to do with it, ask them for their
# last processed id.
noStateCollections = []
for collection in self._pluginCollections:
state = self._eventIdData.get(collection.path)
if state:
collection.setState(state)
else:
noStateCollections.append(collection)
# If we don't have a state it means there's no match
# in the id file. First we'll search to see the latest id a
# matching plugin name has elsewhere in the id file. We do
# this as a fallback in case the plugins directory has been
# moved. If there's no match, use the latest event id
# in Shotgun.
if noStateCollections:
maxPluginStates = {}
for collection in self._eventIdData.values():
for pluginName, pluginState in collection.items():
if pluginName in maxPluginStates.keys():
if pluginState[0] > maxPluginStates[pluginName][0]:
maxPluginStates[pluginName] = pluginState
else:
maxPluginStates[pluginName] = pluginState
lastEventId = self._getLastEventIdFromDatabase()
for collection in noStateCollections:
state = collection.getState()
for pluginName in state.keys():
if pluginName in maxPluginStates.keys():
state[pluginName] = maxPluginStates[pluginName]
else:
state[pluginName] = lastEventId
collection.setState(state)
except pickle.UnpicklingError:
fh.close()
# Backwards compatibility:
# Reopen the file to try to read an old-style int
fh = open(eventIdFile, "rb")
line = fh.readline().strip()
if line.isdigit():
# The _loadEventIdData got an old-style id file containing a single
# int which is the last id properly processed.
lastEventId = int(line)
self.log.debug(
"Read last event id (%d) from file.", lastEventId
)
for collection in self._pluginCollections:
collection.setState(lastEventId)
fh.close()
except OSError as err:
raise EventDaemonError(
"Could not load event id from file.\n\n%s"
                % traceback.format_exc()
)
else:
# No id file?
# Get the event data from the database.
lastEventId = self._getLastEventIdFromDatabase()
if lastEventId:
for collection in self._pluginCollections:
collection.setState(lastEventId)
self._saveEventIdData()
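    # Illustrative shape of the pickled id data (paths and names below are
    # hypothetical): {collection path: {plugin name: (last event id, backlog)}},
    # e.g. {"/opt/plugins": {"myPlugin": (1234, {1230: expiry_datetime})}}.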
def _getLastEventIdFromDatabase(self):
conn_attempts = 0
lastEventId = None
while lastEventId is None:
order = [{"column": "id", "direction": "desc"}]
try:
result = self._sg.find_one(
"EventLogEntry", filters=[], fields=["id"], order=order
)
except (sg.ProtocolError, sg.ResponseError, socket.error) as err:
conn_attempts = self._checkConnectionAttempts(conn_attempts, str(err))
except Exception as err:
msg = "Unknown error: %s" % str(err)
conn_attempts = self._checkConnectionAttempts(conn_attempts, msg)
else:
lastEventId = result["id"]
self.log.info("Last event id (%d) from the SG database.", lastEventId)
return lastEventId
def _mainLoop(self):
"""
Run the event processing loop.
General behavior:
- Load plugins from disk - see L{load} method.
- Get new events from Shotgun
- Loop through events
- Loop through each plugin
- Loop through each callback
- Send the callback an event
- Once all callbacks are done in all plugins, save the eventId
- Go to the next event
- Once all events are processed, wait for the defined fetch interval time and start over.
Caveats:
        - If a plugin is deemed "inactive" (an error occurred during
          registration), skip it.
        - If a callback is deemed "inactive" (an error occurred during callback
          execution), skip it.
- Each time through the loop, if the pidFile is gone, stop.
"""
self.log.debug("Starting the event processing loop.")
while self._continue:
# Process events
events = self._getNewEvents()
for event in events:
for collection in self._pluginCollections:
collection.process(event)
self._saveEventIdData()
# if we're lagging behind Shotgun, we received a full batch of events
# skip the sleep() call in this case
if len(events) < self.config.getMaxEventBatchSize():
time.sleep(self._fetch_interval)
# Reload plugins
for collection in self._pluginCollections:
collection.load()
# Make sure that newly loaded events have proper state.
self._loadEventIdData()
self.log.debug("Shuting down event processing loop.")
def stop(self):
self._continue = False
def _getNewEvents(self):
"""
Fetch new events from Shotgun.
@return: Recent events that need to be processed by the engine.
@rtype: I{list} of Shotgun event dictionaries.
"""
nextEventId = None
for newId in [
coll.getNextUnprocessedEventId() for coll in self._pluginCollections
]:
if newId is not None and (nextEventId is None or newId < nextEventId):
nextEventId = newId
if nextEventId is not None:
filters = [["id", "greater_than", nextEventId - 1]]
fields = [
"id",
"event_type",
"attribute_name",
"meta",
"entity",
"user",
"project",
"session_uuid",
"created_at",
]
order = [{"column": "id", "direction": "asc"}]
conn_attempts = 0
while True:
try:
events = self._sg.find(
"EventLogEntry",
filters,
fields,
order,
limit=self.config.getMaxEventBatchSize(),
)
if events:
self.log.debug(
"Got %d events: %d to %d.",
len(events),
events[0]["id"],
events[-1]["id"],
)
return events
except (sg.ProtocolError, sg.ResponseError, socket.error) as err:
conn_attempts = self._checkConnectionAttempts(
conn_attempts, str(err)
)
except Exception as err:
msg = "Unknown error: %s" % str(err)
conn_attempts = self._checkConnectionAttempts(conn_attempts, msg)
return []
def _saveEventIdData(self):
"""
        Save an event id to persistent storage.
Next time the engine is started it will try to read the event id from
this location to know at which event it should start processing.
"""
eventIdFile = self.config.getEventIdFile()
if eventIdFile is not None:
for collection in self._pluginCollections:
self._eventIdData[collection.path] = collection.getState()
for colPath, state in self._eventIdData.items():
if state:
try:
with open(eventIdFile, "wb") as fh:
# Use protocol 2 so it can also be loaded in Python 2
pickle.dump(self._eventIdData, fh, protocol=2)
except OSError as err:
self.log.error(
"Can not write event id data to %s.\n\n%s",
eventIdFile,
                            traceback.format_exc(),
)
break
else:
self.log.warning("No state was found. Not saving to disk.")
def _checkConnectionAttempts(self, conn_attempts, msg):
conn_attempts += 1
if conn_attempts == self._max_conn_retries:
self.log.error(
"Unable to connect to SG (attempt %s of %s): %s",
conn_attempts,
self._max_conn_retries,
msg,
)
conn_attempts = 0
time.sleep(self._conn_retry_sleep)
else:
self.log.warning(
"Unable to connect to SG (attempt %s of %s): %s",
conn_attempts,
self._max_conn_retries,
msg,
)
return conn_attempts
class PluginCollection(object):
"""
A group of plugin files in a location on the disk.
"""
def __init__(self, engine, path):
if not os.path.isdir(path):
raise ValueError("Invalid path: %s" % path)
self._engine = engine
self.path = path
self._plugins = {}
self._stateData = {}
def setState(self, state):
if isinstance(state, int):
for plugin in self:
plugin.setState(state)
self._stateData[plugin.getName()] = plugin.getState()
else:
self._stateData = state
for plugin in self:
pluginState = self._stateData.get(plugin.getName())
if pluginState:
plugin.setState(pluginState)
def getState(self):
for plugin in self:
self._stateData[plugin.getName()] = plugin.getState()
return self._stateData
def getNextUnprocessedEventId(self):
eId = None
for plugin in self:
if not plugin.isActive():
continue
newId = plugin.getNextUnprocessedEventId()
if newId is not None and (eId is None or newId < eId):
eId = newId
return eId
def process(self, event):
for plugin in self:
if plugin.isActive():
plugin.process(event)
else:
plugin.logger.debug("Skipping: inactive.")
def load(self):
"""
Load plugins from disk.
General behavior:
- Loop on all paths.
- Find all valid .py plugin files.
- Loop on all plugin files.
- For any new plugins, load them, otherwise, refresh them.
"""
newPlugins = {}
for basename in os.listdir(self.path):
if not basename.endswith(".py") or basename.startswith("."):
continue
if basename in self._plugins:
newPlugins[basename] = self._plugins[basename]
else:
newPlugins[basename] = Plugin(
self._engine, os.path.join(self.path, basename)
)
newPlugins[basename].load()
self._plugins = newPlugins
def __iter__(self):
for basename in sorted(self._plugins.keys()):
yield self._plugins[basename]
class Plugin(object):
"""
The plugin class represents a file on disk which contains one or more
callbacks.
"""
def __init__(self, engine, path):
"""
        @param engine: The engine that instantiated this plugin.
@type engine: L{Engine}
@param path: The path of the plugin file to load.
@type path: I{str}
@raise ValueError: If the path to the plugin is not a valid file.
"""
self._engine = engine
self._path = path
if not os.path.isfile(path):
raise ValueError("The path to the plugin is not a valid file - %s." % path)
self._pluginName = os.path.splitext(os.path.split(self._path)[1])[0]
self._active = True
self._callbacks = []
self._mtime = None
self._lastEventId = None
self._backlog = {}
# Setup the plugin's logger
self.logger = logging.getLogger("plugin." + self.getName())
self.logger.config = self._engine.config
self._engine.setEmailsOnLogger(self.logger, True)
self.logger.setLevel(self._engine.config.getLogLevel())
if self._engine.config.getLogMode() == 1:
_setFilePathOnLogger(
self.logger, self._engine.config.getLogFile("plugin." + self.getName())
)
def getName(self):
return self._pluginName
def setState(self, state):
if isinstance(state, int):
self._lastEventId = state
elif isinstance(state, tuple):
self._lastEventId, self._backlog = state
else:
raise ValueError("Unknown state type: %s." % type(state))
def getState(self):
return (self._lastEventId, self._backlog)
def getNextUnprocessedEventId(self):
if self._lastEventId:
nextId = self._lastEventId + 1
else:
nextId = None
now = datetime.datetime.now()
for k in list(self._backlog):
v = self._backlog[k]
if v < now:
self.logger.warning("Timeout elapsed on backlog event id %d.", k)
del self._backlog[k]
elif nextId is None or k < nextId:
nextId = k
return nextId
def isActive(self):
"""
        Is the current plugin active, i.e. should its callbacks be run?
@return: True if this plugin's callbacks should be run, False otherwise.
@rtype: I{bool}
"""
return self._active
def setEmails(self, *emails):
"""
Set the email addresses to whom this plugin should send errors.
@param emails: See L{LogFactory.getLogger}'s emails argument for info.
@type emails: A I{list}/I{tuple} of email addresses or I{bool}.
"""
self._engine.setEmailsOnLogger(self.logger, emails)
def load(self):
"""
Load/Reload the plugin and all its callbacks.
If a plugin has never been loaded it will be loaded normally. If the
plugin has been loaded before it will be reloaded only if the file has
been modified on disk. In this event callbacks will all be cleared and
reloaded.
General behavior:
- Try to load the source of the plugin.
- Try to find a function called registerCallbacks in the file.
- Try to run the registration function.
At every step along the way, if any error occurs the whole plugin will
be deactivated and the function will return.
"""
# Check file mtime
mtime = os.path.getmtime(self._path)
if self._mtime is None:
self._engine.log.info("Loading plugin at %s" % self._path)
elif self._mtime < mtime:
self._engine.log.info("Reloading plugin at %s" % self._path)
else:
# The mtime of file is equal or older. We don't need to do anything.
return
# Reset values
self._mtime = mtime
self._callbacks = []
self._active = True
try:
plugin = imp.load_source(self._pluginName, self._path)
except:
self._active = False
self.logger.error(
"Could not load the plugin at %s.\n\n%s",
self._path,
traceback.format_exc(),
)
return
regFunc = getattr(plugin, "registerCallbacks", None)
if callable(regFunc):
try:
regFunc(Registrar(self))
except:
self._engine.log.critical(
"Error running register callback function from plugin at %s.\n\n%s",
self._path,
traceback.format_exc(),
)
self._active = False
else:
self._engine.log.critical(
"Did not find a registerCallbacks function in plugin at %s.", self._path
)
self._active = False
def registerCallback(
self,
sgScriptName,
sgScriptKey,
callback,
matchEvents=None,
args=None,
stopOnError=True,
):
"""
Register a callback in the plugin.
"""
global sg
sgConnection = sg.Shotgun(
self._engine.config.getShotgunURL(),
sgScriptName,
sgScriptKey,
http_proxy=self._engine.config.getEngineProxyServer(),
)
self._callbacks.append(
Callback(
callback,
self,
self._engine,
sgConnection,
matchEvents,
args,
stopOnError,
)
)
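    # Illustrative plugin-file sketch (script name/key and function names are
    # hypothetical); a plugin module only has to expose registerCallbacks(reg):
    #
    #     def registerCallbacks(reg):
    #         reg.registerCallback("someScriptName", "someScriptKey", logEvent)
    #
    #     def logEvent(sg, logger, event, args):
    #         logger.info("%s", event["event_type"])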
def process(self, event):
if event["id"] in self._backlog:
if self._process(event):
self.logger.info("Processed id %d from backlog." % event["id"])
del self._backlog[event["id"]]
self._updateLastEventId(event)
elif self._lastEventId is not None and event["id"] <= self._lastEventId:
msg = "Event %d is too old. Last event processed was (%d)."
self.logger.debug(msg, event["id"], self._lastEventId)
else:
if self._process(event):
self._updateLastEventId(event)
return self._active
def _process(self, event):
for callback in self:
if callback.isActive():
if callback.canProcess(event):
msg = "Dispatching event %d to callback %s."
self.logger.debug(msg, event["id"], str(callback))
if not callback.process(event):
# A callback in the plugin failed. Deactivate the whole
# plugin.
self._active = False
break
else:
msg = "Skipping inactive callback %s in plugin."
self.logger.debug(msg, str(callback))
return self._active
def _updateLastEventId(self, event):
BACKLOG_TIMEOUT = (
5 # time in minutes after which we consider a pending event won't happen
)
if self._lastEventId is not None and event["id"] > self._lastEventId + 1:
event_date = event["created_at"].replace(tzinfo=None)
if datetime.datetime.now() > (
event_date + datetime.timedelta(minutes=BACKLOG_TIMEOUT)
):
# the event we've just processed happened more than BACKLOG_TIMEOUT minutes ago so any event
# with a lower id should have shown up in the EventLog by now if it actually happened
if event["id"] == self._lastEventId + 2:
self.logger.info(
"Event %d never happened - ignoring.", self._lastEventId + 1
)
else:
self.logger.info(
"Events %d-%d never happened - ignoring.",
self._lastEventId + 1,
event["id"] - 1,
)
else:
                # in this case, we want to add the missing events to the backlog as they could show up in the
                # EventLog within BACKLOG_TIMEOUT minutes; keep waiting for them to show up until they expire
expiration = datetime.datetime.now() + datetime.timedelta(
minutes=BACKLOG_TIMEOUT
)
for skippedId in range(self._lastEventId + 1, event["id"]):
self.logger.info("Adding event id %d to backlog.", skippedId)
self._backlog[skippedId] = expiration
self._lastEventId = event["id"]
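    # Illustrative example: if the last processed id is 100 and event 103 comes
    # in, ids 101 and 102 are added to the backlog with a BACKLOG_TIMEOUT-minute
    # expiration; each one is processed if it eventually shows up in the
    # EventLog, or dropped (with a warning) once it expires.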
def __iter__(self):
"""
A plugin is iterable and will iterate over all its L{Callback} objects.
"""
return self._callbacks.__iter__()
def __str__(self):
"""
Provide the name of the plugin when it is cast as string.
@return: The name of the plugin.
@rtype: I{str}
"""
return self.getName()
class Registrar(object):
"""
See public API docs in docs folder.
"""
def __init__(self, plugin):
"""
Wrap a plugin so it can be passed to a user.
"""
self._plugin = plugin
self._allowed = ["logger", "setEmails", "registerCallback"]
def getLogger(self):
"""
Get the logger for this plugin.
@return: The logger configured for this plugin.
@rtype: L{logging.Logger}
"""
# TODO: Fix this ugly protected member access
return self.logger
def __getattr__(self, name):
if name in self._allowed:
return getattr(self._plugin, name)
raise AttributeError(
"type object '%s' has no attribute '%s'" % (type(self).__name__, name)
)
class Callback(object):
"""
A part of a plugin that can be called to process a Shotgun event.
"""
def __init__(
self,
callback,
plugin,
engine,
shotgun,
matchEvents=None,
args=None,
stopOnError=True,
):
"""
@param callback: The function to run when a Shotgun event occurs.
@type callback: A function object.
@param engine: The engine that will dispatch to this callback.
@type engine: L{Engine}.
@param shotgun: The Shotgun instance that will be used to communicate
with your Shotgun server.
@type shotgun: L{sg.Shotgun}
@param matchEvents: The event filter to match events against before invoking callback.
@type matchEvents: dict
@param args: Any datastructure you would like to be passed to your
callback function. Defaults to None.
@type args: Any object.
@raise TypeError: If the callback is not a callable object.
"""
if not callable(callback):
raise TypeError(
"The callback must be a callable object (function, method or callable class instance)."
)
self._name = None
self._shotgun = shotgun
self._callback = callback
self._engine = engine
self._logger = None
self._matchEvents = matchEvents
self._args = args
self._stopOnError = stopOnError
self._active = True
# Find a name for this object
if hasattr(callback, "__name__"):
self._name = callback.__name__
elif hasattr(callback, "__class__") and hasattr(callback, "__call__"):
self._name = "%s_%s" % (callback.__class__.__name__, hex(id(callback)))
else:
raise ValueError(
"registerCallback should be called with a function or a callable object instance as callback argument."
)
# TODO: Get rid of this protected member access
self._logger = logging.getLogger(plugin.logger.name + "." + self._name)
self._logger.config = self._engine.config
def canProcess(self, event):
if not self._matchEvents:
return True
if "*" in self._matchEvents:
eventType = "*"
else:
eventType = event["event_type"]
if eventType not in self._matchEvents:
return False
attributes = self._matchEvents[eventType]
if attributes is None or "*" in attributes:
return True
if event["attribute_name"] and event["attribute_name"] in attributes:
return True
return False
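    # Illustrative matchEvents filters (event/attribute names are hypothetical):
    #   {"*": ["sg_status_list"]}        -> any event type, but only when the
    #                                       changed attribute is sg_status_list.
    #   {"Shotgun_Task_Change": None}    -> every Task change event.
    #   {"Shotgun_Shot_Change": ["*"]}   -> every Shot change event.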
def process(self, event):
"""
Process an event with the callback object supplied on initialization.
If an error occurs, it will be logged appropriately and the callback
will be deactivated.
@param event: The Shotgun event to process.
@type event: I{dict}
"""
# set session_uuid for UI updates
if self._engine._use_session_uuid:
self._shotgun.set_session_uuid(event["session_uuid"])
if self._engine.timing_logger:
start_time = datetime.datetime.now(SG_TIMEZONE.local)
try:
self._callback(self._shotgun, self._logger, event, self._args)
error = False
except:
error = True
# Get the local variables of the frame of our plugin
tb = sys.exc_info()[2]
stack = []
while tb:
stack.append(tb.tb_frame)
tb = tb.tb_next
msg = "An error occured processing an event.\n\n%s\n\nLocal variables at outer most frame in plugin:\n\n%s"
self._logger.critical(
msg, traceback.format_exc(), pprint.pformat(stack[1].f_locals)
)
if self._stopOnError:
self._active = False
if self._engine.timing_logger:
callback_name = self._logger.name.replace("plugin.", "")
end_time = datetime.datetime.now(SG_TIMEZONE.local)
duration = self._prettyTimeDeltaFormat(end_time - start_time)
delay = self._prettyTimeDeltaFormat(start_time - event["created_at"])
msg_format = "event_id=%d created_at=%s callback=%s start=%s end=%s duration=%s error=%s delay=%s"
data = [
event["id"],
event["created_at"].isoformat(),
callback_name,
start_time.isoformat(),
end_time.isoformat(),
duration,
str(error),
delay,
]
self._engine.timing_logger.info(msg_format, *data)
return self._active
def _prettyTimeDeltaFormat(self, time_delta):
days, remainder = divmod(time_delta.total_seconds(), 86400)
hours, remainder = divmod(remainder, 3600)
minutes, seconds = divmod(remainder, 60)
return "%02d:%02d:%02d:%02d.%06d" % (
days,
hours,
minutes,
seconds,
time_delta.microseconds,
)
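    # Illustrative example: a delta of 1 minute 2.5 seconds is rendered as
    # "00:00:01:02.500000" (days:hours:minutes:seconds.microseconds).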
def isActive(self):
"""
Check if this callback is active, i.e. if events should be passed to it
for processing.
@return: True if this callback should process events, False otherwise.
@rtype: I{bool}
"""
return self._active
def __str__(self):
"""
The name of the callback.
@return: The name of the callback
@rtype: I{str}
"""
return self._name
class CustomSMTPHandler(logging.handlers.SMTPHandler):
"""
    A custom SMTPHandler subclass that will adapt its subject depending on the
error severity.
"""
LEVEL_SUBJECTS = {
logging.ERROR: "ERROR - SG event daemon.",
logging.CRITICAL: "CRITICAL - SG event daemon.",
}
def __init__(
self, smtpServer, fromAddr, toAddrs, emailSubject, credentials=None, secure=None
):
args = [smtpServer, fromAddr, toAddrs, emailSubject, credentials]
if credentials:
# Python 2.7 implemented the secure argument
if CURRENT_PYTHON_VERSION >= PYTHON_27:
args.append(secure)
else:
self.secure = secure
logging.handlers.SMTPHandler.__init__(self, *args)
def getSubject(self, record):
subject = logging.handlers.SMTPHandler.getSubject(self, record)
if record.levelno in self.LEVEL_SUBJECTS:
return subject + " " + self.LEVEL_SUBJECTS[record.levelno]
return subject
def emit(self, record):
"""
Emit a record.
Format the record and send it to the specified addressees.
"""
# Mostly copied from Python 2.7 implementation.
try:
import smtplib
from email.utils import formatdate
port = self.mailport
if not port:
port = smtplib.SMTP_PORT
smtp = smtplib.SMTP(self.mailhost, port)
msg = self.format(record)
msg = "From: %s\r\nTo: %s\r\nSubject: %s\r\nDate: %s\r\n\r\n%s" % (
self.fromaddr,
",".join(self.toaddrs),
self.getSubject(record),
formatdate(),
msg,
)
if self.username:
if self.secure is not None:
smtp.ehlo()
smtp.starttls(*self.secure)
smtp.ehlo()
smtp.login(self.username, self.password)
smtp.sendmail(self.fromaddr, self.toaddrs, msg)
smtp.close()
except (KeyboardInterrupt, SystemExit):
raise
except:
self.handleError(record)
class EventDaemonError(Exception):
"""
Base error for the Shotgun event system.
"""
pass
class ConfigError(EventDaemonError):
"""
Used when an error is detected in the config file.
"""
pass
if sys.platform == "win32":
class WindowsService(win32serviceutil.ServiceFramework):
"""
Windows service wrapper
"""
_svc_name_ = "ShotgunEventDaemon"
_svc_display_name_ = "Shotgun Event Handler"
def __init__(self, args):
win32serviceutil.ServiceFramework.__init__(self, args)
self.hWaitStop = win32event.CreateEvent(None, 0, 0, None)
self._engine = Engine(_getConfigPath())
def SvcStop(self):
"""
Stop the Windows service.
"""
self.ReportServiceStatus(win32service.SERVICE_STOP_PENDING)
win32event.SetEvent(self.hWaitStop)
self._engine.stop()
def SvcDoRun(self):
"""
Start the Windows service.
"""
servicemanager.LogMsg(
servicemanager.EVENTLOG_INFORMATION_TYPE,
servicemanager.PYS_SERVICE_STARTED,
(self._svc_name_, ""),
)
self.main()
def main(self):
"""
Primary Windows entry point
"""
self._engine.start()
class LinuxDaemon(daemonizer.Daemon):
"""
    Linux daemon wrapper, also used for foreground operation on Windows
"""
def __init__(self):
self._engine = Engine(_getConfigPath())
super(LinuxDaemon, self).__init__(
"shotgunEvent", self._engine.config.getEnginePIDFile()
)
def start(self, daemonize=True):
if not daemonize:
# Setup the stdout logger
handler = logging.StreamHandler()
handler.setFormatter(
logging.Formatter("%(levelname)s:%(name)s:%(message)s")
)
logging.getLogger().addHandler(handler)
super(LinuxDaemon, self).start(daemonize)
def _run(self):
"""
Start the engine's main loop
"""
self._engine.start()
def _cleanup(self):
self._engine.stop()
def main():
""" """
if CURRENT_PYTHON_VERSION <= PYTHON_26:
print(
"Python 2.5 and older is not supported anymore. Please use Python 2.6 or newer."
)
return 3
action = None
if len(sys.argv) > 1:
action = sys.argv[1]
if sys.platform == "win32" and action != "foreground":
win32serviceutil.HandleCommandLine(WindowsService)
return 0
if action:
daemon = LinuxDaemon()
# Find the function to call on the daemon and call it
func = getattr(daemon, action, None)
if action[:1] != "_" and func is not None:
func()
return 0
print("Unknown command: %s" % action)
print("usage: %s start|stop|restart|foreground" % sys.argv[0])
return 2
def _getConfigPath():
"""
Get the path of the shotgunEventDaemon configuration file.
"""
paths = ["/etc", os.path.dirname(__file__)]
# Get the current path of the daemon script
scriptPath = sys.argv[0]
if scriptPath != "" and scriptPath != "-c":
# Make absolute path and eliminate any symlinks if any.
scriptPath = os.path.abspath(scriptPath)
scriptPath = os.path.realpath(scriptPath)
# Add the script's directory to the paths we'll search for the config.
paths[:0] = [os.path.dirname(scriptPath)]
# Search for a config file.
for path in paths:
path = os.path.join(path, "shotgunEventDaemon.conf")
if os.path.exists(path):
return path
# No config file was found
raise EventDaemonError("Config path not found, searched %s" % ", ".join(paths))
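# Illustrative search order (paths are hypothetical): when run as
# "python /opt/sg/shotgunEventDaemon.py", the config is looked for at
# /opt/sg/shotgunEventDaemon.conf, then /etc/shotgunEventDaemon.conf, then
# next to this module.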
if __name__ == "__main__":
sys.exit(main())
| 33.179598 | 151 | 0.565301 | [
"MIT"
] | darkvertex/shotgunEvents | src/shotgunEventDaemon.py | 46,186 | Python |
"""
Project Euler Problem 7: https://projecteuler.net/problem=7
10001st prime
By listing the first six prime numbers: 2, 3, 5, 7, 11, and 13, we
can see that the 6th prime is 13.
What is the 10001st prime number?
References:
- https://en.wikipedia.org/wiki/Prime_number
"""
import itertools
import math
def prime_check(number: int) -> bool:
"""
Determines whether a given number is prime or not
>>> prime_check(2)
True
>>> prime_check(15)
False
>>> prime_check(29)
True
"""
    if number < 2:
        return False
    if number % 2 == 0 and number > 2:
        return False
    return all(number % i for i in range(3, int(math.sqrt(number)) + 1, 2))
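# For example, prime_check(29) only has to try the odd divisors 3 and 5, since
# a composite number always has a factor no greater than its square root
# (int(math.sqrt(29)) == 5).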
def prime_generator():
"""
Generate a sequence of prime numbers
"""
num = 2
while True:
if prime_check(num):
yield num
num += 1
def solution(nth: int = 10001) -> int:
"""
Returns the n-th prime number.
>>> solution(6)
13
>>> solution(1)
2
>>> solution(3)
5
>>> solution(20)
71
>>> solution(50)
229
>>> solution(100)
541
"""
return next(itertools.islice(prime_generator(), nth - 1, nth))
if __name__ == "__main__":
print(f"{solution() = }")
| 17.710145 | 75 | 0.582651 | [
"MIT"
] | 04n0/TheAlgorithms-Python | project_euler/problem_007/sol3.py | 1,222 | Python |
from datetime import datetime
import ujson
from django.db import transaction
from django.test import TestCase
from math import pi
from convertable_model.models import BarFooFooModel
from convertable_model.models import FooBarModel
from convertable_model.models import FooModel, BarModel
from convertable_model.models import MixedFooBarModel
class JSONConvertibleModelTest(TestCase):
def tearDown(self) -> None:
FooModel.objects.all().delete()
def _test_equality(self, model: any, *, pk: int, result: dict):
obj = model.objects.get(pk=pk)
obj_data = ujson.loads(obj.to_json())
obj_expected_result = self.get_expected_result(result)
self.assertEqual(obj_data, obj_expected_result)
@staticmethod
def get_expected_result(data: iter) -> dict:
return ujson.loads(ujson.dumps(data))
@staticmethod
def normal_foo_results() -> dict:
return {
1: {'IntegerField': None,
'My_name': 'val1',
'foo3': pi,
'foo4': datetime.utcnow(),
'id': 1
},
2: {'IntegerField': 10,
'My_name': 'val2',
'foo3': pi,
'foo4': datetime.utcnow(),
'id': 2
},
3: {'IntegerField': 9999,
'My_name': 'val1',
'foo3': pi,
'foo4': datetime.utcnow(),
'id': 3
},
}
@staticmethod
def create_foo_objects():
FooModel.objects.create(foo2='val1', foo3=1.56)
FooModel.objects.create(foo1=10, foo2='val2', foo3=2.34)
FooModel.objects.create(foo1=9999, foo2='val1', foo3=7**0.5)
def test_normal_foo_model(self):
results = self.normal_foo_results()
self.create_foo_objects()
for i in range(1, 4):
self._test_equality(FooModel, pk=i, result=results[i])
@staticmethod
def normal_bar_results() -> dict:
return {
1: {'bar1': None,
'bar2': 'Hello World',
'bar4': datetime.utcnow()
},
2: {'bar1': None,
'bar2': 'Hello World',
'bar4': datetime.utcnow()
},
3: {'bar1': 2,
'bar2': 'Hello World',
'bar4': datetime.utcnow()
},
}
@staticmethod
def create_bar_objects():
BarModel.objects.create(bar3=0.1234)
BarModel.objects.create(bar2='Some random string', bar3=0.1004)
BarModel.objects.create(bar1=2, bar2='Another random string', bar3=0.44)
def test_normal_bar_model(self):
self.create_bar_objects()
results = self.normal_bar_results()
for i in range(1, 4):
self._test_equality(BarModel, pk=i, result=results[i])
# test json array
json_array = ujson.loads(BarModel.to_json_array(BarModel.objects.all()))
self.assertEqual(json_array,
self.get_expected_result(results.values()))
@staticmethod
def foo_foreignkey_results() -> dict:
return {
1: {
'FooModel': 'val1: 0',
'foobar2': 'CALL ME SNAKE',
'foobar3': 24.45,
},
2: {
'FooModel': 'None',
'foobar2': 'CALL ME TIGER',
'foobar3': 23.22,
},
}
def test_with_foreignkey(self):
results = self.foo_foreignkey_results()
with transaction.atomic():
self.create_foo_objects()
FooBarModel.objects.create(foobar1=FooModel.objects.get(pk=1),
foobar2='call me snake',
foobar3=24.45)
FooBarModel.objects.create(foobar2='call me tiger',
foobar3=23.22)
for i in range(1, 3):
self._test_equality(FooBarModel, pk=i, result=results[i])
def result_many_to_many(self) -> dict:
return {
'id': 1,
'foofoo': [
self.normal_foo_results()[1],
self.normal_foo_results()[2]
],
'barbar': [
self.normal_bar_results()[2],
self.normal_bar_results()[3],
],
'barfoo': 'BarFooFooModel: 1'
}
def test_many_to_many_and_foreignkey(self):
with transaction.atomic():
self.create_foo_objects()
self.create_bar_objects()
BarFooFooModel.objects.create(barfoo1=BarModel.objects.get(pk=1))
mixed_obj = MixedFooBarModel.objects.create(barfoo=BarFooFooModel.objects.get(pk=1))
mixed_obj.foofoo.add(FooModel.objects.get(pk=1))
mixed_obj.foofoo.add(FooModel.objects.get(pk=2))
mixed_obj.barbar.add(BarModel.objects.get(pk=2))
mixed_obj.barbar.add(BarModel.objects.get(pk=3))
obj = MixedFooBarModel.objects.get(pk=1)
obj_data = ujson.loads(obj.to_json())
obj_expected_result = self.get_expected_result(self.result_many_to_many())
self.assertEqual(obj_data, obj_expected_result)
| 33.948052 | 92 | 0.55088 | [
"MIT"
] | FelixTheC/-django-json-convertible-models | convertable_model/tests.py | 5,228 | Python |
import os
import subprocess
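# Lock local accounts after repeated SSH login failures: list every user with a
# /home/ directory, count that user's "Failed" ssh entries in /var/log/auth.log,
# and disable the account password (passwd -l) once the count reaches totalFails.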
usersList = subprocess.check_output("grep '/home/' /etc/passwd | cut -d: -f1", shell=True)
users = usersList.splitlines()
totalFails = 3
for user in users:
fails = subprocess.check_output("cat /var/log/auth.log | grep '" + user.decode('UTF-8') + " ' | grep 'ssh.*Failed' | wc -l", shell=True)
if(int(fails.decode('UTF-8')) >= totalFails):
os.system("passwd " + user.decode('UTF-8') + " -l")
| 30.571429 | 137 | 0.656542 | [
"MIT"
] | SoursosK/Linux-Security-Tools | forth.py | 428 | Python |
import requests
from collections import OrderedDict
from django.utils.http import urlencode
from allauth.socialaccount.providers.core.oauth2.client import (
OAuth2Client,
OAuth2Error,
)
class WeixinOAuth2Client(OAuth2Client):
def get_redirect_url(self, authorization_url, extra_params):
params = {
'appid': self.consumer_key,
'redirect_uri': self.callback_url,
'scope': self.scope,
'response_type': 'code'
}
if self.state:
params['state'] = self.state
params.update(extra_params)
sorted_params = OrderedDict()
for param in sorted(params):
sorted_params[param] = params[param]
return '%s?%s' % (authorization_url, urlencode(sorted_params))
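    # Illustrative result (the authorization_url and values are hypothetical):
    # the query parameters are emitted in sorted key order, e.g.
    #   https://example.com/connect/qrconnect?appid=APPID&redirect_uri=CALLBACK
    #       &response_type=code&scope=snsapi_login&state=STATE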
def get_access_token(self, code):
data = {'appid': self.consumer_key,
'redirect_uri': self.callback_url,
'grant_type': 'authorization_code',
'secret': self.consumer_secret,
'scope': self.scope,
'code': code}
params = None
self._strip_empty_keys(data)
url = self.access_token_url
if self.access_token_method == 'GET':
params = data
data = None
# TODO: Proper exception handling
resp = requests.request(self.access_token_method,
url,
params=params,
data=data)
access_token = None
if resp.status_code == 200:
access_token = resp.json()
if not access_token or 'access_token' not in access_token:
raise OAuth2Error('Error retrieving access token: %s'
% resp.content)
return access_token
| 33.574074 | 70 | 0.570877 | [
"MIT"
] | Cairnica/django-allauth | allauth/socialaccount/providers/other/weixin/client.py | 1,813 | Python |
# ---------------------------------------------------------------------
# Rotek.BT.get_metrics
# ---------------------------------------------------------------------
# Copyright (C) 2007-2020 The NOC Project
# See LICENSE for details
# ---------------------------------------------------------------------
# NOC modules
from noc.sa.profiles.Generic.get_metrics import Script as GetMetricsScript, metrics
from noc.core.validators import is_float
class Script(GetMetricsScript):
name = "Rotek.BT.get_metrics"
@metrics(["Environment | Sensor Status"], volatile=False, access="S") # SNMP version
def get_sensor_status(self, metrics):
for metric in metrics:
port = metric.labels[0].rsplit("::", 1)[-1]
if "st" in port:
continue
            value = 1
status = self.snmp.get(f"1.3.6.1.4.1.41752.5.15.1.{metric.ifindex}.0")
if status is None:
continue
if metric.ifindex == 1 and int(status) == 0:
value = 0
elif metric.ifindex == 2:
if is_float(status) and (-55 < float(status) < 600):
value = 0
elif metric.ifindex in [4, 6] and float(status) > 0:
value = 0
elif metric.ifindex == 9 and int(status) != 2:
value = 0
self.set_metric(
id=("Environment | Sensor Status", metric.labels),
labels=[f"noc::sensor::{port}"],
value=value,
)
@metrics(["Environment | Temperature"], volatile=False, access="S") # SNMP version
def get_temperature(self, metrics):
for metric in metrics:
if not metric.labels:
continue
port = metric.labels[0].rsplit("::", 1)[-1]
if "temp" in port:
value = self.snmp.get(f"1.3.6.1.4.1.41752.5.15.1.{metric.ifindex}.0")
if value is None:
continue
if is_float(value):
self.set_metric(
id=("Environment | Temperature", metric.labels),
labels=[f"noc::module::{port}", f"noc::sensor::{port}"],
value=value,
multi=True,
)
@metrics(["Environment | Voltage"], volatile=False, access="S") # SNMP version
def get_voltage(self, metrics):
for metric in metrics:
value = self.snmp.get(f"1.3.6.1.4.1.41752.5.15.1.{metric.ifindex}.0")
if value is None:
continue
port = metric.labels[0].rsplit("::", 1)[-1]
self.set_metric(
id=("Environment | Voltage", metric.labels),
labels=["noc::module::battery", f"noc::sensor::{port}"],
value=value,
multi=True,
)
@metrics(["Environment | Power | Input | Status"], volatile=False, access="S") # SNMP version
def get_power_input_status(self, metrics):
for metric in metrics:
value = 1
res = self.snmp.get("1.3.6.1.4.1.41752.5.15.1.9.0")
port = metric.labels[0].rsplit("::", 1)[-1]
if res not in [1, 2, 3]:
value = 0
self.set_metric(
id=("Environment | Power | Input | Status", metric.labels),
labels=[f"noc::sensor::{port}"],
value=value,
)
| 40.528736 | 98 | 0.474475 | [
"BSD-3-Clause"
] | nocproject/noc | sa/profiles/Rotek/BT/get_metrics.py | 3,526 | Python |
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
import time
chrome_path = r"C:\Users\iamdo\Downloads\chromedriver.exe" # specify your driver location
driver = webdriver.Chrome(chrome_path)
driver.get("https://github.com/login")
username = "your email" # specify your email
password = "your password" # specify your password
usernamefield = driver.find_element_by_name("login")
usernamefield.clear()
usernamefield.send_keys(username)
pfield = driver.find_element_by_name("password")
pfield.clear()
pfield.send_keys(password)
driver.find_element_by_name("commit").click()
driver.find_element_by_xpath("""/html/body/div[5]/div/aside[1]/div[2]/div[1]/div/h2/a""").click()
reponame = driver.find_element_by_name("repository[name]")
reponame.send_keys("Test") # specify your repository name
driver.find_element_by_xpath("""//*[@id="repository_visibility_public"]""").click()
driver.find_element_by_xpath("""//*[@id="repository_auto_init"]""").click()
driver.find_element_by_xpath("""//*[@id="repo-new-license-details"]/summary""").click()
driver.find_element_by_xpath("""//*[@id="license-label-mit"]""").click()
time.sleep(1)
driver.find_element_by_xpath("""//*[@id="new_repository"]/div[3]/button""").click()
time.sleep(4)
driver.close()
| 44.62069 | 98 | 0.743431 | [
"MIT"
] | iamdonmathew/github_repository | github.py | 1,294 | Python |
from django.contrib import admin
from .models import Post
# admin.site.register(Post)
class PostAdmin(admin.ModelAdmin):
list_display = ("pk","text", "pub_date", "author")
search_fields = ("text",)
list_filter = ("pub_date",)
empty_value_display = "-пусто-"
admin.site.register(Post, PostAdmin)
| 23.071429 | 55 | 0.69969 | [
"BSD-3-Clause"
] | bukvaq/yatube_project | yatube/posts/admin.py | 328 | Python |
import asyncio
from typing import List
from app.common import SkipListing
from app.scrapers.base import BaseScraper
class MaartenScraper(BaseScraper):
MAKELAARDIJ: str = "maarten"
BASE_URL: str = "https://www.maartenmakelaardij.nl"
# Specific functions
async def extract_object_urls(self, soup) -> List[str]:
"""
Extract apartment object urls
"""
items = soup.find_all("a")
urls: List[str] = []
for item in items:
if "woning/rotterdam-" in item["href"]:
urls.append(item["href"])
return list(set(urls))
async def get_page_url(self, page_num: int) -> str:
"""
Format page url
"""
return f"{self.BASE_URL}/aanbod/rotterdam/"
async def get_apartment_urls(self) -> List[str]:
"""
Fetch list of apartment urls from inventory
"""
urls = await self.scrape_page(0)
return urls
def extract_features(self, soup):
"""
Extract feature metadata from listing
"""
meta_data = {
"makelaardij": self.MAKELAARDIJ,
"building": {},
"unit": {"energy": {}, "tags": []},
}
dt = soup.find_all("dt")
dd = soup.find_all("dd")
# Features
for ind, key in enumerate(dt):
if "Bouwjaar" in key.string:
meta_data["building"]["year_constructed"] = self.find_int(
dd[ind].string
)
elif "Woonoppervlakte" in key.string:
meta_data["unit"]["area"] = self.find_float(dd[ind].text.split(" ")[0])
elif "Aantal kamers" in key.string:
meta_data["unit"]["num_rooms"] = self.find_int(dd[ind].text)
elif "verdiepingen" in key.string:
meta_data["unit"]["num_floors"] = self.find_int(dd[ind].text)
elif "Status" in key.string:
meta_data["available"] = "Beschikbaar" in dd[ind].text
elif "Buitenruimte" in key.string and "TUIN" in dd[ind].text:
meta_data["unit"]["tags"].append("garden")
# Other fields
meta_data["address"] = soup.find("span", {"class": "adres"}).string
meta_data["asking_price"] = self.find_int(
soup.find("span", {"class": "price"}).string.replace(".", "")
)
description = soup.find("div", {"id": "read-more-content"}).children
for p in description:
p_text = str(p.text)
if "Eigen grond" in p_text:
meta_data["unit"]["own_land"] = True
elif "erfpacht" in p_text:
meta_data["unit"]["own_land"] = False
if "Energielabel" in p_text:
label = p_text.split("Energielabel: ")[1][0]
meta_data["unit"]["energy"]["label"] = label
break
# Bounce broken listings
if not meta_data["unit"].get("area"):
raise SkipListing("Unable to find area")
return meta_data
if __name__ == "__main__":
scraper = MaartenScraper()
loop = asyncio.get_event_loop()
loop.run_until_complete(scraper.start())
| 29.897196 | 87 | 0.546733 | [
"MIT"
] | damienallen/makelaardij-notify | server/app/scrapers/maarten.py | 3,199 | Python |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Address',
fields=[
('id', models.AutoField(verbose_name='ID', auto_created=True, serialize=False, primary_key=True)),
('institution_name', models.CharField(max_length=200)),
('address_1', models.CharField(max_length=100)),
('address_2', models.CharField(max_length=100, null=True, blank=True)),
('city', models.CharField(max_length=100)),
('postcode', models.CharField(max_length=10)),
('country', models.CharField(max_length=100)),
('user', models.ForeignKey(to=settings.AUTH_USER_MODEL, related_name='addresses')),
],
),
]
| 35.655172 | 114 | 0.612186 | [
"MIT"
] | GETLIMS/LIMS-Backend | lims/addressbook/migrations/0001_initial.py | 1,034 | Python |
from django.core.management.base import BaseCommand
from django.core.management.base import CommandError
from axes.models import AccessAttempt
class Command(BaseCommand):
args = ''
help = ("List login attempts")
def handle(self, *args, **kwargs):
for at in AccessAttempt.objects.all():
print "%s %s %s" % (at.ip_address, at.username, at.failures)
| 27.5 | 73 | 0.688312 | [
"MIT"
] | CBitLabs/django-axes | axes/management/commands/axes_list_attempts.py | 385 | Python |
from __future__ import absolute_import, unicode_literals
import os
from celery import Celery
# set the default Django settings module for the 'celery' program.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'artemis.settings')
from django.conf import settings
app = Celery('artemis')
app.config_from_object('django.conf:settings', )
# Load task modules from all registered Django app configs.
app.autodiscover_tasks(lambda: settings.INSTALLED_APPS)
@app.task(bind=True)
def debug_task(self):
print('Request: {0!r}'.format(self.request))
| 23.956522 | 67 | 0.784029 | [
"Apache-2.0"
] | hyooqi/artemis | artemis/celery.py | 551 | Python |
import logging
import json
import paho.mqtt.client as mqttc
from ioctlgw import version
from ioctlgw.componentstate import ComponentState
LOG = logging.getLogger(__name__)
class MqttConnector(object):
def __init__(self, service):
self.service = service
self.config = self.service.config
self.mqtt_config = self.config["mqtt"]
self.mqtt = mqttc.Client()
self.mqtt_base_topic = self.mqtt_config["topic"]
self.mqtt.on_connect = self.mqtt_on_connect
self.mqtt.on_disconnect = self.mqtt_on_disconnect
self.mqtt.on_message = self.mqtt_on_message
self.mqtt.on_subscribe = self.mqtt_on_subscribe
# MQTT status jobs
self.service.scheduler.add_job(self.publish_status)
self.service.scheduler.add_job(self.publish_status, 'interval', seconds=10, jitter=5)
def start(self):
# Start a background thread to maintain the MQTT connection
LOG.info("MQTT Starting")
if "user" in self.mqtt_config and "pass" in self.mqtt_config:
self.mqtt.username_pw_set(self.mqtt_config["user"], self.mqtt_config["pass"])
mqtt_host = self.mqtt_config["host"]
mqtt_port = self.mqtt_config["port"]
LOG.info("MQTT Connecting to %s:%s", mqtt_host, mqtt_port)
self.mqtt.connect(mqtt_host, mqtt_port, 60)
# Subscribe to interesting MQTT topics
topics = [
"/boards/+/digitaloutput/+/command"
]
for topic_suffix in topics:
self.mqtt.subscribe(f"{self.mqtt_base_topic}{topic_suffix}")
self.mqtt.loop_start()
def mqtt_on_connect(self, client, data, flags, rc):
LOG.info("MQTT Connected %s", rc)
def mqtt_on_disconnect(self, client, userdata, rc):
if rc == 0:
LOG.warning("Unexpected MQTT disconnection.")
else:
LOG.warning("Unexpected MQTT disconnection. Will auto-reconnect")
def mqtt_on_subscribe(self, client, userdata, mid, gqos):
LOG.info("MQTT Subscribed %s", mid)
def mqtt_on_message(self, client, userdata, msg):
LOG.info("MQTT Message %s %s", msg.topic, str(msg.payload))
if msg.topic.startswith(self.mqtt_base_topic):
topic = msg.topic[len(self.mqtt_base_topic) + 1:]
parts = topic.split("/")
# TODO: check number of parts
controller_name = parts[1]
component = parts[2]
num = int(parts[3])
iocontroller = self.service.controllers[controller_name]
if controller_name not in self.service.controllers.keys():
LOG.warning("Message for unknown iocontroller '%s'", controller_name)
return
if component not in ["digitaloutput"]:
LOG.warning("Message for unknown component '%s'", component)
return
if num > iocontroller.num_digital_outputs:
LOG.warning("Output too high for this board: %s", num)
return
action = msg.payload.decode('utf-8').strip().upper()
if action not in ["OFF", "ON"]:
LOG.warning("Unsupported action '%s'", action)
return
LOG.debug("Requesting %s %s %s %s %s", iocontroller, controller_name, component, num, action)
iocontroller.request_digitaloutput(ComponentState(component="digitaloutput", num=num, status=action))
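    # Illustrative example (topic base and board name are hypothetical): with a
    # base topic of "ioctl", a message on
    # "ioctl/boards/board1/digitaloutput/3/command" with payload "on" asks the
    # controller registered as "board1" to switch digital output 3 ON.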
def mqtt_publish_message(self, suffix, payload, qos=0):
topic = "%s/%s" % (self.mqtt_base_topic, suffix)
        self.mqtt.publish(topic=topic, payload=payload, qos=qos)
LOG.info("%s %s", topic, payload)
def board_connection_event(self, name, event):
self.mqtt_publish_message(suffix=f"boards/{name}/connection", payload=event)
def board_io_event(self, name, state):
self.mqtt_publish_message(suffix=f"boards/{name}/{state.component}/{state.num}/status", payload=state.status)
def board_status(self, name, raw_msg):
assert True
def publish_status(self):
status = {
"version": version()
}
self.mqtt_publish_message(suffix="status", payload=json.dumps(status))
uptime = {
"minutes": self.service.uptime,
"started": self.service.startup.isoformat()
}
self.mqtt_publish_message(suffix="uptime", payload=json.dumps(uptime))
| 40.694444 | 117 | 0.63413 | [
"MIT"
] | natm/iocontrollergw | ioctlgw/mqttconnector.py | 4,395 | Python |
from tithiwa import *
tithiwabot = Tithiwa()
tithiwabot.open_session()
print("'" + tithiwabot.get_my_name() + "', '" + tithiwabot.get_my_about() + "'")
tithiwabot.quit()
browser = 3
#doing something else with browser
tithiwabot = Tithiwa(browser)
tithiwabot.browser = webdriver.Chrome()
tithiwabot.open_session()
print("'" + tithiwabot.get_my_name() + "', '" + tithiwabot.get_my_about() + "'")
tithiwabot.quit()
| 27.6 | 80 | 0.71256 | [
"MIT"
] | pyvelkov/tithiwa | tithiwa/temp.py | 414 | Python |
from flask import Flask
from flask import make_response
from flask import render_template
from flask import request
from flask import session
from blog_site.common.database import Database
from blog_site.webapp.models.blog import Blog
from blog_site.webapp.models.user import User
app = Flask(__name__)
app.secret_key = '\x1e\x14\xe6\xa0\xc5\xcc\xd9\x7f\xe5\xe8\x1cZ\xc5\xf2r\xb0W#\xed\xb6\xc8'
@app.route('/')
def home_template():
return render_template("home.html")
@app.route('/login')
def login_template():
return render_template("login.html")
@app.route('/register')
def register_template():
return render_template("register.html")
@app.before_first_request
def init_database():
Database.initialize()
@app.route('/auth/login', methods=['POST'])
def login_user():
email = request.form['email']
password = request.form['password']
if User.login_valid(email, password):
User.login(email)
else:
session['email'] = None
return render_template("login-error.html")
return render_template("profile.html", email=session['email'])
@app.route('/auth/register', methods=['POST'])
def register_user():
email = request.form['email']
password = request.form['password']
confirm_password = request.form['confirm-password']
if password == confirm_password:
User.register(email, password)
else:
# mismatch passwords
# TODO: Insert validation error
return render_template("register.html")
return render_template("register-success.html", email=session['email'])
@app.route('/blogs/<string:user_id>')
@app.route('/blogs')
def user_blogs(user_id=None):
blogs = None
user = None
if user_id is not None:
user = User.get_by_id(user_id)
else:
if session['email'] is not None:
user = User.get_by_email(session['email'])
blogs = user.get_blogs()
return render_template("user_blogs.html", blogs=blogs, email=user.email)
# TODO: User should be authenticated first before navigating to the post
@app.route('/posts/<string:blog_id>/')
def blog_posts(blog_id):
blog = Blog.from_mongo_in_blog_object(blog_id)
posts = blog.get_post()
return render_template("user_blog_posts.html", blog_title=blog.title, blog_id=blog_id, posts=posts)
@app.route('/blogs/new/', methods=['GET', 'POST'])
def create_new_blog():
if request.method == 'GET':
return render_template("new_blog.html")
else:
title = request.form['title']
description = request.form['description']
user = User.get_by_email(session['email'])
new_blog = Blog(user.email, title, description, user._id)
new_blog.save_to_mongo()
return make_response(blog_posts(user._id))
@app.route('/post/new/<string:blog_id>', methods=['GET', 'POST'])
def create_new_post(blog_id):
if request.method == 'GET':
return render_template("new_post.html", blog_id=blog_id)
else:
title = request.form['title']
content = request.form['content']
blog = Blog.from_mongo_in_blog_object(blog_id)
blog.new_post(title, content)
return make_response(blog_posts(blog_id))
if __name__ == '__main__':
app.run(port=8660, debug='True')
| 27.470588 | 103 | 0.685531 | [
"MIT"
] | jesh-anand/PythonMasterClass | blog_site/webapp/web_app.py | 3,269 | Python |
# Copyright 2019 Extreme Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
TASK_STATE_ROUTE_FORMAT = "%s__r%s"
TASK_STATE_TRANSITION_FORMAT = "%s__t%s"
INBOUND_CRITERIA_WIP = "inbound_criteria_wip"
INBOUND_CRITERIA_SATISFIED = "inbound_criteria_satisfied"
INBOUND_CRITERIA_NOT_SATISFIED = "inbound_criteria_not_satisfied"
| 39.761905 | 74 | 0.790419 | [
"Apache-2.0"
] | Anshika-Gautam/orquesta | orquesta/constants.py | 835 | Python |
import time
from grapht.graph import DictGraph, StreamGraph
__author__ = 'willmcginnis'
if __name__ == '__main__':
print('DictGraph')
g = {0: [2], 1: [2, 3], 2: [1, 3], 3: [1, 2, 4, 6], 4: [3, 5], 5: [4, 6, 7], 6: [3, 4, 5], 7: [5, 8], 8: [7], 9: [8], 10: [9], 11: [8, 9], 12: [11], 13: [12], 14: [13], 15: [1]}
gp = DictGraph(g)
print('Original Adjacency Matrix')
print(gp.get_dense())
print('Second Connections')
print(gp.get_n_connection(n=2).toarray())
print('Third Connections')
print(gp.get_n_connection(n=3).toarray())
print('\n\nStream Graph')
# NOTE: You'll need a graph in a postgres db to actually do this.
gp2 = StreamGraph(max_dim=28000000)
gp2.from_psql(username='postgres',
password='admin',
database='',
host='localhost',
schema='directed',
table='graph')
print('Number of non-zero elements')
edges = gp2.get_nnz()
print(edges)
print('Calculating 2nd Degree connections for a %s edge graph' % (edges, ))
start_time = time.time()
temp = gp2.get_n_connection(n=2)
elapsed = time.time() - start_time
print('TIME: %s' % (str(elapsed), ))
print('\nMost Connected N')
res = gp2.most_connected_n(n=25)
print(res) | 30.627907 | 181 | 0.570995 | [
"BSD-3-Clause"
] | wdm0006/grapht | examples/adjacency.py | 1,317 | Python |
from typing import Union, List, Optional
from pyspark.sql.types import StructType, StructField, StringType, ArrayType, DataType
# This file is auto-generated by generate_schema so do not edit manually
# noinspection PyPep8Naming
class AuditEvent_EntitySchema:
"""
A record of an event made for purposes of maintaining a security log. Typical
uses include detection of intrusion attempts and monitoring for inappropriate
usage.
"""
# noinspection PyDefaultArgument
@staticmethod
def get_schema(
max_nesting_depth: Optional[int] = 6,
nesting_depth: int = 0,
nesting_list: List[str] = [],
max_recursion_limit: Optional[int] = 2,
include_extension: Optional[bool] = False,
extension_fields: Optional[List[str]] = [
"valueBoolean",
"valueCode",
"valueDate",
"valueDateTime",
"valueDecimal",
"valueId",
"valueInteger",
"valuePositiveInt",
"valueString",
"valueTime",
"valueUnsignedInt",
"valueUri",
"valueQuantity",
],
extension_depth: int = 0,
max_extension_depth: Optional[int] = 2,
) -> Union[StructType, DataType]:
"""
A record of an event made for purposes of maintaining a security log. Typical
uses include detection of intrusion attempts and monitoring for inappropriate
usage.
id: unique id for the element within a resource (for internal references). This
may be any string value that does not contain spaces.
extension: May be used to represent additional information that is not part of the basic
definition of the element. In order to make the use of extensions safe and
manageable, there is a strict set of governance applied to the definition and
use of extensions. Though any implementer is allowed to define an extension,
there is a set of requirements that SHALL be met as part of the definition of
the extension.
identifier: Identifies a specific instance of the entity. The reference should always be
version specific.
reference: Identifies a specific instance of the entity. The reference should be version
specific.
type: The type of the object that was involved in this audit event.
role: Code representing the role the entity played in the event being audited.
lifecycle: Identifier for the data life-cycle stage for the entity.
securityLabel: Security labels for the identified entity.
name: A name of the entity in the audit event.
description: Text that describes the entity in more detail.
query: The query parameters for a query-type entities.
detail: Tagged value pairs for conveying additional information about the entity.
"""
from spark_fhir_schemas.stu3.complex_types.extension import ExtensionSchema
from spark_fhir_schemas.stu3.complex_types.identifier import IdentifierSchema
from spark_fhir_schemas.stu3.complex_types.reference import ReferenceSchema
from spark_fhir_schemas.stu3.complex_types.coding import CodingSchema
from spark_fhir_schemas.stu3.complex_types.auditevent_detail import (
AuditEvent_DetailSchema,
)
if (
max_recursion_limit
and nesting_list.count("AuditEvent_Entity") >= max_recursion_limit
) or (max_nesting_depth and nesting_depth >= max_nesting_depth):
return StructType([StructField("id", StringType(), True)])
# add my name to recursion list for later
my_nesting_list: List[str] = nesting_list + ["AuditEvent_Entity"]
schema = StructType(
[
# unique id for the element within a resource (for internal references). This
# may be any string value that does not contain spaces.
StructField("id", StringType(), True),
# May be used to represent additional information that is not part of the basic
# definition of the element. In order to make the use of extensions safe and
# manageable, there is a strict set of governance applied to the definition and
# use of extensions. Though any implementer is allowed to define an extension,
# there is a set of requirements that SHALL be met as part of the definition of
# the extension.
StructField(
"extension",
ArrayType(
ExtensionSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth,
max_extension_depth=max_extension_depth,
)
),
True,
),
# Identifies a specific instance of the entity. The reference should always be
# version specific.
StructField(
"identifier",
IdentifierSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth + 1,
max_extension_depth=max_extension_depth,
),
True,
),
# Identifies a specific instance of the entity. The reference should be version
# specific.
StructField(
"reference",
ReferenceSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth + 1,
max_extension_depth=max_extension_depth,
),
True,
),
# The type of the object that was involved in this audit event.
StructField(
"type",
CodingSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth + 1,
max_extension_depth=max_extension_depth,
),
True,
),
# Code representing the role the entity played in the event being audited.
StructField(
"role",
CodingSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth + 1,
max_extension_depth=max_extension_depth,
),
True,
),
# Identifier for the data life-cycle stage for the entity.
StructField(
"lifecycle",
CodingSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth + 1,
max_extension_depth=max_extension_depth,
),
True,
),
# Security labels for the identified entity.
StructField(
"securityLabel",
ArrayType(
CodingSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth,
max_extension_depth=max_extension_depth,
)
),
True,
),
# A name of the entity in the audit event.
StructField("name", StringType(), True),
# Text that describes the entity in more detail.
StructField("description", StringType(), True),
            # The query parameters for query-type entities.
StructField("query", StringType(), True),
# Tagged value pairs for conveying additional information about the entity.
StructField(
"detail",
ArrayType(
AuditEvent_DetailSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth,
max_extension_depth=max_extension_depth,
)
),
True,
),
]
)
if not include_extension:
schema.fields = [
c
if c.name != "extension"
else StructField("extension", StringType(), True)
for c in schema.fields
]
return schema
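    # Illustrative usage sketch (not part of the generated module; the class
    # name and default arguments are assumed from the pattern of the other
    # generated stu3 complex types):
    #   entity_schema = AuditEvent_EntitySchema.get_schema(max_recursion_limit=2)
    #   df = spark.createDataFrame([], schema=entity_schema)  # empty frame with this structure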
| 45.792 | 96 | 0.546646 | [
"Apache-2.0"
] | icanbwell/SparkFhirSchemas | spark_fhir_schemas/stu3/complex_types/auditevent_entity.py | 11,448 | Python |
import numpy as np
import matplotlib.pyplot as plt
import argparse
from camera import add_pmts
parser = argparse.ArgumentParser()
parser.add_argument('inputfile', help='A datafile created by the old SPECT camera')
parser.add_argument(
'--outputfile', '-o', required=False,
dest='outputfile',
help='If given, save the image to outputfile'
)
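# Example invocation (hypothetical file names):
#   python plot_spect_image.py measurement.dat -o spect_image.png
# The input is expected to be a flat little-endian uint16 array whose length
# is a perfect square; it is reshaped into a square image below.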
if __name__ == '__main__':
args = parser.parse_args()
data = np.fromfile(args.inputfile, dtype='<u2')
width = np.sqrt(data.size)
assert width.is_integer()
width = int(width)
img = data.reshape((width, width))
width = 60
x_offset = 1
y_offset = 0
x0 = -width/2 - x_offset
x1 = width/2 - x_offset
y0 = -width/2 - y_offset
y1 = width/2 - y_offset
fig, ax = plt.subplots()
ax.set_aspect(1)
ax.set_axis_bgcolor('k')
plot = ax.imshow(
img,
cmap='inferno',
interpolation='nearest',
extent=np.array([x0, x1, y0, y1]),
)
fig.colorbar(plot, label='Counts')
add_pmts(ax=ax, linewidth=1.5)
ax.set_xlim(-35, 35)
ax.set_ylim(-26, 26)
    ax.set_xlabel(r'$x \,/\, \mathrm{cm}$')
    ax.set_ylabel(r'$y \,/\, \mathrm{cm}$')
if args.outputfile:
fig.savefig(args.outputfile, dpi=300)
else:
plt.show()
| 21.196721 | 83 | 0.614076 | [
"MIT"
] | tudo-spect/plot_spect_images | plot_spect_image.py | 1,293 | Python |
# Standard library
from unittest import mock
# Third-party
import polib
from django.test import TestCase, override_settings
from django.utils.translation import override
# First-party/Local
from i18n import DEFAULT_LANGUAGE_CODE
from licenses import FREEDOM_LEVEL_MAX, FREEDOM_LEVEL_MID, FREEDOM_LEVEL_MIN
from licenses.models import LegalCode, License
from licenses.tests.factories import (
LegalCodeFactory,
LicenseFactory,
TranslationBranchFactory,
)
from licenses.tests.test_transifex import TEST_TRANSIFEX_SETTINGS
from licenses.transifex import TransifexHelper
class LegalCodeQuerySetTest(TestCase):
def test_translated(self):
bylicense30ported = LicenseFactory(
license_code="by-nc", version="3.0", jurisdiction_code="ar"
)
bylicense30unported = LicenseFactory(
license_code="by-nc", version="3.0", jurisdiction_code=""
)
bylicense40 = LicenseFactory(
license_code="by-nc", version="4.0", jurisdiction_code=""
)
cc0v1license = LicenseFactory(
license_code="CC0", version="1.0", jurisdiction_code=""
)
should_be_translated = [
LegalCodeFactory(license=bylicense40),
LegalCodeFactory(license=cc0v1license),
]
should_not_be_translated = [
LegalCodeFactory(license=bylicense30ported),
LegalCodeFactory(license=bylicense30unported),
]
self.assertCountEqual(
should_be_translated, list(LegalCode.objects.translated())
)
self.assertCountEqual(
should_not_be_translated,
set(LegalCode.objects.all()) - set(LegalCode.objects.translated()),
)
def test_valid(self):
bylicense30ported = LicenseFactory(
license_code="by-nc", version="3.0", jurisdiction_code="ar"
)
bylicense30unported = LicenseFactory(
license_code="by-nc", version="3.0", jurisdiction_code=""
)
nonbylicense30ported = LicenseFactory(
license_code="xyz", version="3.0", jurisdiction_code="ar"
)
nonbylicense30unported = LicenseFactory(
license_code="xyz", version="3.0", jurisdiction_code=""
)
bylicense40 = LicenseFactory(
license_code="by-nc", version="4.0", jurisdiction_code=""
)
nonbylicense40 = LicenseFactory(
license_code="xyz", version="4.0", jurisdiction_code=""
)
cc0v1license = LicenseFactory(
license_code="CC0", version="1.0", jurisdiction_code=""
)
noncc0v1license = LicenseFactory(
license_code="xyz", version="1.0", jurisdiction_code=""
)
# Test valid()
should_be_valid = [
LegalCodeFactory(license=bylicense30ported),
LegalCodeFactory(license=bylicense30unported),
LegalCodeFactory(license=bylicense40),
LegalCodeFactory(license=cc0v1license),
]
should_not_be_valid = [
LegalCodeFactory(license=nonbylicense30ported),
LegalCodeFactory(license=nonbylicense30unported),
LegalCodeFactory(license=nonbylicense40),
LegalCodeFactory(license=noncc0v1license),
]
self.assertCountEqual(should_be_valid, list(LegalCode.objects.valid()))
self.assertCountEqual(
should_not_be_valid,
set(LegalCode.objects.all()) - set(LegalCode.objects.valid()),
)
# Test validgroups()
self.assertCountEqual(
should_be_valid,
list(LegalCode.objects.validgroups()["by4.0"])
+ list(LegalCode.objects.validgroups()["by3.0"])
+ list(LegalCode.objects.validgroups()["zero1.0"]),
)
self.assertCountEqual(
should_not_be_valid,
set(LegalCode.objects.all())
- set(
list(LegalCode.objects.validgroups()["by4.0"])
+ list(LegalCode.objects.validgroups()["by3.0"])
+ list(LegalCode.objects.validgroups()["zero1.0"])
),
)
class LegalCodeModelTest(TestCase):
def test_str(self):
LegalCodeFactory()
legal_code = LegalCode.objects.first()
self.assertEqual(
str(legal_code),
f"LegalCode<{legal_code.language_code},"
f" {str(legal_code.license)}>",
)
def test_translation_domain(self):
data = [
# (expected, license_code, version, jurisdiction, language)
("by-sa_30", "by-sa", "3.0", "", "fr"),
("by-sa_30_xx", "by-sa", "3.0", "xx", "fr"),
]
for expected, license_code, version, jurisdiction, language in data:
with self.subTest(expected):
legalcode = LegalCodeFactory(
license__license_code=license_code,
license__version=version,
license__jurisdiction_code=jurisdiction,
language_code=language,
)
self.assertEqual(expected, legalcode.translation_domain)
@override_settings(DATA_REPOSITORY_DIR="/foo")
def test_translation_filename(self):
data = [
# (expected, license_code, version, jurisdiction, language)
(
"/foo/legalcode/de/LC_MESSAGES/by-sa_03.po",
"by-sa",
"0.3",
"",
"de",
),
(
"/foo/legalcode/de/LC_MESSAGES/by-sa_03_xx.po",
"by-sa",
"0.3",
"xx",
"de",
),
]
for expected, license_code, version, jurisdiction, language in data:
with self.subTest(expected):
license = LicenseFactory(
license_code=license_code,
version=version,
jurisdiction_code=jurisdiction,
)
self.assertEqual(
expected,
LegalCodeFactory(
license=license, language_code=language
).translation_filename(),
)
def test_plain_text_url(self):
lc = LegalCodeFactory(
license__license_code="by",
license__version="4.0",
license__jurisdiction_code="",
language_code="en",
)
lc1 = LegalCodeFactory(
license__license_code="by",
license__version="4.0",
license__jurisdiction_code="",
language_code="fr",
)
lc2 = LegalCodeFactory(
license__license_code="by",
license__version="4.0",
license__jurisdiction_code="",
language_code="ar",
)
self.assertEqual(lc.plain_text_url, f"{lc.license_url}/index.txt")
self.assertEqual(lc1.plain_text_url, f"{lc1.license_url}.txt")
self.assertEqual(lc2.plain_text_url, f"{lc2.license_url}.txt")
def test_get_pofile(self):
legalcode = LegalCodeFactory()
test_pofile = polib.POFile()
test_translation_filename = "/dev/null"
with mock.patch.object(LegalCode, "translation_filename") as mock_tf:
mock_tf.return_value = test_translation_filename
with mock.patch.object(polib, "pofile") as mock_pofile:
mock_pofile.return_value = test_pofile
result = legalcode.get_pofile()
mock_pofile.assert_called_with("", encoding="utf-8")
self.assertEqual(test_pofile, result)
@override_settings(DATA_REPOSITORY_DIR="/some/dir")
def test_get_english_pofile(self):
legalcode = LegalCodeFactory(language_code="es")
legalcode_en = LegalCodeFactory(
license=legalcode.license, language_code=DEFAULT_LANGUAGE_CODE
)
test_pofile = polib.POFile()
with mock.patch.object(
License, "get_legalcode_for_language_code"
) as mock_glfl:
mock_glfl.return_value = legalcode_en
with mock.patch.object(legalcode_en, "get_pofile") as mock_gp:
mock_gp.return_value = test_pofile
self.assertEqual(test_pofile, legalcode.get_english_pofile())
self.assertEqual(
test_pofile, legalcode_en.get_english_pofile()
)
mock_glfl.assert_called_with(DEFAULT_LANGUAGE_CODE)
mock_gp.assert_called_with()
@override_settings(DATA_REPOSITORY_DIR="/some/dir")
def test_get_translation_object(self):
# get_translation_object on the model calls the
# i18n.utils.get_translation_object.
legalcode = LegalCodeFactory(
license__version="4.0",
license__license_code="by-sa",
language_code="de",
)
with mock.patch("licenses.models.get_translation_object") as mock_djt:
legalcode.get_translation_object()
mock_djt.assert_called_with(
domain="by-sa_40", django_language_code="de"
)
def test_branch_name(self):
legalcode = LegalCodeFactory(
license__version="4.0",
license__license_code="by-sa",
language_code="de",
)
self.assertEqual("cc4-de", legalcode.branch_name())
legalcode = LegalCodeFactory(
license__version="3.5",
license__license_code="other",
language_code="de",
)
self.assertEqual("other-35-de", legalcode.branch_name())
legalcode = LegalCodeFactory(
license__version="3.5",
license__license_code="other",
language_code="de",
license__jurisdiction_code="xyz",
)
self.assertEqual("other-35-de-xyz", legalcode.branch_name())
def test_has_english(self):
license = LicenseFactory()
lc_fr = LegalCodeFactory(license=license, language_code="fr")
self.assertFalse(lc_fr.has_english())
lc_en = LegalCodeFactory(license=license, language_code="en")
self.assertTrue(lc_fr.has_english())
self.assertTrue(lc_en.has_english())
def _test_get_deed_or_license_path(self, data):
for (
version,
license_code,
jurisdiction_code,
language_code,
expected_deed_path,
expected_deed_symlinks,
expected_license_path,
expected_license_symlinks,
) in data:
license = LicenseFactory(
license_code=license_code,
version=version,
jurisdiction_code=jurisdiction_code,
)
legalcode = LegalCodeFactory(
license=license, language_code=language_code
)
self.assertEqual(
[expected_deed_path, expected_deed_symlinks],
legalcode.get_file_and_links("deed"),
)
self.assertEqual(
[expected_license_path, expected_license_symlinks],
legalcode.get_file_and_links("legalcode"),
)
def test_get_deed_or_license_path_by4(self):
"""
4.0 formula:
        /licenses/VERSION/LICENSE_deed_LANGUAGE.html
        /licenses/VERSION/LICENSE_legalcode_LANGUAGE.html
4.0 examples:
/licenses/4.0/by-nc-nd_deed_en.html
/licenses/4.0/by-nc-nd_legalcode_en.html
/licenses/4.0/by_deed_en.html
/licenses/4.0/by_legalcode_en.html
/licenses/4.0/by_deed_zh-Hans.html
/licenses/4.0/by_legalcode_zh-Hans.html
"""
self._test_get_deed_or_license_path(
[
(
"4.0",
"by-nc-nd",
"",
"en",
"licenses/by-nc-nd/4.0/deed.en.html",
["deed.html", "index.html"],
"licenses/by-nc-nd/4.0/legalcode.en.html",
["legalcode.html"],
),
(
"4.0",
"by",
"",
"en",
"licenses/by/4.0/deed.en.html",
["deed.html", "index.html"],
"licenses/by/4.0/legalcode.en.html",
["legalcode.html"],
),
]
)
self._test_get_deed_or_license_path(
[
(
"4.0",
"by",
"",
"zh-Hans",
"licenses/by/4.0/deed.zh-Hans.html",
[],
"licenses/by/4.0/legalcode.zh-Hans.html",
[],
),
]
)
def test_get_deed_or_license_path_by3(self):
"""
3.0 formula:
        /licenses/VERSION/JURISDICTION/LICENSE_deed_LANGUAGE.html
        /licenses/VERSION/JURISDICTION/LICENSE_legalcode_LANGUAGE.html
3.0 examples:
/licenses/3.0/xu/by_deed_en.html
/licenses/3.0/xu/by_legalcode_en.html
/licenses/3.0/am/by_deed_hy.html
/licenses/3.0/am/by_legalcode_hy.html
/licenses/3.0/rs/by_deed_rs-Cyrl.html
/licenses/3.0/rs/by_legalcode_rs-Cyrl.html
For jurisdiction, I used "xu" to mean "unported".
See https://en.wikipedia.org/wiki/ISO_3166-1_alpha-2#User-assigned_code_elements. # noqa: E501
"""
# Unported
self._test_get_deed_or_license_path(
[
(
"3.0",
"by",
"",
"en",
"licenses/by/3.0/xu/deed.en.html",
[
"../licenses/by/3.0/xu/deed.en.html",
"../deed.html",
"../index.html",
],
"licenses/by/3.0/xu/legalcode.en.html",
[
"../licenses/by/3.0/xu/legalcode.en.html",
"../legalcode.html",
],
),
]
)
# Ported with multiple languages
self._test_get_deed_or_license_path(
[
(
"3.0",
"by",
"ca",
"en",
"licenses/by/3.0/ca/deed.en.html",
["deed.html", "index.html"],
"licenses/by/3.0/ca/legalcode.en.html",
["legalcode.html"],
),
]
)
self._test_get_deed_or_license_path(
[
(
"3.0",
"by-sa",
"ca",
"fr",
"licenses/by-sa/3.0/ca/deed.fr.html",
[],
"licenses/by-sa/3.0/ca/legalcode.fr.html",
[],
),
]
)
# Ported with single language
self._test_get_deed_or_license_path(
[
(
"3.0",
"by-nc-nd",
"am",
"hy",
"licenses/by-nc-nd/3.0/am/deed.hy.html",
["deed.html", "index.html"],
"licenses/by-nc-nd/3.0/am/legalcode.hy.html",
["legalcode.html"],
),
]
)
def test_get_deed_or_license_path_cc0(self):
"""
cc0 formula:
        /publicdomain/VERSION/LICENSE_deed_LANGUAGE.html
        /publicdomain/VERSION/LICENSE_legalcode_LANGUAGE.html
cc0 examples:
/publicdomain/1.0/zero_deed_en.html
/publicdomain/1.0/zero_legalcode_en.html
/publicdomain/1.0/zero_deed_ja.html
/publicdomain/1.0/zero_legalcode_ja.html
"""
self._test_get_deed_or_license_path(
[
(
"1.0",
"CC0",
"",
"en",
"publicdomain/zero/1.0/deed.en.html",
["deed.html", "index.html"],
"publicdomain/zero/1.0/legalcode.en.html",
["legalcode.html"],
),
]
)
self._test_get_deed_or_license_path(
[
(
"1.0",
"CC0",
"",
"ja",
"publicdomain/zero/1.0/deed.ja.html",
[],
"publicdomain/zero/1.0/legalcode.ja.html",
[],
),
]
)
class LicenseModelTest(TestCase):
def test_nc(self):
self.assertFalse(LicenseFactory(license_code="xyz").nc)
self.assertTrue(LicenseFactory(license_code="by-nc-xyz").nc)
def test_nd(self):
self.assertFalse(LicenseFactory(license_code="xyz").nd)
self.assertTrue(LicenseFactory(license_code="by-nd-xyz").nd)
def test_sa(self):
self.assertFalse(LicenseFactory(license_code="xyz").sa)
self.assertTrue(LicenseFactory(license_code="xyz-sa").sa)
def test_get_metadata(self):
# Ported
license = LicenseFactory(
**{
"license_code": "by-nc",
"version": "3.0",
"title_english": "The Title",
"jurisdiction_code": "xyz",
"permits_derivative_works": False,
"permits_reproduction": False,
"permits_distribution": True,
"permits_sharing": True,
"requires_share_alike": True,
"requires_notice": True,
"requires_attribution": True,
"requires_source_code": True,
"prohibits_commercial_use": True,
"prohibits_high_income_nation_use": False,
}
)
LegalCodeFactory(license=license, language_code="pt")
LegalCodeFactory(license=license, language_code="en")
data = license.get_metadata()
expected_data = {
"jurisdiction": "xyz",
"license_code": "by-nc",
"permits_derivative_works": False,
"permits_distribution": True,
"permits_reproduction": False,
"permits_sharing": True,
"prohibits_commercial_use": True,
"prohibits_high_income_nation_use": False,
"requires_attribution": True,
"requires_notice": True,
"requires_share_alike": True,
"requires_source_code": True,
"title_english": "The Title",
"translations": {
"en": {
"deed": "/licenses/by-nc/3.0/xyz/",
"license": "/licenses/by-nc/3.0/xyz/legalcode",
"title": "The Title",
},
"pt": {
"deed": "/licenses/by-nc/3.0/xyz/deed.pt",
"license": "/licenses/by-nc/3.0/xyz/legalcode.pt",
"title": "The Title",
},
},
"version": "3.0",
}
for key in expected_data.keys():
self.assertEqual(expected_data[key], data[key])
# Unported
license = LicenseFactory(
**{
"license_code": "by-nc",
"version": "3.0",
"title_english": "The Title",
"jurisdiction_code": "",
"permits_derivative_works": False,
"permits_reproduction": False,
"permits_distribution": True,
"permits_sharing": True,
"requires_share_alike": True,
"requires_notice": True,
"requires_attribution": True,
"requires_source_code": True,
"prohibits_commercial_use": True,
"prohibits_high_income_nation_use": False,
}
)
LegalCodeFactory(license=license, language_code="en")
data = license.get_metadata()
expected_data = {
"license_code": "by-nc",
"permits_derivative_works": False,
"permits_distribution": True,
"permits_reproduction": False,
"permits_sharing": True,
"prohibits_commercial_use": True,
"prohibits_high_income_nation_use": False,
"requires_attribution": True,
"requires_notice": True,
"requires_share_alike": True,
"requires_source_code": True,
"title_english": "The Title",
"translations": {
"en": {
"deed": "/licenses/by-nc/3.0/",
"license": "/licenses/by-nc/3.0/legalcode",
"title": "The Title",
},
},
"version": "3.0",
}
for key in expected_data.keys():
self.assertEqual(expected_data[key], data[key])
def test_logos(self):
# Every license includes "cc-logo"
self.assertIn("cc-logo", LicenseFactory().logos())
self.assertEqual(
["cc-logo", "cc-zero"], LicenseFactory(license_code="CC0").logos()
)
self.assertEqual(
["cc-logo", "cc-by"],
LicenseFactory(
license_code="by",
version="4.0",
prohibits_commercial_use=False,
requires_share_alike=False,
permits_derivative_works=True,
).logos(),
)
self.assertEqual(
["cc-logo", "cc-by", "cc-nc"],
LicenseFactory(
license_code="by-nc",
version="3.0",
prohibits_commercial_use=True,
requires_share_alike=False,
permits_derivative_works=True,
).logos(),
)
self.assertEqual(
["cc-logo", "cc-by", "cc-nd"],
LicenseFactory(
license_code="by-nd",
version="4.0",
prohibits_commercial_use=False,
requires_share_alike=False,
permits_derivative_works=False,
).logos(),
)
self.assertEqual(
["cc-logo", "cc-by", "cc-sa"],
LicenseFactory(
license_code="by-sa",
version="4.0",
prohibits_commercial_use=False,
requires_share_alike=True,
permits_derivative_works=True,
).logos(),
)
self.assertEqual(
["cc-logo", "cc-by", "cc-nc", "cc-sa"],
LicenseFactory(
license_code="by-nc-sa",
version="4.0",
prohibits_commercial_use=True,
requires_share_alike=True,
permits_derivative_works=True,
).logos(),
)
self.assertEqual(
["cc-logo", "cc-by", "cc-nc", "cc-sa"],
LicenseFactory(
license_code="by-nc-sa",
version="3.0",
prohibits_commercial_use=True,
requires_share_alike=True,
permits_derivative_works=True,
).logos(),
)
def test_get_legalcode_for_language_code(self):
license = LicenseFactory()
lc_pt = LegalCodeFactory(license=license, language_code="pt")
lc_en = LegalCodeFactory(license=license, language_code="en")
with override(language="pt"):
result = license.get_legalcode_for_language_code(None)
self.assertEqual(lc_pt.id, result.id)
result = license.get_legalcode_for_language_code("pt")
self.assertEqual(lc_pt.id, result.id)
result = license.get_legalcode_for_language_code("en")
self.assertEqual(lc_en.id, result.id)
with self.assertRaises(LegalCode.DoesNotExist):
license.get_legalcode_for_language_code("en_us")
result = license.get_legalcode_for_language_code("en-us")
self.assertEqual(lc_en.id, result.id)
def test_resource_name(self):
license = LicenseFactory(
license_code="qwerty", version="2.7", jurisdiction_code="zys"
)
self.assertEqual("QWERTY 2.7 ZYS", license.resource_name)
license = LicenseFactory(
license_code="qwerty", version="2.7", jurisdiction_code=""
)
self.assertEqual("QWERTY 2.7", license.resource_name)
def test_resource_slug(self):
license = LicenseFactory(
license_code="qwerty", version="2.7", jurisdiction_code="zys"
)
self.assertEqual("qwerty_27_zys", license.resource_slug)
license = LicenseFactory(
license_code="qwerty", version="2.7", jurisdiction_code=""
)
self.assertEqual("qwerty_27", license.resource_slug)
def test_str(self):
license = LicenseFactory(
license_code="bx-oh", version="1.3", jurisdiction_code="any"
)
self.assertEqual(
str(license),
f"License<{license.license_code},{license.version},"
f"{license.jurisdiction_code}>",
)
def test_rdf(self):
license = LicenseFactory(
license_code="bx-oh", version="1.3", jurisdiction_code="any"
)
self.assertEqual("RDF Generation Not Implemented", license.rdf())
# def test_default_language_code(self):
# license = LicenseFactory(
# license_code="bx-oh", version="1.3", jurisdiction_code=""
# )
# self.assertEqual(
# DEFAULT_LANGUAGE_CODE, license.default_language_code()
# )
# license = LicenseFactory(
# license_code="bx-oh", version="1.3", jurisdiction_code="fr"
# )
# self.assertEqual("fr", license.default_language_code())
#
# def test_get_deed_url(self):
# # https://creativecommons.org/licenses/by-sa/4.0/
# # https://creativecommons.org/licenses/by-sa/4.0/deed.es
# # https://creativecommons.org/licenses/by/3.0/es/
# # https://creativecommons.org/licenses/by/3.0/es/deed.fr
# license = LicenseFactory(
# license_code="bx-oh", version="1.3", jurisdiction_code="ae"
# )
# self.assertEqual("/licenses/bx-oh/1.3/ae/", license.deed_url)
# license = LicenseFactory(
# license_code="bx-oh", version="1.3", jurisdiction_code=""
# )
# self.assertEqual("/licenses/bx-oh/1.3/", license.deed_url)
#
# def test_get_deed_url_for_language(self):
# license = LicenseFactory(
# license_code="bx-oh", version="1.3", jurisdiction_code="ae"
# )
# self.assertEqual(
# "/licenses/bx-oh/1.3/ae/deed.fr",
# license.get_deed_url_for_language("fr"),
# )
# license = LicenseFactory(
# license_code="bx-oh", version="1.3", jurisdiction_code=""
# )
# self.assertEqual(
# "/licenses/bx-oh/1.3/deed.es",
# license.get_deed_url_for_language("es"),
# )
def test_sampling_plus(self):
self.assertTrue(
LicenseFactory(license_code="nc-sampling+").sampling_plus
)
self.assertTrue(LicenseFactory(license_code="sampling+").sampling_plus)
self.assertFalse(LicenseFactory(license_code="sampling").sampling_plus)
self.assertFalse(LicenseFactory(license_code="MIT").sampling_plus)
self.assertFalse(
LicenseFactory(license_code="by-nc-nd-sa").sampling_plus
)
def test_level_of_freedom(self):
data = [
("by", FREEDOM_LEVEL_MAX),
("devnations", FREEDOM_LEVEL_MIN),
("sampling", FREEDOM_LEVEL_MIN),
("sampling+", FREEDOM_LEVEL_MID),
("by-nc", FREEDOM_LEVEL_MID),
("by-nd", FREEDOM_LEVEL_MID),
("by-sa", FREEDOM_LEVEL_MAX),
]
for license_code, expected_freedom in data:
with self.subTest(license_code):
license = LicenseFactory(license_code=license_code)
self.assertEqual(expected_freedom, license.level_of_freedom)
@override_settings(
TRANSIFEX=TEST_TRANSIFEX_SETTINGS,
DATA_REPOSITORY_DIR="/trans/repo",
)
def test_tx_upload_messages(self):
language_code = "es"
legalcode = LegalCodeFactory(language_code=language_code)
license = legalcode.license
test_pofile = polib.POFile()
with mock.patch.object(
license, "get_legalcode_for_language_code"
) as mock_glflc:
mock_glflc.return_value = legalcode
with mock.patch.object(
TransifexHelper, "upload_messages_to_transifex"
) as mock_umtt:
with mock.patch.object(
LegalCode, "get_pofile"
) as mock_get_pofile:
mock_get_pofile.return_value = test_pofile
license.tx_upload_messages()
mock_glflc.assert_called_with("en")
mock_umtt.assert_called_with(legalcode=legalcode)
def test_superseded(self):
lic1 = LicenseFactory()
lic2 = LicenseFactory(is_replaced_by=lic1)
self.assertTrue(lic2.superseded)
self.assertFalse(lic1.superseded)
class TranslationBranchModelTest(TestCase):
def test_str(self):
tc = TranslationBranchFactory(complete=False)
expected = f"Translation branch {tc.branch_name}. In progress."
self.assertEqual(expected, str(tc))
def test_stats(self):
language_code = "es"
lc1 = LegalCodeFactory(language_code=language_code)
tb = TranslationBranchFactory(
language_code=language_code, legalcodes=[lc1]
)
class MockPofile(list):
def untranslated_entries(self):
return [1, 2, 3, 4, 5]
def translated_entries(self):
return [1, 2, 3]
mock_pofile = MockPofile()
with mock.patch.object(LegalCode, "get_pofile") as mock_get_pofile:
mock_get_pofile.return_value = mock_pofile
stats = tb.stats
self.assertEqual(
{
"percent_messages_translated": 37,
"number_of_total_messages": 8,
"number_of_translated_messages": 3,
"number_of_untranslated_messages": 5,
},
stats,
)
| 35.807916 | 103 | 0.540265 | [
"MIT"
] | kerahui/cc-licenses | licenses/tests/test_models.py | 30,759 | Python |
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helper functions to support the RLTuner and NoteRNNLoader classes."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import random
# internal imports
import numpy as np
from six.moves import range # pylint: disable=redefined-builtin
import tensorflow as tf
LSTM_STATE_NAME = 'lstm'
# Number of output note classes. This is a property of the dataset.
NUM_CLASSES = 38
# Default batch size.
BATCH_SIZE = 128
# Music-related constants.
INITIAL_MIDI_VALUE = 48
NUM_SPECIAL_EVENTS = 2
MIN_NOTE = 48 # Inclusive
MAX_NOTE = 84 # Exclusive
TRANSPOSE_TO_KEY = 0 # C Major
DEFAULT_QPM = 80.0
# Music theory constants used in defining reward functions.
# Note that action 2 = midi note 48.
C_MAJOR_SCALE = [2, 4, 6, 7, 9, 11, 13, 14, 16, 18, 19, 21, 23, 25, 26]
C_MAJOR_KEY = [0, 1, 2, 4, 6, 7, 9, 11, 13, 14, 16, 18, 19, 21, 23, 25, 26, 28,
30, 31, 33, 35, 37]
C_MAJOR_TONIC = 14
A_MINOR_TONIC = 23
# The number of half-steps in musical intervals, in order of dissonance
OCTAVE = 12
FIFTH = 7
THIRD = 4
SIXTH = 9
SECOND = 2
FOURTH = 5
SEVENTH = 11
HALFSTEP = 1
# Special intervals that have unique rewards
REST_INTERVAL = -1
HOLD_INTERVAL = -1.5
REST_INTERVAL_AFTER_THIRD_OR_FIFTH = -2
HOLD_INTERVAL_AFTER_THIRD_OR_FIFTH = -2.5
IN_KEY_THIRD = -3
IN_KEY_FIFTH = -5
# Indicate melody direction
ASCENDING = 1
DESCENDING = -1
# Indicate whether a melodic leap has been resolved or if another leap was made
LEAP_RESOLVED = 1
LEAP_DOUBLED = -1
def default_hparams():
"""Generates the hparams used to train note rnn used in paper."""
return tf.contrib.training.HParams(use_dynamic_rnn=True,
batch_size=BATCH_SIZE,
lr=0.0002,
l2_reg=2.5e-5,
clip_norm=5,
initial_learning_rate=0.5,
decay_steps=1000,
decay_rate=0.85,
rnn_layer_sizes=[100],
skip_first_n_losses=32,
one_hot_length=NUM_CLASSES,
exponentially_decay_learning_rate=True)
def basic_rnn_hparams():
"""Generates the hparams used to train a basic_rnn.
These are the hparams used in the .mag file found at
https://github.com/tensorflow/magenta/tree/master/magenta/models/
melody_rnn#pre-trained
Returns:
Hyperparameters of the downloadable basic_rnn pre-trained model.
"""
# TODO(natashajaques): ability to restore basic_rnn from any .mag file.
return tf.contrib.training.HParams(batch_size=128,
rnn_layer_sizes=[512, 512],
one_hot_length=NUM_CLASSES)
def default_dqn_hparams():
"""Generates the default hparams for RLTuner DQN model."""
return tf.contrib.training.HParams(random_action_probability=0.1,
store_every_nth=1,
train_every_nth=5,
minibatch_size=32,
discount_rate=0.95,
max_experience=100000,
target_network_update_rate=0.01)
def autocorrelate(signal, lag=1):
"""Gives the correlation coefficient for the signal's correlation with itself.
Args:
signal: The signal on which to compute the autocorrelation. Can be a list.
lag: The offset at which to correlate the signal with itself. E.g. if lag
is 1, will compute the correlation between the signal and itself 1 beat
later.
Returns:
Correlation coefficient.
"""
n = len(signal)
x = np.asarray(signal) - np.mean(signal)
c0 = np.var(signal)
return (x[lag:] * x[:n - lag]).sum() / float(n) / c0
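# Example (illustrative): a melody that repeats every 2 steps autocorrelates
# positively at lag 2 and negatively at lag 1.
#   signal = [0, 1, 0, 1, 0, 1, 0, 1]
#   autocorrelate(signal, lag=2)   # -> 0.75
#   autocorrelate(signal, lag=1)   # -> -0.875 (attenuated by the 1/n normalization)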
def linear_annealing(n, total, p_initial, p_final):
"""Linearly interpolates a probability between p_initial and p_final.
Current probability is based on the current step, n. Used to linearly anneal
the exploration probability of the RLTuner.
Args:
n: The current step.
total: The total number of steps that will be taken (usually the length of
the exploration period).
p_initial: The initial probability.
p_final: The final probability.
Returns:
The current probability (between p_initial and p_final).
"""
if n >= total:
return p_final
else:
return p_initial - (n * (p_initial - p_final)) / (total)
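# Example (illustrative): annealing an exploration probability from 1.0 to 0.1
# over 1000 steps.
#   linear_annealing(0, 1000, 1.0, 0.1)     # -> 1.0
#   linear_annealing(500, 1000, 1.0, 0.1)   # -> 0.55
#   linear_annealing(1500, 1000, 1.0, 0.1)  # -> 0.1 (clamped once n >= total)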
def softmax(x):
"""Compute softmax values for each sets of scores in x."""
e_x = np.exp(x - np.max(x))
return e_x / e_x.sum(axis=0)
def sample_softmax(softmax_vect):
"""Samples a note from an array of softmax probabilities.
Tries to do this with numpy, which requires that the probabilities add to 1.0
with extreme precision. If this fails, uses a manual implementation.
Args:
softmax_vect: An array of probabilities.
Returns:
The index of the note that was chosen/sampled.
"""
try:
sample = np.argmax(np.random.multinomial(1, pvals=softmax_vect))
return sample
except: # pylint: disable=bare-except
r = random.uniform(0, np.sum(softmax_vect))
upto = 0
for i in range(len(softmax_vect)):
if upto + softmax_vect[i] >= r:
return i
upto += softmax_vect[i]
tf.logging.warn("Error! sample softmax function shouldn't get here")
print("Error! sample softmax function shouldn't get here")
return len(softmax_vect) - 1
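# Example (illustrative): turning model scores into a sampled note index.
#   probs = softmax(np.array([2.0, 1.0, 0.1]))   # ~[0.66, 0.24, 0.10]
#   note_index = sample_softmax(probs)           # most often 0, but stochastic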
def decoder(event_list, transpose_amount):
"""Translates a sequence generated by RLTuner to MonophonicMelody form.
Args:
event_list: Integer list of encoded notes.
transpose_amount: Key to transpose to.
Returns:
Integer list of MIDI values.
"""
return [e - NUM_SPECIAL_EVENTS if e < NUM_SPECIAL_EVENTS else
e + INITIAL_MIDI_VALUE - transpose_amount for e in event_list]
def make_onehot(int_list, one_hot_length):
"""Convert each int to a one-hot vector.
A one-hot vector is 0 everywhere except at the index equal to the
encoded value.
For example: 5 as a one-hot vector is [0, 0, 0, 0, 0, 1, 0, 0, 0, ...]
Args:
int_list: A list of ints, each of which will get a one-hot encoding.
one_hot_length: The length of the one-hot vector to be created.
Returns:
A list of one-hot encodings of the ints.
"""
return [[1.0 if j == i else 0.0 for j in range(one_hot_length)]
for i in int_list]
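# Example (illustrative):
#   make_onehot([2, 0], one_hot_length=4)
#   # -> [[0.0, 0.0, 1.0, 0.0], [1.0, 0.0, 0.0, 0.0]]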
def get_inner_scope(scope_str):
"""Takes a tensorflow scope string and finds the inner scope.
Inner scope is one layer more internal.
Args:
scope_str: Tensorflow variable scope string.
Returns:
Scope string with outer scope stripped off.
"""
idx = scope_str.find('/')
return scope_str[idx + 1:]
def trim_variable_postfixes(scope_str):
"""Trims any extra numbers added to a tensorflow scope string.
Necessary to align variables in graph and checkpoint
Args:
scope_str: Tensorflow variable scope string.
Returns:
Scope string with extra numbers trimmed off.
"""
idx = scope_str.find(':')
return scope_str[:idx]
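# Example (illustrative): aligning checkpoint variable names.
#   get_inner_scope('rnn_model/lstm/kernel')    # -> 'lstm/kernel'
#   trim_variable_postfixes('lstm/kernel:0')    # -> 'lstm/kernel'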
def get_variable_names(graph, scope):
"""Finds all the variable names in a graph that begin with a given scope.
Args:
graph: A tensorflow graph.
scope: A string scope.
Returns:
List of variables.
"""
with graph.as_default():
return [v.name for v in tf.global_variables() if v.name.startswith(scope)]
def get_next_file_name(directory, prefix, extension):
"""Finds next available filename in directory by appending numbers to prefix.
  E.g. If prefix is 'myfile', extension is '.png', and 'directory' already
contains 'myfile.png' and 'myfile1.png', this function will return
'myfile2.png'.
Args:
directory: Path to the relevant directory.
prefix: The filename prefix to use.
extension: String extension of the file, eg. '.mid'.
Returns:
String name of the file.
"""
name = directory + '/' + prefix + '.' + extension
i = 0
while os.path.isfile(name):
i += 1
name = directory + '/' + prefix + str(i) + '.' + extension
return name
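# Example (illustrative, hypothetical directory contents): if '/tmp/out'
# already holds 'melody.mid' and 'melody1.mid', then
#   get_next_file_name('/tmp/out', 'melody', 'mid')  # -> '/tmp/out/melody2.mid'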
def make_rnn_cell(rnn_layer_sizes, state_is_tuple=False):
"""Makes a default LSTM cell for use in the NoteRNNLoader graph.
This model is only to be used for loading the checkpoint from the research
paper. In general, events_rnn_graph.make_rnn_cell should be used instead.
Args:
rnn_layer_sizes: A list of integer sizes (in units) for each layer of the
RNN.
state_is_tuple: A boolean specifying whether to use tuple of hidden matrix
and cell matrix as a state instead of a concatenated matrix.
Returns:
A tf.contrib.rnn.MultiRNNCell based on the given hyperparameters.
"""
cells = []
for num_units in rnn_layer_sizes:
cell = tf.contrib.rnn.LSTMCell(num_units, state_is_tuple=state_is_tuple)
cells.append(cell)
cell = tf.contrib.rnn.MultiRNNCell(cells, state_is_tuple=state_is_tuple)
return cell
def log_sum_exp(xs):
"""Computes the log sum exp value of a tensor."""
maxes = tf.reduce_max(xs, keep_dims=True)
xs -= maxes
return tf.squeeze(maxes, [-1]) + tf.log(tf.reduce_sum(tf.exp(xs), -1))
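# Example (illustrative): for xs = [1.0, 2.0, 3.0] along the last axis the
# result equals log(e**1 + e**2 + e**3) ~= 3.41, computed stably by first
# subtracting the per-axis maximum.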
| 30.909657 | 80 | 0.669522 | [
"Apache-2.0"
] | Aaravmaheshwari/magenta | magenta/models/rl_tuner/rl_tuner_ops.py | 9,922 | Python |
# -*- coding: utf-8 -*-
'''SRFax (www.srfax.com) python library'''
import re
import os.path
import base64
import logging
import requests
URL = 'https://www.srfax.com/SRF_SecWebSvc.php'
LOGGER = logging.getLogger(__name__)
RE_E164 = re.compile(r'^\+\d{7,15}$') # TODO: Replace this with phonenumberslite?
RE_NANP = re.compile(r'^\+1')
class SRFaxError(Exception):
'''SRFax Exception'''
def __init__(self, error_code, message, cause=None, retry=False):
self.error_code = error_code
self.message = message
self.cause = cause
self.retry = retry
super(SRFaxError, self).__init__(error_code, message, cause, retry)
# TODO: In Python3.4, this causes 'AttributeError: 'NoneType' object has no attribute '__context__''
# LOGGER.exception("%s" % (self))
def get_error_code(self):
'''Get exception error code'''
return self.error_code
def get_cause(self):
'''Get exception cause'''
return self.cause
def get_retry(self):
'''Get retry option (should we retry the request?)'''
return self.retry
class SRFax(object):
'''SRFax class'''
def __init__(self, access_id, access_pwd, caller_id=None,
sender_email=None, account_code=None, url=None):
self.access_id = access_id
self.access_pwd = access_pwd
self.caller_id = caller_id
self.sender_email = sender_email
self.account_code = account_code
self.url = url or URL
def queue_fax(self, to_fax_number, filepath,
caller_id=None, sender_email=None, account_code=None):
'''Queue fax for sending'''
to_fax_number = SRFax.verify_fax_numbers(to_fax_number)
fax_type = 'BROADCAST' if len(to_fax_number) > 1 else 'SINGLE'
to_fax_number = '|'.join(to_fax_number)
try:
if isinstance(filepath, basestring):
filepath = [filepath]
except NameError:
if isinstance(filepath, str):
filepath = [filepath]
if not isinstance(filepath, list):
raise TypeError('filepath not properly defined')
if len(filepath) > 5:
raise Exception('More than 5 files defined in filepath')
params = {
'access_id': self.access_id,
'access_pwd': self.access_pwd,
'sCallerID': caller_id or self.caller_id,
'sSenderEmail': sender_email or self.sender_email,
'sFaxType': fax_type,
'sToFaxNumber': to_fax_number,
'sAccountCode': account_code or self.account_code or '',
}
SRFax.verify_parameters(params)
for i in range(len(filepath)):
path = filepath[i]
basename = os.path.basename(path)
if not isinstance(basename, str):
basename = basename.decode('utf-8')
params['sFileName_%d' % (i + 1)] = basename
content = SRFax.get_file_content(path)
if not isinstance(content, str):
content = content.decode()
params['sFileContent_%d' % (i + 1)] = content
return self.process_request('Queue_Fax', params)
def get_fax_status(self, fax_id):
'''Get fax status'''
params = {
'access_id': self.access_id,
'access_pwd': self.access_pwd,
'sFaxDetailsID': fax_id,
}
SRFax.verify_parameters(params)
response = self.process_request('Get_FaxStatus', params)
if len(response) == 1:
response = response[0]
return response
def get_fax_inbox(self, period='ALL'):
'''Get fax inbox'''
params = {
'access_id': self.access_id,
'access_pwd': self.access_pwd,
'sPeriod': period,
}
SRFax.verify_parameters(params)
return self.process_request('Get_Fax_Inbox', params)
def get_fax_outbox(self, period='ALL'):
'''Get fax outbox'''
params = {
'access_id': self.access_id,
'access_pwd': self.access_pwd,
'sPeriod': period,
}
SRFax.verify_parameters(params)
return self.process_request('Get_Fax_Outbox', params)
def retrieve_fax(self, fax_filename, folder, fax_id):
'''Retrieve fax content in Base64 format'''
assert folder in ['IN', 'OUT']
params = {
'access_id': self.access_id,
'access_pwd': self.access_pwd,
'sFaxFileName': fax_filename,
'sDirection': folder,
'sFaxDetailsID': fax_id
}
SRFax.verify_parameters(params)
response = self.process_request('Retrieve_Fax', params)
if len(response) == 1:
response = response[0]
return response
def delete_fax(self, fax_filename, folder):
'''Delete fax files from server'''
assert folder in ['IN', 'OUT']
if isinstance(fax_filename, str):
fax_filename = [fax_filename]
if not isinstance(fax_filename, list):
raise TypeError('fax_filename not properly defined')
if len(fax_filename) > 5:
raise Exception('More than 5 files defined in fax_filename')
params = {
'access_id': self.access_id,
'access_pwd': self.access_pwd,
'sDirection': folder,
}
SRFax.verify_parameters(params)
for i in range(len(fax_filename)):
params['sFileName_%d' % (i + 1)] = fax_filename[i]
return self.process_request('Delete_Fax', params)
def process_request(self, method, params):
'''Process SRFax SOAP request'''
params['action'] = method
try:
response = requests.post(self.url, json=params)
except Exception as exc:
raise SRFaxError('REQUESTFAILED', 'REST request failed',
cause=exc, retry=True)
return SRFax.process_response(response)
@staticmethod
def process_response(response):
'''Process SRFax SOAP response'''
if not response:
raise SRFaxError('INVALIDRESPONSE', 'Empty response', retry=True)
if response.ok: # TODO: What if it isn't??
response = response.json()
if 'Status' not in response or 'Result' not in response:
raise SRFaxError('INVALIDRESPONSE',
'Status and/or Result not in response: %s'
% (response), retry=True)
result = response['Result']
LOGGER.debug('Result: %s' % (result))
if response['Status'] != 'Success':
errmsg = result
if (isinstance(errmsg, list) and len(errmsg) == 1
and 'ErrorCode' in errmsg[0]):
errmsg = errmsg[0]['ErrorCode']
raise SRFaxError('REQUESTFAILED', errmsg)
if result is None:
result = True
return result
@staticmethod
def verify_parameters(params):
'''Verify that dict values are set'''
for key in params.keys():
if params[key] is None:
raise TypeError('%s not set' % (key))
@staticmethod
def is_e164_number(number):
'''Simple check if number is in E.164 format'''
if isinstance(number, str) and RE_E164.match(number):
return True
return False
@staticmethod
def is_nanp_number(number):
'''Simple check if number is inside North American Numbering Plan'''
if isinstance(number, str) and RE_NANP.match(number):
return True
return False
@staticmethod
def verify_fax_numbers(to_fax_number):
'''Verify and prepare fax numbers for use at SRFax'''
try:
if isinstance(to_fax_number, basestring):
to_fax_number = [to_fax_number]
except NameError:
if isinstance(to_fax_number, str):
to_fax_number = [to_fax_number]
if not isinstance(to_fax_number, list):
raise TypeError('to_fax_number not properly defined')
for i in range(len(to_fax_number)):
number = str(to_fax_number[i])
if not SRFax.is_e164_number(number):
raise TypeError('Number not in E.164 format: %s'
% (number))
if SRFax.is_nanp_number(number):
to_fax_number[i] = number[1:]
else:
to_fax_number[i] = '011' + number[1:]
return to_fax_number
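    # Example (illustrative): numbers must be E.164 and are rewritten for dialing.
    #   SRFax.verify_fax_numbers('+15551234567')   # -> ['15551234567'] (NANP: '+' dropped)
    #   SRFax.verify_fax_numbers('+442071234567')  # -> ['011442071234567'] ('011' prefix)
    #   SRFax.verify_fax_numbers('5551234567')     # raises TypeError (not E.164)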
@staticmethod
def get_file_content(filepath):
'''Read and return file content Base64 encoded'''
if not os.path.exists(filepath):
raise Exception('File does not exists: %s' % (filepath))
if not os.path.isfile(filepath):
raise Exception('Not a file: %s' % (filepath))
content = None
try:
fdp = open(filepath, 'rb')
except IOError:
raise
else:
content = fdp.read()
fdp.close()
if not content:
raise Exception('Error reading file or file empty: %s'
% (filepath))
return base64.b64encode(content)
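# Example usage (illustrative; credentials, numbers and paths are placeholders):
#   client = SRFax('12345', 'secret', caller_id='+15551234567',
#                  sender_email='fax@example.com')
#   result = client.queue_fax('+15557654321', '/path/to/document.pdf')
#   client.get_fax_status(result)  # assuming the API returns the queued fax id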
| 31.17 | 108 | 0.573735 | [
"MIT"
] | sunbeamer/srfax-api-python | srfax/srfax.py | 9,351 | Python |
import keras
import keras.backend as K
class Shape(keras.layers.Layer):
def call(self, inputs):
return K.shape(inputs)
def compute_output_shape(self, input_shape):
return (len(input_shape),)
class Cast(keras.layers.Layer):
def __init__(self, dtype, **kwargs):
self.dtype = dtype
super(Cast, self).__init__(**kwargs)
def call(self, inputs):
return K.cast(inputs, self.dtype)
def get_config(self):
config = super(Cast, self).get_config()
config.update(dtype=self.dtype)
return config
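# Example usage (illustrative):
#   inp = keras.layers.Input(shape=(8,), dtype='float32')
#   shape_tensor = Shape()(inp)       # dynamic shape of the input tensor
#   as_int = Cast('int32')(inp)       # element-wise dtype cast
#   model = keras.models.Model(inp, [shape_tensor, as_int])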
| 22.92 | 48 | 0.645724 | [
"MIT"
] | yecharlie/convnet3d | convnet3d/layers/misc.py | 573 | Python |
#### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Building()
result.template = "object/building/player/city/shared_hospital_corellia.iff"
result.attribute_template_id = -1
result.stfName("building_name","hospital")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
	return result
| 26.764706 | 77 | 0.72967 | [
"MIT"
] | SWGANHServices/GameServer_Legacy | data/scripts/templates/object/building/player/city/shared_hospital_corellia.py | 455 | Python |
from MoveGetter import MoveGetter
import chess
class CommandLineMoveGetter(MoveGetter):
def getMove(self, board):
print("\n")
print(board)
self.printLegalMoves(board)
return self.getMoveFromCLI(board)
def printLegalMoves(self, board):
for index, move in enumerate(board.legal_moves):
print(str(index) + ": ", end="")
print(board.san(move))
def getMoveFromCLI(self, board):
selection = -1
while(selection < 0 or selection >= len(board.legal_moves)):
try:
selection = int(input("Select a move "))
except ValueError:
print("Invalid input")
# print(board.legal_moves)
for index, move in enumerate(board.legal_moves):
if index == selection:
                return move
| 29.724138 | 68 | 0.574246 | [
"MIT"
] | okiyama/Chess-EEA-Opponent-Modelling | CommandLineMoveGetter.py | 862 | Python |
# ----------------------------------------------------------------------------
# Copyright (c) 2016-2017, QIIME 2 development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------
import pathlib
from qiime2 import sdk
from qiime2.plugin import model
def identity_transformer(view):
return view
class ModelType:
@staticmethod
def from_view_type(view_type):
if issubclass(view_type, model.base.FormatBase):
if issubclass(view_type,
model.SingleFileDirectoryFormatBase):
# HACK: this is necessary because we need to be able to "act"
# like a FileFormat when looking up transformers, but our
# input/output coercion still needs to bridge the
# transformation as we do not have transitivity
# In other words we have DX and we have transformers of X
# In a perfect world we would automatically define DX -> X and
# let transitivity handle it, but since that doesn't exist, we
# need to treat DX as if it were X and coerce behind the scenes
# TODO: redo this when transformers are transitive
return SingleFileDirectoryFormatType(view_type)
# Normal format type
return FormatType(view_type)
else:
# TODO: supporting stdlib.typing may require an alternate
# model type as `isinstance` is a meaningless operation
# for them so validation would need to be handled differently
return ObjectType(view_type)
def __init__(self, view_type):
self._pm = sdk.PluginManager()
self._view_type = view_type
def make_transformation(self, other, recorder=None):
# TODO: do something with the recorder.
transformer = self._get_transformer_to(other)
if transformer is None:
raise Exception("No transformation from %r to %r" %
(self._view_type, other._view_type))
def transformation(view):
view = self.coerce_view(view)
self.validate(view)
new_view = transformer(view)
new_view = other.coerce_view(new_view)
other.validate(new_view)
if transformer is not identity_transformer:
other.set_user_owned(new_view, False)
return new_view
return transformation
def _get_transformer_to(self, other):
transformer = self._lookup_transformer(self._view_type,
other._view_type)
if transformer is None:
return other._get_transformer_from(self)
return transformer
def has_transformation(self, other):
""" Checks to see if there exist transformers for other
Parameters
----------
other : ModelType subclass
The object being checked for transformer
Returns
-------
bool
Does the specified transformer exist for other?
"""
transformer = self._get_transformer_to(other)
return transformer is not None
def _get_transformer_from(self, other):
return None
def coerce_view(self, view):
return view
def _lookup_transformer(self, from_, to_):
if from_ == to_:
return identity_transformer
try:
return self._pm.transformers[from_][to_].transformer
except KeyError:
return None
def set_user_owned(self, view, value):
pass
class FormatType(ModelType):
def coerce_view(self, view):
if type(view) is str or isinstance(view, pathlib.Path):
return self._view_type(view, mode='r')
if isinstance(view, self._view_type):
# wrap original path (inheriting the lifetime) and return a
# read-only instance
return self._view_type(view.path, mode='r')
return view
def validate(self, view):
if not isinstance(view, self._view_type):
raise TypeError("%r is not an instance of %r."
% (view, self._view_type))
# Formats have a validate method, so defer to it
view.validate()
def set_user_owned(self, view, value):
view.path._user_owned = value
class SingleFileDirectoryFormatType(FormatType):
def __init__(self, view_type):
# Single file directory formats have only one file named `file`
# allowing us construct a model type from the format of `file`
self._wrapped_view_type = view_type.file.format
super().__init__(view_type)
def _get_transformer_to(self, other):
# Legend:
# - Dx: single directory format of x
# - Dy: single directory format of y
# - x: input format x
# - y: output format y
# - ->: implicit transformer
# - =>: registered transformer
# - |: or, used when multiple situation are possible
# It looks like all permutations because it is...
# Dx -> y | Dy via Dx => y | Dy
transformer = self._wrap_transformer(self, other)
if transformer is not None:
return transformer
# Dx -> Dy via Dx -> x => y | Dy
transformer = self._wrap_transformer(self, other, wrap_input=True)
if transformer is not None:
return transformer
if type(other) is type(self):
# Dx -> Dy via Dx -> x => y -> Dy
transformer = self._wrap_transformer(
self, other, wrap_input=True, wrap_output=True)
if transformer is not None:
return transformer
# Out of options, try for Dx -> Dy via Dx => y -> Dy
return other._get_transformer_from(self)
def _get_transformer_from(self, other):
# x | Dx -> Dy via x | Dx => y -> Dy
# IMPORTANT: reverse other and self, this method is like __radd__
return self._wrap_transformer(other, self, wrap_output=True)
def _wrap_transformer(self, in_, out_, wrap_input=False,
wrap_output=False):
input = in_._wrapped_view_type if wrap_input else in_._view_type
output = out_._wrapped_view_type if wrap_output else out_._view_type
transformer = self._lookup_transformer(input, output)
if transformer is None:
return None
if wrap_input:
transformer = in_._wrap_input(transformer)
if wrap_output:
transformer = out_._wrap_output(transformer)
return transformer
def _wrap_input(self, transformer):
def wrapped(view):
return transformer(view.file.view(self._wrapped_view_type))
return wrapped
def _wrap_output(self, transformer):
def wrapped(view):
new_view = self._view_type()
file_view = transformer(view)
if transformer is not identity_transformer:
self.set_user_owned(file_view, False)
new_view.file.write_data(file_view, self._wrapped_view_type)
return new_view
return wrapped
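# Illustrative sketch (hypothetical formats, not part of this module): given a
# registered transformer CSVFormat -> pd.DataFrame, a single-file directory
# format wrapping CSVFormat can still reach pd.DataFrame via the wrapping above:
#   in_type = ModelType.from_view_type(SingleCSVDirFormat)    # Dx
#   out_type = ModelType.from_view_type(pd.DataFrame)         # y
#   transformation = in_type.make_transformation(out_type)    # Dx -> x => y
#   df = transformation(a_dir_format_instance)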
class ObjectType(ModelType):
def validate(self, view):
if not isinstance(view, self._view_type):
raise TypeError("%r is not of type %r, cannot transform further."
% (view, self._view_type))
| 34.298643 | 79 | 0.600923 | [
"BSD-3-Clause"
] | ebolyen/qiime2 | qiime2/core/transform.py | 7,580 | Python |
import torch
import torch.nn.functional as F
def clamp_probs(probs):
eps = torch.finfo(probs.dtype).eps
return torch.clamp(probs, min=eps, max=1-eps)
def concrete_sample(logits, temperature, shape=torch.Size([])):
'''
Sampling for Concrete distribution.
See Eq. 10 of Maddison et al., 2017.
'''
uniform_shape = torch.Size(shape) + logits.shape
u = clamp_probs(torch.rand(uniform_shape, dtype=torch.float32,
device=logits.device))
gumbels = - torch.log(- torch.log(u))
scores = (logits + gumbels) / temperature
return scores.softmax(dim=-1)
def bernoulli_concrete_sample(logits, temperature, shape=torch.Size([])):
'''
Sampling for BinConcrete distribution.
See PyTorch source code, differs from Eq. 16 of Maddison et al., 2017.
'''
uniform_shape = torch.Size(shape) + logits.shape
u = clamp_probs(torch.rand(uniform_shape, dtype=torch.float32,
device=logits.device))
return torch.sigmoid((F.logsigmoid(logits) - F.logsigmoid(-logits)
+ torch.log(u) - torch.log(1 - u)) / temperature)
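# Example usage (illustrative):
#   logits = torch.zeros(5)                                  # uniform over 5 categories
#   soft_onehot = concrete_sample(logits, 0.5, shape=(3,))   # (3, 5), rows sum to 1
#   soft_bits = bernoulli_concrete_sample(logits, 0.5)       # values strictly in (0, 1)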
| 34.848485 | 75 | 0.643478 | [
"MIT"
] | iancovert/dl-selection | selection/layers/utils.py | 1,150 | Python |
#Django
from django.contrib import admin
#Model
from cride.rides.models import Ride
@admin.register(Ride)
class RideAdmin(admin.ModelAdmin):
    pass
| 16.777778 | 35 | 0.788079 | [
"MIT"
] | LhernerRemon/Rider | cride/rides/admin.py | 151 | Python |
downloadable_resource_urls = {
"default-glove-embeddings": {
"filename": "glove.tgz",
"url": "https://fibber-data.s3.amazonaws.com/glove.tgz",
"md5": "7deac3ce2cfd8be24f97bc4ba007a19c",
"untar": True,
"unzip": False,
},
"default-stopwords": {
"filename": "stopwords.txt",
"url": "https://fibber-data.s3.amazonaws.com/stopwords_v1.txt",
"md5": "6d3c778d216390063f424112087e5c10",
"untar": False,
"unzip": False,
},
"nltk-punkt": {
"filename": "punkt.zip",
"url": "https://fibber-data.s3.amazonaws.com/punkt.zip",
"md5": "398bbed6dd3ebb0752fe0735d1c418fe",
"untar": False,
"unzip": True,
},
"nltk_stopwords": {
"filename": "stopwords.zip",
"url": "https://fibber-data.s3.amazonaws.com/nltk_stopwords.zip",
"md5": "884694b9055d1caee8a0ca3aa3b2c7f7",
"untar": False,
"unzip": True,
},
"universal-sentence-encoder": {
"filename": "universal-sentence-encoder-large_5.tar.gz",
"url": "https://fibber-data.s3.amazonaws.com/universal-sentence-encoder-large_5.tar.gz",
"md5": "1ffeec98147fd21cc2d5627ec09f0d57",
"untar": True,
"unzip": False,
},
"bert-base-cased": {
"filename": "bert-base-cased.tgz",
"url": "https://fibber-data.s3.amazonaws.com/bert-base-cased.tgz",
"md5": "a81caaa4f3b98f6b0fb047290df9d355",
"untar": True,
"unzip": False,
},
"bert-base-uncased": {
"filename": "bert-base-uncased.tgz",
"url": "https://fibber-data.s3.amazonaws.com/bert-base-uncased.tgz",
"md5": "e9345a81e506c40a7727f75768e73366",
"untar": True,
"unzip": False,
},
"bert-large-cased": {
"filename": "bert-large-cased.tgz",
"url": "https://fibber-data.s3.amazonaws.com/bert-large-cased.tgz",
"md5": "669cb3a04055dffad348520cb57408a5",
"untar": True,
"unzip": False,
},
"bert-large-uncased": {
"filename": "bert-large-uncased.tgz",
"url": "https://fibber-data.s3.amazonaws.com/bert-large-uncased.tgz",
"md5": "40c48c7c1daeaf867c525eb925ad2dc7",
"untar": True,
"unzip": False,
},
"distilbert-base-cased": {
"filename": "distilbert-base-cased.tgz",
"url": "https://fibber-data.s3.amazonaws.com/distilbert-base-cased.tgz",
"md5": "bc27409472a5d741804034e33ab91a36",
"untar": True,
"unzip": False,
},
"distilbert-base-uncased": {
"filename": "distilbert-base-uncased.tgz",
"url": "https://fibber-data.s3.amazonaws.com/distilbert-base-uncased.tgz",
"md5": "7f59535e2b19e4f8fece5d7d8523c22b",
"untar": True,
"unzip": False,
},
"roberta-base": {
"filename": "roberta-base.tgz",
"url": "https://fibber-data.s3.amazonaws.com/roberta-base.tgz",
"md5": "40eb7497c8c4f56c1eee82d5b5cdaf21",
"untar": True,
"unzip": False,
},
"roberta-large": {
"filename": "roberta-large.tgz",
"url": "https://fibber-data.s3.amazonaws.com/roberta-large.tgz",
"md5": "11667313ecde0f00d86d40912a29edc2",
"untar": True,
"unzip": False,
},
"gpt2-medium": {
"filename": "gpt2-medium.tgz",
"url": "https://fibber-data.s3.amazonaws.com/gpt2-medium.tgz",
"md5": "2de5082b58bc7a2d2fe4f744db2fdafc",
"untar": True,
"unzip": False,
},
"gpt2-large": {
"filename": "gpt2-large.tgz",
"url": "https://fibber-data.s3.amazonaws.com/gpt2-large.tgz",
"md5": "98bd08097c4188e19826d92561caad2b",
"untar": True,
"unzip": False,
},
"bert-base-uncased-clf-demo": {
"filename": "bert-base-uncased-clf-demo.tgz",
"url": "https://fibber-data.s3.amazonaws.com/bert-base-uncased-clf-demo.tgz",
"md5": "4af6ca998625bbd64d9a6d59a3af035f",
"untar": True,
"unzip": False,
},
"bert-base-uncased-lm-demo": {
"filename": "bert-base-uncased-lm-demo.tgz",
"url": "https://fibber-data.s3.amazonaws.com/bert-base-uncased-lm-demo.tgz",
"md5": "ff8f302c81b0aace15890842b7bb0e17",
"untar": True,
"unzip": False
},
"wpe-demo": {
"filename": "wpe-demo.tgz",
"url": "https://fibber-data.s3.amazonaws.com/wpe-demo.tgz",
"md5": "673fb249c2d63e334bc224c4a7f9bc89",
"untar": True,
"unzip": False
},
"stsb-roberta-large": {
"filename": "stsb-roberta-large.tgz",
"url": "https://fibber-data.s3.amazonaws.com/stsb-roberta-large.tgz",
"md5": "e606aedf35a953b739c0537a11eebeb5",
"untar": True,
"unzip": False
},
"counter-fitted-vectors": {
"filename": "counter-fitted-vectors.zip",
"url": "https://fibber-data.s3.amazonaws.com/counter-fitted-vectors.zip",
"md5": "2515d2a6502d3e7c9b12cfd0e2318bd1",
"untar": False,
"unzip": True
}
}
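# Illustrative sketch (not part of this module) of how an entry above could be
# consumed: download `url` to `filename`, verify `md5`, then untar/unzip as flagged.
#   import hashlib, tarfile, urllib.request
#   info = downloadable_resource_urls["default-stopwords"]
#   urllib.request.urlretrieve(info["url"], info["filename"])
#   digest = hashlib.md5(open(info["filename"], "rb").read()).hexdigest()
#   assert digest == info["md5"]
#   if info["untar"]:
#       tarfile.open(info["filename"]).extractall()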
| 35.545455 | 96 | 0.574464 | [
"MIT"
] | stungkit/fibber | fibber/resources/downloadable_resources.py | 5,083 | Python |
import os
import go_vncdriver
import tensorflow as tf
import argparse
import json
import envs
from model import policies
import checkpoint_utils
parser = argparse.ArgumentParser(description="Run commands")
parser.add_argument('logdir', type=str, help="Log directory path")
args = parser.parse_args()
with open(args.logdir + "/hparams.json") as f:
hparams = json.load(f)
env = envs.create_env(**hparams)
obs = env.reset()
policyType = policies[hparams['policy']]
policy = policyType(env.observation_space.shape, env.action_space.n, **hparams)
features = policy.get_initial_features()
sess = tf.Session()
#import ipdb; ipdb.set_trace()
checkpoint_utils.init_from_checkpoint(args.logdir + '/train', {'global/':'/'})
#saver = tf.train.Saver(sharded=True)
#saver.restore(sess, os.path.join(args.logdir, 'train/model.ckpt-0'))
sess.run(tf.global_variables_initializer())
with sess.as_default():
while True:
env.render()
fetched = policy.act(obs, *features)
action, value_, features = fetched[0], fetched[1], fetched[2:]
obs, reward, done, info = env.step(action.argmax())
if done:
obs = env.reset()
| 25.782609 | 79 | 0.697302 | [
"MIT"
] | DFrolova/human-rl | universe-starter-agent/run.py | 1,186 | Python |
#!/usr/bin/env python3
# SPDX-FileCopyrightText: © 2022 Decompollaborate
# SPDX-License-Identifier: MIT
from __future__ import annotations
import enum
@enum.unique
class FileSectionType(enum.Enum):
Unknown = -2
Invalid = -1
Text = 1
Data = 2
Rodata = 3
Bss = 4
Reloc = 5
@staticmethod
def fromId(sectionId: int) -> FileSectionType:
if sectionId == 1:
return FileSectionType.Text
if sectionId == 2:
return FileSectionType.Data
if sectionId == 3:
return FileSectionType.Rodata
if sectionId == 4:
return FileSectionType.Bss
if sectionId == 5:
return FileSectionType.Reloc
return FileSectionType.Invalid
@staticmethod
def fromStr(x: str) -> FileSectionType:
if x == ".text":
return FileSectionType.Text
if x == ".data":
return FileSectionType.Data
if x == ".rodata":
return FileSectionType.Rodata
if x == ".bss":
return FileSectionType.Bss
if x == ".reloc":
return FileSectionType.Reloc
return FileSectionType.Invalid
def toStr(self) -> str:
if self == FileSectionType.Text:
return ".text"
if self == FileSectionType.Data:
return ".data"
if self == FileSectionType.Rodata:
return ".rodata"
if self == FileSectionType.Bss:
return ".bss"
if self == FileSectionType.Reloc:
return ".reloc"
return ""
def toCapitalizedStr(self) -> str:
if self == FileSectionType.Text:
return "Text"
if self == FileSectionType.Data:
return "Data"
if self == FileSectionType.Rodata:
return "RoData"
if self == FileSectionType.Bss:
return "Bss"
if self == FileSectionType.Reloc:
return "Reloc"
return ""
def toSectionName(self) -> str:
if self == FileSectionType.Text:
return ".text"
if self == FileSectionType.Data:
return ".data"
if self == FileSectionType.Rodata:
return ".rodata"
if self == FileSectionType.Bss:
return ".bss"
if self == FileSectionType.Reloc:
return ".ovl"
return ""
FileSections_ListBasic = [FileSectionType.Text, FileSectionType.Data, FileSectionType.Rodata, FileSectionType.Bss]
FileSections_ListAll = [FileSectionType.Text, FileSectionType.Data, FileSectionType.Rodata, FileSectionType.Bss, FileSectionType.Reloc]
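# Illustrative usage (added example, not part of the original module):
if __name__ == "__main__":
    section = FileSectionType.fromStr(".rodata")
    assert section == FileSectionType.Rodata
    assert section.toCapitalizedStr() == "RoData"
    assert FileSectionType.fromId(5).toSectionName() == ".ovl"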
| 28.923077 | 135 | 0.582067 | ["MIT"] | Decompollaborate/py-mips-disasm | spimdisasm/common/FileSectionType.py | 2,633 | Python |
import numpy as np
import pytest
from pytools.obj_array import make_obj_array
# {{{ test_unstructured_vertex_grid
@pytest.mark.parametrize("ambient_dim", [2, 3])
@pytest.mark.parametrize("dformat", ["xml", "hdf", "binary"])
def test_unstructured_vertex_grid(ambient_dim, dformat, npoints=64):
"""Test constructing a vertex grid with different ways to define the
points and connectivity.
"""
# {{{ set up connectivity
from pyvisfile.xdmf import NumpyDataArray, DataArray, _data_item_from_numpy
connectivity = np.arange(npoints, dtype=np.uint32)
points = np.random.rand(ambient_dim, npoints)
if dformat == "xml":
connectivity = NumpyDataArray(connectivity, name="connectivity")
points = NumpyDataArray(points.T, name="points")
elif dformat in ["hdf", "binary"]:
if dformat == "hdf":
cdata = "geometry.h5:/Grid/Connectivity"
pdata = "geometry.h5:/Grid/Points"
else:
cdata = "connectivity.out"
pdata = "points.out"
connectivity = DataArray((
_data_item_from_numpy(connectivity,
name="connectivity",
data=cdata),
))
points = DataArray((
_data_item_from_numpy(points.T,
name="points",
data=pdata),
))
else:
raise ValueError(f"unknown format: '{dformat}'")
# }}}
# {{{ set up grids
from pyvisfile.xdmf import TopologyType
from pyvisfile.xdmf import XdmfUnstructuredGrid
grid = XdmfUnstructuredGrid(
points, connectivity,
topology_type=TopologyType.Polyvertex,
name="polyvertex")
# }}}
from pyvisfile.xdmf import XdmfWriter
writer = XdmfWriter((grid,))
filename = f"test_unstructured_vertex_{dformat}_{ambient_dim}d.xmf"
writer.write_pretty(filename)
# }}}
# {{{ test_unstructured_simplex_grid
def _simplex_box_connectivity(*, npoints, nelements, nvertices):
# NOTE: largely copied from meshmode/mesh/generation.py::generate_box_mesh
ambient_dim = len(npoints)
point_indices = np.arange(np.prod(npoints)).reshape(npoints)
connectivity = np.empty((nelements, nvertices), dtype=np.uint32)
ielement = 0
from itertools import product
if ambient_dim == 1:
raise NotImplementedError
elif ambient_dim == 2:
for i, j in product(range(npoints[0] - 1), repeat=ambient_dim):
a = point_indices[i + 0, j + 0]
b = point_indices[i + 1, j + 0]
c = point_indices[i + 0, j + 1]
d = point_indices[i + 1, j + 1]
connectivity[ielement + 0, :] = (a, b, c)
connectivity[ielement + 1, :] = (d, c, b)
ielement += 2
elif ambient_dim == 3:
for i, j, k in product(range(npoints[0] - 1), repeat=ambient_dim):
a000 = point_indices[i, j, k]
a001 = point_indices[i, j, k+1]
a010 = point_indices[i, j+1, k]
a011 = point_indices[i, j+1, k+1]
a100 = point_indices[i+1, j, k]
a101 = point_indices[i+1, j, k+1]
a110 = point_indices[i+1, j+1, k]
a111 = point_indices[i+1, j+1, k+1]
connectivity[ielement + 0, :] = (a000, a100, a010, a001)
connectivity[ielement + 1, :] = (a101, a100, a001, a010)
connectivity[ielement + 2, :] = (a101, a011, a010, a001)
connectivity[ielement + 3, :] = (a100, a010, a101, a110)
connectivity[ielement + 4, :] = (a011, a010, a110, a101)
connectivity[ielement + 5, :] = (a011, a111, a101, a110)
ielement += 6
else:
raise NotImplementedError
assert ielement == nelements
from pyvisfile.xdmf import NumpyDataArray
return NumpyDataArray(connectivity, name="connectivity")
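# Worked example (added, illustrative) for _simplex_box_connectivity above: a 2D
# grid with npoints=(3, 3) has (3 - 1)**2 = 4 quads, each split into 2 triangles,
# so it would be called with nelements=8 and nvertices=3 and return an array of
# shape (8, 3).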
@pytest.mark.parametrize("ambient_dim", [2, 3])
def test_unstructured_simplex_grid(ambient_dim, nelements=16):
"""Test constructing a grid with a more complicated topology."""
from pyvisfile.xdmf import TopologyType
if ambient_dim == 1:
topology_type = TopologyType.Polyline
simplices_per_quad = 1
    elif ambient_dim == 2:
topology_type = TopologyType.Triangle
simplices_per_quad = 2
elif ambient_dim == 3:
topology_type = TopologyType.Tetrahedron
simplices_per_quad = 6
else:
raise ValueError("unsupported dimension")
# {{{ points and connectivity
x = np.linspace(-1.0, 1.0, nelements + 1)
npoints = len(x)
points = np.empty((ambient_dim,) + (npoints,) * ambient_dim)
for idim in range(ambient_dim):
points[idim] = x.reshape((npoints,) + (1,) * (ambient_dim - 1 - idim))
from pyvisfile.xdmf import NumpyDataArray
points = NumpyDataArray(points.reshape(ambient_dim, -1).T, name="points")
from pyvisfile.xdmf import _XDMF_ELEMENT_NODE_COUNT
connectivity = _simplex_box_connectivity(
npoints=(npoints,) * ambient_dim,
nelements=simplices_per_quad * nelements**ambient_dim,
nvertices=_XDMF_ELEMENT_NODE_COUNT[topology_type]
)
# }}}
# {{{ attributes
temperature = np.sin(2.0 * np.pi * points.ary[:, 0]) \
+ np.cos(2.0 * np.pi * points.ary[:, 1])
temperature = NumpyDataArray(temperature, name="temperature")
velocity = points.ary + np.array([0, 1, 2][:ambient_dim]).reshape(1, -1)
velocity = NumpyDataArray(velocity, name="velocity")
vorticity = NumpyDataArray(make_obj_array(velocity.ary), name="vorticity")
# }}}
# {{{ write grids
from pyvisfile.xdmf import XdmfUnstructuredGrid
grid = XdmfUnstructuredGrid(
points, connectivity,
topology_type=topology_type,
name="simplex")
grid.add_attribute(temperature)
grid.add_attribute(velocity)
grid.add_attribute(vorticity)
from pyvisfile.xdmf import XdmfWriter
writer = XdmfWriter((grid,))
filename = f"test_unstructured_simplex_{ambient_dim}d.xmf"
writer.write_pretty(filename)
# }}}
if __name__ == "__main__":
import sys
if len(sys.argv) > 1:
exec(sys.argv[1])
else:
pytest.main([__file__])
# vim: fdm=marker
| 31.417085 | 79 | 0.621561 | ["MIT"] | alexfikl/pyvisfile | test/test_xdmf.py | 6,252 | Python |
import unittest
import torch
from transformers import (
AutoModelForSequenceClassification,
AutoTokenizer,
BertConfig,
BertForSequenceClassification,
GlueDataset,
GlueDataTrainingArguments,
Trainer,
TrainingArguments,
)
from transformers.adapters.composition import Fuse
from transformers.testing_utils import slow
class TestAdapterTrainer(unittest.TestCase):
def test_resume_training(self):
tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
data_args = GlueDataTrainingArguments(
task_name="mrpc", data_dir="./tests/fixtures/tests_samples/MRPC", overwrite_cache=True
)
train_dataset = GlueDataset(data_args, tokenizer=tokenizer, mode="train")
model = AutoModelForSequenceClassification.from_pretrained("bert-base-uncased")
model.add_adapter("adapter")
model.add_adapter("additional_adapter")
model.set_active_adapters("adapter")
training_args = TrainingArguments(
output_dir="./examples",
do_train=True,
learning_rate=0.1,
logging_steps=1,
max_steps=1,
save_steps=1,
remove_unused_columns=False,
)
trainer = Trainer(
model=model,
args=training_args,
train_dataset=train_dataset,
do_save_adapters=True,
do_save_full_model=False,
)
trainer.train()
# create second model that should resume the training of the first
model_resume = AutoModelForSequenceClassification.from_pretrained("bert-base-uncased")
model_resume.add_adapter("adapter")
model_resume.add_adapter("additional_adapter")
model_resume.set_active_adapters("adapter")
trainer_resume = Trainer(
model=model_resume,
args=TrainingArguments(do_train=True, max_steps=1, output_dir="./examples"),
train_dataset=train_dataset,
do_save_adapters=True,
do_save_full_model=False,
)
trainer_resume.train(resume_from_checkpoint=True)
self.assertEqual(model.config.adapters.adapters, model_resume.config.adapters.adapters)
for ((k1, v1), (k2, v2)) in zip(trainer.model.state_dict().items(), trainer_resume.model.state_dict().items()):
self.assertEqual(k1, k2)
if "adapter" in k1:
self.assertTrue(torch.equal(v1, v2), k1)
def test_resume_training_with_fusion(self):
tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
data_args = GlueDataTrainingArguments(
task_name="mrpc", data_dir="./tests/fixtures/tests_samples/MRPC", overwrite_cache=True
)
train_dataset = GlueDataset(data_args, tokenizer=tokenizer, mode="train")
model = AutoModelForSequenceClassification.from_pretrained("bert-base-uncased")
model.add_adapter("adapter")
model.add_adapter("additional_adapter")
model.add_adapter_fusion(Fuse("adapter", "additional_adapter"))
model.set_active_adapters(Fuse("adapter", "additional_adapter"))
training_args = TrainingArguments(
output_dir="./examples",
do_train=True,
learning_rate=0.1,
logging_steps=1,
max_steps=1,
save_steps=1,
remove_unused_columns=False,
)
trainer = Trainer(
model=model,
args=training_args,
train_dataset=train_dataset,
do_save_adapters=True,
do_save_full_model=False,
do_save_adapter_fusion=True,
)
trainer.train()
model_resume = AutoModelForSequenceClassification.from_pretrained("bert-base-uncased")
model_resume.add_adapter("adapter")
model_resume.add_adapter("additional_adapter")
model_resume.add_adapter_fusion(Fuse("adapter", "additional_adapter"))
model_resume.set_active_adapters(Fuse("adapter", "additional_adapter"))
trainer_resume = Trainer(
model=model_resume,
args=TrainingArguments(do_train=True, max_steps=1, output_dir="./examples"),
train_dataset=train_dataset,
do_save_full_model=False,
do_save_adapters=True,
)
trainer_resume.train(resume_from_checkpoint=True)
self.assertEqual(model.config.adapters.adapters, model_resume.config.adapters.adapters)
for ((k1, v1), (k2, v2)) in zip(trainer.model.state_dict().items(), trainer_resume.model.state_dict().items()):
self.assertEqual(k1, k2)
if "adapter" in k1:
self.assertTrue(torch.equal(v1, v2), k1)
def test_auto_set_save_adapters(self):
model = BertForSequenceClassification(
BertConfig(
hidden_size=32,
num_hidden_layers=4,
num_attention_heads=4,
intermediate_size=37,
)
)
model.add_adapter("adapter")
model.train_adapter("adapter")
training_args = TrainingArguments(
output_dir="./examples",
)
trainer = Trainer(
model=model,
args=training_args,
)
self.assertFalse(trainer.do_save_full_model)
self.assertTrue(trainer.do_save_adapters)
self.assertTrue(trainer.do_save_adapter_fusion)
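        # Note (added): train_adapter() freezes the base model, so the Trainer
        # is expected to persist only adapter and fusion weights rather than
        # the full model, which is exactly what the three asserts above check.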
@slow
def test_training_load_best_model_at_end_full_model(self):
tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
data_args = GlueDataTrainingArguments(
task_name="mrpc", data_dir="./tests/fixtures/tests_samples/MRPC", overwrite_cache=True
)
train_dataset = GlueDataset(data_args, tokenizer=tokenizer, mode="train")
eval_dataset = GlueDataset(data_args, tokenizer=tokenizer, mode="dev")
model = AutoModelForSequenceClassification.from_pretrained("bert-base-uncased")
model.add_adapter("adapter")
model.train_adapter("adapter")
training_args = TrainingArguments(
output_dir="./examples",
do_train=True,
learning_rate=0.001,
max_steps=1,
save_steps=1,
remove_unused_columns=False,
load_best_model_at_end=True,
evaluation_strategy="epoch",
num_train_epochs=2,
)
trainer = Trainer(
model=model,
args=training_args,
train_dataset=train_dataset,
eval_dataset=eval_dataset,
do_save_adapters=False,
do_save_full_model=True,
)
trainer.train()
self.assertIsNotNone(trainer.model.active_adapters)
if __name__ == "__main__":
unittest.main()
| 36.12766 | 119 | 0.643551 | ["Apache-2.0"] | AngadSethi/adapter-transformers | tests/test_adapter_trainer.py | 6,792 | Python |
from sklearn.metrics import roc_curve, auc
import numpy as np
from sklearn.metrics import confusion_matrix
from sklearn.metrics import classification_report
import json
import pandas as pd
from pathlib import Path
import matplotlib.pyplot as plt
from pylab import rcParams
# rcParams['figure.figsize'] = 20, 20
rcParams['figure.figsize'] = 15, 15
def results(x_true, x_pred, y_true, y_pred, classes, params, path=None, name=None):
if path is None and name is None:
path = f'models/{params["model_type"]}/{params["exp_name"]}/'
name = f'{params["model_type"]}-{params["exp_name"]}'
# Create folder
Path(path).mkdir(parents=True, exist_ok=True)
# Log
log_file = open(f'{path}log.json', "w")
json.dump(params, log_file, indent=4)
# Train results
x_pred_ = x_pred.argmax(dim=1)
#classification report
report = classification_report(x_true, x_pred_, target_names=classes,output_dict=True)
df_classification_report = pd.DataFrame(report).transpose()
accuracy_report = df_classification_report.tail(3)
accuracy_report.to_csv(path+'train_accuracy_report.csv')
df_classification_report.drop(df_classification_report.tail(3).index, inplace=True)
df_classification_report = df_classification_report.sort_values(by=['f1-score'], ascending=False)
df_classification_report.to_csv(path+'train_classification_report.csv')
# AUC curve
x_true_ohe = np.zeros((len(x_pred), len(classes)))
for idx, lbl in enumerate(x_true):
x_true_ohe[idx][lbl] = 1
x_pred = x_pred.detach().numpy()
plot_multiclass_roc(x_true_ohe,x_pred, classes=classes, path=path, name='train-'+name)
# Confusion matrix
cm = confusion_matrix(x_true, x_pred_)
plot_confusion_matrix(cm, classes, path=path, name='train-'+name)
# Test results
y_pred_ = y_pred.argmax(dim=1)
#classification report
report = classification_report(y_true, y_pred_, target_names=classes,output_dict=True)
df_classification_report = pd.DataFrame(report).transpose()
accuracy_report = df_classification_report.tail(3)
accuracy_report.to_csv(path+'test-accuracy_report.csv')
df_classification_report.drop(df_classification_report.tail(3).index, inplace=True)
df_classification_report = df_classification_report.sort_values(by=['f1-score'], ascending=False)
df_classification_report.to_csv(path+'test-classification_report.csv')
# AUC curve
y_true_ohe = np.zeros((len(y_pred), len(classes)))
for idx, lbl in enumerate(y_true):
y_true_ohe[idx][lbl] = 1
y_pred = y_pred.detach().numpy()
plot_multiclass_roc(y_true_ohe,y_pred, classes=classes, path=path, name='test-'+name)
# Confusion matrix
cm = confusion_matrix(y_true, y_pred_)
plot_confusion_matrix(cm, classes, path=path, name='test-'+name)
# plot_confusion_matrix(cm, list(range(len(classes))), path=path, name='test-'+name)
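# Illustrative usage sketch for results() above (assumed shapes, not part of the
# original module): x_pred / y_pred are torch tensors of per-class scores with
# shape (n_samples, n_classes), x_true / y_true are integer label sequences, and
# params must carry at least "model_type" and "exp_name" when path/name are None:
#   results(x_true, x_pred, y_true, y_pred, classes=class_names,
#           params={"model_type": "cnn", "exp_name": "baseline"})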
def get_color(idx):
if idx < 10:
return '#f500dc'
elif idx < 20:
return '#00f500'
elif idx < 30:
return '#00e0f5'
elif idx < 40:
return '#000cf5'
elif idx < 50:
return '#f5e900'
elif idx < 60:
return '#f58f00'
else:
return '#f50000'
def plot_multiclass_roc(y_true, y_pred, classes, path, name):
n_classes = len(classes)
lw=1
items = []
labels = ['item_id', 'fpr', 'tpr', 'roc_auc']
for i in range(n_classes):
fpr, tpr, _ = roc_curve(y_true[:, i], y_pred[:, i])
roc_auc = auc(fpr, tpr)
items.append((i, fpr, tpr, roc_auc))
df = pd.DataFrame.from_records(items, columns=labels)
df = df.sort_values(by=['roc_auc'], ascending=False)
for idx, (_, row) in enumerate(df.iterrows()):
color = get_color(idx)
plt.plot(row['fpr'], row['tpr'], lw=lw, color=color,
label=f'{classes[row["item_id"]]} (area = {row["roc_auc"]:.2f})')
plt.plot([0, 1], [0, 1], 'k--', lw=lw)
plt.xlim([-0.05, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title(f'Receiver operating characteristic for {name}')
plt.legend(loc='lower right',
fancybox=True, shadow=True, ncol=3, prop={'size': 12})
plt.savefig(f'{path}{name}-roc.png', bbox_inches='tight')
plt.clf()
plt.close()
def plot_confusion_matrix(cm, classes, path, name, normalize=False, title='Confusion matrix', cmap=plt.cm.Blues):
if normalize:
cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
plt.imshow(cm, interpolation='nearest', cmap=cmap)
plt.title(title)
plt.colorbar(shrink=0.75)
tick_marks = np.arange(len(classes))
plt.xticks(tick_marks, classes, rotation=90)
plt.yticks(tick_marks, classes)
plt.tight_layout()
plt.ylabel('True label')
plt.xlabel('Predicted label')
plt.title(f'Confusion Matrix for {name}')
plt.savefig(f'{path}{name}-cm.png', bbox_inches='tight')
plt.clf()
    plt.close()
| 34.729167 | 113 | 0.676065 | ["MIT"] | buddhi1/CIFAR-10-project | utils.py | 5,001 | Python |
import datetime
import os
import re
import ujson
from django.conf import settings
from django.http import HttpResponse
from django.test import override_settings
from mock import MagicMock, patch
import urllib
from typing import Any, Dict, List
from zerver.lib.actions import do_create_user
from zerver.lib.test_classes import ZulipTestCase
from zerver.lib.test_helpers import (
HostRequestMock, queries_captured, get_user_messages
)
from zerver.lib.soft_deactivation import do_soft_deactivate_users
from zerver.lib.test_runner import slow
from zerver.models import (
get_realm, get_stream, get_user, UserProfile, UserMessage, Recipient,
flush_per_request_caches, DefaultStream, Realm,
)
from zerver.views.home import home, sent_time_in_epoch_seconds
class HomeTest(ZulipTestCase):
def test_home(self) -> None:
# Keep this list sorted!!!
html_bits = [
'Compose your message here...',
'Exclude messages with topic',
'Keyboard shortcuts',
'Loading...',
'Manage streams',
'Narrow by topic',
'Next message',
'Search streams',
'Welcome to Zulip',
# Verify that the app styles get included
'app-stubentry.js',
'var page_params',
]
# Keep this list sorted!!!
expected_keys = [
"alert_words",
"avatar_source",
"avatar_url",
"avatar_url_medium",
"bot_types",
"can_create_streams",
"cross_realm_bots",
"custom_profile_field_types",
"custom_profile_fields",
"debug_mode",
"default_language",
"default_language_name",
"delivery_email",
"dense_mode",
"development_environment",
"email",
"emojiset",
"emojiset_choices",
"enable_desktop_notifications",
"enable_digest_emails",
"enable_offline_email_notifications",
"enable_offline_push_notifications",
"enable_online_push_notifications",
"enable_sounds",
"enable_stream_desktop_notifications",
"enable_stream_email_notifications",
"enable_stream_push_notifications",
"enable_stream_sounds",
"enter_sends",
"first_in_realm",
"full_name",
"furthest_read_time",
"has_mobile_devices",
"have_initial_messages",
"high_contrast_mode",
"hotspots",
"initial_servertime",
"is_admin",
"is_guest",
"jitsi_server_url",
"language_list",
"language_list_dbl_col",
"last_event_id",
"left_side_userlist",
"login_page",
"max_avatar_file_size",
"max_icon_file_size",
"max_message_id",
"maxfilesize",
"message_content_in_email_notifications",
"muted_topics",
"narrow",
"narrow_stream",
"needs_tutorial",
"never_subscribed",
"night_mode",
"password_min_guesses",
"password_min_length",
"pm_content_in_desktop_notifications",
"pointer",
"poll_timeout",
"presences",
"prompt_for_invites",
"queue_id",
"realm_add_emoji_by_admins_only",
"realm_allow_community_topic_editing",
"realm_allow_edit_history",
"realm_allow_message_deleting",
"realm_allow_message_editing",
"realm_authentication_methods",
"realm_available_video_chat_providers",
"realm_bot_creation_policy",
"realm_bot_domain",
"realm_bots",
"realm_create_stream_by_admins_only",
"realm_default_language",
"realm_default_stream_groups",
"realm_default_streams",
"realm_default_twenty_four_hour_time",
"realm_description",
"realm_disallow_disposable_email_addresses",
"realm_domains",
"realm_email_auth_enabled",
"realm_email_changes_disabled",
"realm_embedded_bots",
"realm_emoji",
"realm_filters",
"realm_google_hangouts_domain",
"realm_icon_source",
"realm_icon_url",
"realm_inline_image_preview",
"realm_inline_url_embed_preview",
"realm_invite_by_admins_only",
"realm_invite_required",
"realm_is_zephyr_mirror_realm",
"realm_mandatory_topics",
"realm_message_content_delete_limit_seconds",
"realm_message_content_edit_limit_seconds",
"realm_message_retention_days",
"realm_name",
"realm_name_changes_disabled",
"realm_name_in_notifications",
"realm_non_active_users",
"realm_notifications_stream_id",
"realm_password_auth_enabled",
"realm_presence_disabled",
"realm_push_notifications_enabled",
"realm_restricted_to_domain",
"realm_send_welcome_emails",
"realm_show_digest_email",
"realm_signup_notifications_stream_id",
"realm_uri",
"realm_user_groups",
"realm_users",
"realm_video_chat_provider",
"realm_waiting_period_threshold",
"root_domain_uri",
"save_stacktraces",
"search_pills_enabled",
"server_generation",
"server_inline_image_preview",
"server_inline_url_embed_preview",
"stream_description_max_length",
"stream_name_max_length",
"subscriptions",
"test_suite",
"timezone",
"translate_emoticons",
"translation_data",
"twenty_four_hour_time",
"two_fa_enabled",
"two_fa_enabled_user",
"unread_msgs",
"unsubscribed",
"use_websockets",
"user_id",
"warn_no_email",
"zulip_version",
]
email = self.example_email("hamlet")
# Verify fails if logged-out
result = self.client_get('/')
self.assertEqual(result.status_code, 302)
self.login(email)
# Create bot for realm_bots testing. Must be done before fetching home_page.
bot_info = {
'full_name': 'The Bot of Hamlet',
'short_name': 'hambot',
}
self.client_post("/json/bots", bot_info)
# Verify succeeds once logged-in
flush_per_request_caches()
with queries_captured() as queries:
with patch('zerver.lib.cache.cache_set') as cache_mock:
result = self._get_home_page(stream='Denmark')
self.assert_length(queries, 41)
self.assert_length(cache_mock.call_args_list, 7)
html = result.content.decode('utf-8')
for html_bit in html_bits:
if html_bit not in html:
raise AssertionError('%s not in result' % (html_bit,))
page_params = self._get_page_params(result)
actual_keys = sorted([str(k) for k in page_params.keys()])
self.assertEqual(actual_keys, expected_keys)
# TODO: Inspect the page_params data further.
# print(ujson.dumps(page_params, indent=2))
realm_bots_expected_keys = [
'api_key',
'avatar_url',
'bot_type',
'default_all_public_streams',
'default_events_register_stream',
'default_sending_stream',
'email',
'full_name',
'is_active',
'owner',
'services',
'user_id',
]
realm_bots_actual_keys = sorted([str(key) for key in page_params['realm_bots'][0].keys()])
self.assertEqual(realm_bots_actual_keys, realm_bots_expected_keys)
def test_home_under_2fa_without_otp_device(self) -> None:
with self.settings(TWO_FACTOR_AUTHENTICATION_ENABLED=True):
self.login(self.example_email("iago"))
result = self._get_home_page()
# Should be successful because otp device is not configured.
self.assertEqual(result.status_code, 200)
def test_home_under_2fa_with_otp_device(self) -> None:
with self.settings(TWO_FACTOR_AUTHENTICATION_ENABLED=True):
user_profile = self.example_user('iago')
self.create_default_device(user_profile)
self.login(user_profile.email)
result = self._get_home_page()
# User should not log in because otp device is configured but
# 2fa login function was not called.
self.assertEqual(result.status_code, 302)
self.login_2fa(user_profile)
result = self._get_home_page()
# Should be successful after calling 2fa login function.
self.assertEqual(result.status_code, 200)
def test_num_queries_for_realm_admin(self) -> None:
# Verify number of queries for Realm admin isn't much higher than for normal users.
self.login(self.example_email("iago"))
flush_per_request_caches()
with queries_captured() as queries:
with patch('zerver.lib.cache.cache_set') as cache_mock:
result = self._get_home_page()
self.assertEqual(result.status_code, 200)
self.assert_length(cache_mock.call_args_list, 6)
self.assert_length(queries, 37)
@slow("Creates and subscribes 10 users in a loop. Should use bulk queries.")
def test_num_queries_with_streams(self) -> None:
main_user = self.example_user('hamlet')
other_user = self.example_user('cordelia')
realm_id = main_user.realm_id
self.login(main_user.email)
# Try to make page-load do extra work for various subscribed
# streams.
for i in range(10):
stream_name = 'test_stream_' + str(i)
stream = self.make_stream(stream_name)
DefaultStream.objects.create(
realm_id=realm_id,
stream_id=stream.id
)
for user in [main_user, other_user]:
self.subscribe(user, stream_name)
# Simulate hitting the page the first time to avoid some noise
# related to initial logins.
self._get_home_page()
# Then for the second page load, measure the number of queries.
flush_per_request_caches()
with queries_captured() as queries2:
result = self._get_home_page()
self.assert_length(queries2, 35)
# Do a sanity check that our new streams were in the payload.
html = result.content.decode('utf-8')
self.assertIn('test_stream_7', html)
def _get_home_page(self, **kwargs: Any) -> HttpResponse:
with \
patch('zerver.lib.events.request_event_queue', return_value=42), \
patch('zerver.lib.events.get_user_events', return_value=[]):
result = self.client_get('/', dict(**kwargs))
return result
def _get_page_params(self, result: HttpResponse) -> Dict[str, Any]:
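        # The rendered home page embeds a line of the form
        #     var page_params = {...};
        # so we locate that line and parse the JSON object that follows " = ".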
html = result.content.decode('utf-8')
lines = html.split('\n')
page_params_line = [l for l in lines if re.match(r'^\s*var page_params', l)][0]
page_params_json = page_params_line.split(' = ')[1].rstrip(';')
page_params = ujson.loads(page_params_json)
return page_params
def _sanity_check(self, result: HttpResponse) -> None:
'''
Use this for tests that are geared toward specific edge cases, but
which still want the home page to load properly.
'''
html = result.content.decode('utf-8')
if 'Compose your message' not in html:
raise AssertionError('Home page probably did not load.')
def test_terms_of_service(self) -> None:
user = self.example_user('hamlet')
email = user.email
self.login(email)
for user_tos_version in [None, '1.1', '2.0.3.4']:
user.tos_version = user_tos_version
user.save()
with \
self.settings(TERMS_OF_SERVICE='whatever'), \
self.settings(TOS_VERSION='99.99'):
result = self.client_get('/', dict(stream='Denmark'))
html = result.content.decode('utf-8')
self.assertIn('There are new Terms of Service', html)
def test_terms_of_service_first_time_template(self) -> None:
user = self.example_user('hamlet')
email = user.email
self.login(email)
user.tos_version = None
user.save()
with \
self.settings(FIRST_TIME_TOS_TEMPLATE='hello.html'), \
self.settings(TOS_VERSION='99.99'):
result = self.client_post('/accounts/accept_terms/')
self.assertEqual(result.status_code, 200)
self.assert_in_response("I agree to the", result)
self.assert_in_response("most productive group chat", result)
def test_accept_terms_of_service(self) -> None:
email = self.example_email("hamlet")
self.login(email)
result = self.client_post('/accounts/accept_terms/')
self.assertEqual(result.status_code, 200)
self.assert_in_response("I agree to the", result)
result = self.client_post('/accounts/accept_terms/', {'terms': True})
self.assertEqual(result.status_code, 302)
self.assertEqual(result['Location'], '/')
def test_bad_narrow(self) -> None:
email = self.example_email("hamlet")
self.login(email)
with patch('logging.exception') as mock:
result = self._get_home_page(stream='Invalid Stream')
mock.assert_called_once()
self.assertEqual(mock.call_args_list[0][0][0], "Narrow parsing exception")
self._sanity_check(result)
def test_bad_pointer(self) -> None:
user_profile = self.example_user('hamlet')
email = user_profile.email
user_profile.pointer = 999999
user_profile.save()
self.login(email)
with patch('logging.warning') as mock:
result = self._get_home_page()
mock.assert_called_once_with('[email protected] has invalid pointer 999999')
self._sanity_check(result)
def test_topic_narrow(self) -> None:
email = self.example_email("hamlet")
self.login(email)
result = self._get_home_page(stream='Denmark', topic='lunch')
self._sanity_check(result)
html = result.content.decode('utf-8')
self.assertIn('lunch', html)
def test_notifications_stream(self) -> None:
email = self.example_email("hamlet")
realm = get_realm('zulip')
realm.notifications_stream_id = get_stream('Denmark', realm).id
realm.save()
self.login(email)
result = self._get_home_page()
page_params = self._get_page_params(result)
self.assertEqual(page_params['realm_notifications_stream_id'], get_stream('Denmark', realm).id)
def create_bot(self, owner: UserProfile, bot_email: str, bot_name: str) -> UserProfile:
user = do_create_user(
email=bot_email,
password='123',
realm=owner.realm,
full_name=bot_name,
short_name=bot_name,
bot_type=UserProfile.DEFAULT_BOT,
bot_owner=owner
)
return user
def create_non_active_user(self, realm: Realm, email: str, name: str) -> UserProfile:
user = do_create_user(
email=email,
password='123',
realm=realm,
full_name=name,
short_name=name,
)
# Doing a full-stack deactivation would be expensive here,
# and we really only need to flip the flag to get a valid
# test.
user.is_active = False
user.save()
return user
def test_signup_notifications_stream(self) -> None:
email = self.example_email("hamlet")
realm = get_realm('zulip')
realm.signup_notifications_stream = get_stream('Denmark', realm)
realm.save()
self.login(email)
result = self._get_home_page()
page_params = self._get_page_params(result)
self.assertEqual(page_params['realm_signup_notifications_stream_id'], get_stream('Denmark', realm).id)
@slow('creating users and loading home page')
def test_people(self) -> None:
hamlet = self.example_user('hamlet')
realm = get_realm('zulip')
self.login(hamlet.email)
for i in range(3):
self.create_bot(
owner=hamlet,
bot_email='bot-%[email protected]' % (i,),
bot_name='Bot %d' % (i,),
)
for i in range(3):
self.create_non_active_user(
realm=realm,
email='defunct-%[email protected]' % (i,),
name='Defunct User %d' % (i,),
)
result = self._get_home_page()
page_params = self._get_page_params(result)
'''
We send three lists of users. The first two below are disjoint
lists of users, and the records we send for them have identical
structure.
The realm_bots bucket is somewhat redundant, since all bots will
be in one of the first two buckets. They do include fields, however,
that normal users don't care about, such as default_sending_stream.
'''
buckets = [
'realm_users',
'realm_non_active_users',
'realm_bots',
]
for field in buckets:
users = page_params[field]
self.assertTrue(len(users) >= 3, field)
for rec in users:
self.assertEqual(rec['user_id'],
get_user(rec['email'], realm).id)
if field == 'realm_bots':
self.assertNotIn('is_bot', rec)
self.assertIn('is_active', rec)
self.assertIn('owner', rec)
else:
self.assertIn('is_bot', rec)
self.assertNotIn('is_active', rec)
active_emails = {p['email'] for p in page_params['realm_users']}
non_active_emails = {p['email'] for p in page_params['realm_non_active_users']}
bot_emails = {p['email'] for p in page_params['realm_bots']}
self.assertIn(hamlet.email, active_emails)
self.assertIn('[email protected]', non_active_emails)
# Bots can show up in multiple buckets.
self.assertIn('[email protected]', bot_emails)
self.assertIn('[email protected]', active_emails)
# Make sure nobody got mis-bucketed.
self.assertNotIn(hamlet.email, non_active_emails)
self.assertNotIn('[email protected]', active_emails)
cross_bots = page_params['cross_realm_bots']
self.assertEqual(len(cross_bots), 5)
cross_bots.sort(key=lambda d: d['email'])
notification_bot = self.notification_bot()
by_email = lambda d: d['email']
self.assertEqual(sorted(cross_bots, key=by_email), sorted([
dict(
user_id=get_user('[email protected]', get_realm('zulip')).id,
is_admin=False,
email='[email protected]',
full_name='Zulip New User Bot',
is_bot=True
),
dict(
user_id=get_user('[email protected]', get_realm('zulip')).id,
is_admin=False,
email='[email protected]',
full_name='Email Gateway',
is_bot=True
),
dict(
user_id=get_user('[email protected]', get_realm('zulip')).id,
is_admin=False,
email='[email protected]',
full_name='Zulip Feedback Bot',
is_bot=True
),
dict(
user_id=notification_bot.id,
is_admin=False,
email=notification_bot.email,
full_name='Notification Bot',
is_bot=True
),
dict(
user_id=get_user('[email protected]', get_realm('zulip')).id,
is_admin=False,
email='[email protected]',
full_name='Welcome Bot',
is_bot=True
),
], key=by_email))
def test_new_stream(self) -> None:
user_profile = self.example_user("hamlet")
stream_name = 'New stream'
self.subscribe(user_profile, stream_name)
self.login(user_profile.email)
result = self._get_home_page(stream=stream_name)
page_params = self._get_page_params(result)
self.assertEqual(page_params['narrow_stream'], stream_name)
self.assertEqual(page_params['narrow'], [dict(operator='stream', operand=stream_name)])
self.assertEqual(page_params['pointer'], -1)
self.assertEqual(page_params['max_message_id'], -1)
self.assertEqual(page_params['have_initial_messages'], False)
def test_invites_by_admins_only(self) -> None:
user_profile = self.example_user('hamlet')
email = user_profile.email
realm = user_profile.realm
realm.invite_by_admins_only = True
realm.save()
self.login(email)
self.assertFalse(user_profile.is_realm_admin)
result = self._get_home_page()
html = result.content.decode('utf-8')
self.assertNotIn('Invite more users', html)
user_profile.is_realm_admin = True
user_profile.save()
result = self._get_home_page()
html = result.content.decode('utf-8')
self.assertIn('Invite more users', html)
def test_show_invites_for_guest_users(self) -> None:
user_profile = self.example_user('polonius')
email = user_profile.email
realm = user_profile.realm
realm.invite_by_admins_only = False
realm.save()
self.login(email)
self.assertFalse(user_profile.is_realm_admin)
self.assertFalse(get_realm('zulip').invite_by_admins_only)
result = self._get_home_page()
html = result.content.decode('utf-8')
self.assertNotIn('Invite more users', html)
def test_desktop_home(self) -> None:
email = self.example_email("hamlet")
self.login(email)
result = self.client_get("/desktop_home")
self.assertEqual(result.status_code, 301)
self.assertTrue(result["Location"].endswith("/desktop_home/"))
result = self.client_get("/desktop_home/")
self.assertEqual(result.status_code, 302)
path = urllib.parse.urlparse(result['Location']).path
self.assertEqual(path, "/")
def test_apps_view(self) -> None:
result = self.client_get('/apps')
self.assertEqual(result.status_code, 301)
self.assertTrue(result['Location'].endswith('/apps/'))
with self.settings(ZILENCER_ENABLED=False):
result = self.client_get('/apps/')
self.assertEqual(result.status_code, 301)
self.assertTrue(result['Location'] == 'https://zulipchat.com/apps/')
with self.settings(ZILENCER_ENABLED=True):
result = self.client_get('/apps/')
self.assertEqual(result.status_code, 200)
html = result.content.decode('utf-8')
self.assertIn('Apps for every platform.', html)
def test_generate_204(self) -> None:
email = self.example_email("hamlet")
self.login(email)
result = self.client_get("/api/v1/generate_204")
self.assertEqual(result.status_code, 204)
def test_message_sent_time(self) -> None:
epoch_seconds = 1490472096
pub_date = datetime.datetime.fromtimestamp(epoch_seconds)
user_message = MagicMock()
user_message.message.pub_date = pub_date
self.assertEqual(sent_time_in_epoch_seconds(user_message), epoch_seconds)
def test_handlebars_compile_error(self) -> None:
request = HostRequestMock()
with self.settings(DEVELOPMENT=True, TEST_SUITE=False):
with patch('os.path.exists', return_value=True):
result = home(request)
self.assertEqual(result.status_code, 500)
self.assert_in_response('Error compiling handlebars templates.', result)
def test_subdomain_homepage(self) -> None:
email = self.example_email("hamlet")
self.login(email)
with self.settings(ROOT_DOMAIN_LANDING_PAGE=True):
with patch('zerver.views.home.get_subdomain', return_value=""):
result = self._get_home_page()
self.assertEqual(result.status_code, 200)
self.assert_in_response('most productive group chat', result)
with patch('zerver.views.home.get_subdomain', return_value="subdomain"):
result = self._get_home_page()
self._sanity_check(result)
def send_test_message(self, content: str, sender_name: str='iago',
stream_name: str='Denmark', topic_name: str='foo') -> None:
sender = self.example_email(sender_name)
self.send_stream_message(sender, stream_name,
content=content, topic_name=topic_name)
def soft_activate_and_get_unread_count(self, stream: str='Denmark', topic: str='foo') -> int:
stream_narrow = self._get_home_page(stream=stream, topic=topic)
page_params = self._get_page_params(stream_narrow)
return page_params['unread_msgs']['count']
def test_unread_count_user_soft_deactivation(self) -> None:
# In this test we make sure if a soft deactivated user had unread
# messages before deactivation they remain same way after activation.
long_term_idle_user = self.example_user('hamlet')
self.login(long_term_idle_user.email)
message = 'Test Message 1'
self.send_test_message(message)
with queries_captured() as queries:
self.assertEqual(self.soft_activate_and_get_unread_count(), 1)
query_count = len(queries)
user_msg_list = get_user_messages(long_term_idle_user)
self.assertEqual(user_msg_list[-1].content, message)
self.logout()
do_soft_deactivate_users([long_term_idle_user])
self.login(long_term_idle_user.email)
message = 'Test Message 2'
self.send_test_message(message)
idle_user_msg_list = get_user_messages(long_term_idle_user)
self.assertNotEqual(idle_user_msg_list[-1].content, message)
with queries_captured() as queries:
self.assertEqual(self.soft_activate_and_get_unread_count(), 2)
# Test here for query count to be at least 5 greater than previous count
# This will assure indirectly that add_missing_messages() was called.
self.assertGreaterEqual(len(queries) - query_count, 5)
idle_user_msg_list = get_user_messages(long_term_idle_user)
self.assertEqual(idle_user_msg_list[-1].content, message)
@slow("Loads home page data several times testing different cases")
def test_multiple_user_soft_deactivations(self) -> None:
long_term_idle_user = self.example_user('hamlet')
# We are sending this message to ensure that long_term_idle_user has
# at least one UserMessage row.
self.send_test_message('Testing', sender_name='hamlet')
do_soft_deactivate_users([long_term_idle_user])
message = 'Test Message 1'
self.send_test_message(message)
self.login(long_term_idle_user.email)
with queries_captured() as queries:
self.assertEqual(self.soft_activate_and_get_unread_count(), 2)
query_count = len(queries)
long_term_idle_user.refresh_from_db()
self.assertFalse(long_term_idle_user.long_term_idle)
idle_user_msg_list = get_user_messages(long_term_idle_user)
self.assertEqual(idle_user_msg_list[-1].content, message)
message = 'Test Message 2'
self.send_test_message(message)
with queries_captured() as queries:
self.assertEqual(self.soft_activate_and_get_unread_count(), 3)
# Test here for query count to be at least 5 less than previous count.
# This will assure add_missing_messages() isn't repeatedly called.
self.assertGreaterEqual(query_count - len(queries), 5)
idle_user_msg_list = get_user_messages(long_term_idle_user)
self.assertEqual(idle_user_msg_list[-1].content, message)
self.logout()
do_soft_deactivate_users([long_term_idle_user])
message = 'Test Message 3'
self.send_test_message(message)
self.login(long_term_idle_user.email)
with queries_captured() as queries:
self.assertEqual(self.soft_activate_and_get_unread_count(), 4)
query_count = len(queries)
long_term_idle_user.refresh_from_db()
self.assertFalse(long_term_idle_user.long_term_idle)
idle_user_msg_list = get_user_messages(long_term_idle_user)
self.assertEqual(idle_user_msg_list[-1].content, message)
message = 'Test Message 4'
self.send_test_message(message)
with queries_captured() as queries:
self.assertEqual(self.soft_activate_and_get_unread_count(), 5)
self.assertGreaterEqual(query_count - len(queries), 5)
idle_user_msg_list = get_user_messages(long_term_idle_user)
self.assertEqual(idle_user_msg_list[-1].content, message)
self.logout()
def test_url_language(self) -> None:
user = self.example_user("hamlet")
user.default_language = 'es'
user.save()
self.login(user.email)
result = self._get_home_page()
self.assertEqual(result.status_code, 200)
with \
patch('zerver.lib.events.request_event_queue', return_value=42), \
patch('zerver.lib.events.get_user_events', return_value=[]):
result = self.client_get('/de/')
page_params = self._get_page_params(result)
self.assertEqual(page_params['default_language'], 'es')
# TODO: Verify that the actual language we're using in the
# translation data is German.
def test_translation_data(self) -> None:
user = self.example_user("hamlet")
user.default_language = 'es'
user.save()
self.login(user.email)
result = self._get_home_page()
self.assertEqual(result.status_code, 200)
page_params = self._get_page_params(result)
self.assertEqual(page_params['default_language'], 'es')
| 38.802747 | 110 | 0.61758 | ["Apache-2.0"] | rhencke/zulip | zerver/tests/test_home.py | 31,081 | Python |
################################################
# backend.py is part of COVID.codelongandpros.repl.co
# You should have received a copy of the three-clause BSD license.
# If you did not, it is located at:
# https://opensource.org/licenses/BSD-3-Clause
# Made by Scott Little, with help from StackOverflow
################################################
import csv
import matplotlib.pyplot as plt
from imageio import imwrite
def get_file():
url = 'https://raw.githubusercontent.com/nytimes/covid-19-data/master/us-states.csv'
import requests
r = requests.get(url)
with open('cases.csv', 'wb') as f:
f.write(r.content)
# Retrieve HTTP meta-data
print(r.status_code)
print(r.headers['content-type'])
print(r.encoding)
def get_cases(stat):
x = []
y = []
d = [0]
dx = [0]
if len(stat) == 0:
return 1
dat = 0
state = stat
reader = csv.DictReader(open("cases.csv"))
for raw in reader:
if raw['state'] == state:
dat+=1
x.append(dat)
dx.append(dat)
            y.append(int(raw['cases']))   # cast CSV strings to ints so the axes stay numeric
            d.append(int(raw['deaths']))
else:
continue
fig, axs = plt.subplots(2,figsize=(12,10))
fig.suptitle(f"COVID-19 Cases/Deaths in {stat}")
axs[0].plot(x, y)
axs[1].plot(dx, d)
axs[0].set_ylabel('Cases')
axs[1].set_ylabel("Deaths")
for axe in axs:
axe.set_xlabel("Days since 2020-01-21")
plt.savefig('static/plots/plot.png', bbox_inches='tight', dpi=400)
return 0
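# Illustrative usage (assumed flow, not part of the original script):
#   get_file()             # download the NYT dataset to cases.csv
#   get_cases('New York')  # returns 0 on success, 1 if the state name is empty
# The resulting figure is written to static/plots/plot.png.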
def overwrite():
import numpy as np
img = np.zeros([100,100,3],dtype=np.uint8)
img.fill(255) # or img[:] = 255
    imwrite('static/plots/plot.png', img)
| 24.96875 | 86 | 0.612641 | ["BSD-3-Clause"] | CodeLongAndProsper90/COVID | backend.py | 1,598 | Python |
import unittest
from programy.services.lookups.countrycodes import CountryCodes
class CountryCodesTests(unittest.TestCase):
def test_names(self):
self.assertEqual(['Ukraine', 'UA', 'UKR', '804'], CountryCodes.NAMES['Ukraine'])
def test_two_digits(self):
self.assertEqual(['Ukraine', 'UA', 'UKR', '804'], CountryCodes.TWO_DIGITS['UA'])
def test_three_digits(self):
self.assertEqual(['Ukraine', 'UA', 'UKR', '804'], CountryCodes.THREE_DIGITS['UKR'])
def test_numeric(self):
self.assertEqual(['Ukraine', 'UA', 'UKR', '804'], CountryCodes.NUMERIC['804'])
| 33.611111 | 91 | 0.67438 | ["MIT"] | RonKhondji/program-y | test/programytest/services/lookups/test_countrycodes.py | 605 | Python |
from . import *
#classes
from ._addon import _Addon
from ._plugin_instance import _PluginInstance
from ._plugin import _Plugin
#folders
from . import _util
from . import _structure
from . import _ui
from . import _network
from . import _volumetric
from . import _macro
| 22.333333 | 45 | 0.798507 | ["MIT"] | devanshuDesai/nanome | nanome/_internal/__init__.py | 268 | Python |
import numpy as np
def non_max_suppression_fast(boxes, overlapThresh):
# if there are no boxes, return an empty list
if len(boxes) == 0:
return []
# if the boxes are integers, convert them to floats (due to divisions)
if boxes.dtype.kind == "i":
boxes = boxes.astype("float")
# initialize the list of picked indexes
pick = []
# grab the coordinates of the bounding boxes
x1 = boxes[:,0]
y1 = boxes[:,1]
x2 = boxes[:,2]
y2 = boxes[:,3]
scores = boxes[:,4]
    # compute the area of the boxes and sort the box indices by score
    # (ascending, so the highest-scoring box sits at the end of the list)
    area = (x2 - x1 + 1) * (y2 - y1 + 1)
    idxs = np.argsort(scores)
# keep looking while some indexes still remain in the indexes list
while len(idxs) > 0:
# grab the last index in the indexes list and add its value to the list of picked indexes
last = len(idxs) - 1
i = idxs[last]
pick.append(i)
# find the largest coordinates for the start of the overlap area and the smallest coordinates for the end
xx1 = np.maximum(x1[i], x1[idxs[:last]])
yy1 = np.maximum(y1[i], y1[idxs[:last]])
xx2 = np.minimum(x2[i], x2[idxs[:last]])
yy2 = np.minimum(y2[i], y2[idxs[:last]])
# compute the width and height of the overlap
w = np.maximum(0, xx2 - xx1 + 1)
h = np.maximum(0, yy2 - yy1 + 1)
# compute the ratio of overlap
overlap = (w * h) / area[idxs[:last]]
# delete all indexes from the list that have an overlap over the threshold
idxs = np.delete(idxs, np.concatenate(([last],
np.where(overlap > overlapThresh)[0])))
# return only the boxes that were picked
return boxes[pick].astype("float")
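# Illustrative usage (added example, not part of the original module):
if __name__ == "__main__":
    sample = np.array([
        [10, 10, 50, 50, 0.9],      # kept: highest score
        [12, 12, 52, 48, 0.8],      # suppressed: overlaps the first box heavily
        [100, 100, 150, 150, 0.7],  # kept: no overlap with the others
    ], dtype=float)
    print(non_max_suppression_fast(sample, overlapThresh=0.5))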
| 24.126761 | 108 | 0.615879 | ["MIT"] | dahem/coffe-images | coffee-maturation/src/models/non_maximum.py | 1,713 | Python |