max_stars_repo_path (string, 4-245 chars) | max_stars_repo_name (string, 7-115 chars) | max_stars_count (int64, 101-368k) | id (string, 2-8 chars) | content (string, 6-1.03M chars) |
---|---|---|---|---|
tools/prepare_iata_airline_dump_file.py | mtrampont/opentraveldata | 208 | 26184 | <reponame>mtrampont/opentraveldata
#!/usr/bin/env python
import getopt, sys, io
import pandas as pd
#
# Usage
#
def usage (script_name):
"""
Display the usage.
"""
print ("")
print ("Usage: {} [options]".format(script_name))
print ("")
print ("That script transforms and filter a fix width data file into a hat symbol separated CSV one")
print ("")
print ("Options:")
print (" -h, --help : outputs this help and exits")
print (" -v, --verbose : verbose output (debugging)")
print (" -i, --input <input data file-path>")
print (" -o, --output <output data file-path>")
print ("")
#
# Command-line arguments
#
def handle_opt():
"""
Handle the command-line options
"""
try:
opts, args = getopt.getopt (sys.argv[1:], "hvi:o:",
["help", "verbose", "input=", "output="])
except getopt.GetoptError as err:
# Print help information and exit. It will print something like
# "option -d not recognized"
print (str (err))
usage (sys.argv[0])
sys.exit(2)
# Options
verboseFlag = False
airline_input_filepath = ''
airline_output_filepath = ''
airline_input_file = sys.stdin # '/dev/stdin'
airline_output_file = sys.stdout # '/dev/stdout'
# Input stream/file
if len (args) != 0:
airline_input_filepath = args[0]
# Handling
for o, a in opts:
if o in ("-h", "--help"):
usage (sys.argv[0])
sys.exit()
elif o in ("-v", "--verbose"):
verboseFlag = True
elif o in ("-i", "--input"):
airline_input_filepath = a
elif o in ("-o", "--output"):
airline_output_filepath = a
else:
raise ValueError ("That option ({}) is unknown. Rerun that script with the -h option to see the accepted options".format(o))
# Input file. That file may be compressed with GNU Zip (gzip)
if (airline_input_filepath != ''):
airline_input_file = open (airline_input_filepath, 'rb')
# Output file-path
if (airline_output_filepath != ''):
airline_output_file = open (airline_output_filepath, 'w')
# Report the configuration
airline_input_filepath_str = airline_input_filepath \
if airline_input_filepath != '' \
else 'Standard input'
airline_output_filepath_str = airline_output_filepath \
if airline_output_filepath != '' \
else 'Standard output'
if (airline_output_filepath_str != 'Standard output'):
print ("Input data file: '{}'".format(airline_input_filepath_str))
print ("Output data file: '{}'".format(airline_output_filepath_str))
#
return (verboseFlag, airline_input_filepath, airline_output_file)
def extract_df (airline_input_filepath):
"""
Parse a fixed-width data file containing details
about IATA referenced airlines, and fill in a Pandas data-frame
"""
# Using Pandas with column specification
col_names = ['name', 'num_code', '3char_code', '2char_code',
'address_street_1', 'address_street_2', 'address_city_name',
'address_state_name', 'address_country_name',
'address_postal_code',
'flag_1', 'flag_2', 'flag_3', 'flag_4', 'type',
'num_code_2']
col_specs = [(0, 80), (80, 84), (84, 87), (87, 90),
(90, 130), (130, 170), (170, 195),
(195, 215), (215, 259),
(259, 373),
(373, 374), (374, 375), (375, 376), (376, 377), (377, 379),
(379, 385)]
col_converters = {
'num_code': lambda x: str(int(x)),
'num_code_2': lambda x: str(int(x))}
airline_df = pd.read_fwf(airline_input_filepath,
colspecs = col_specs, header = None,
names = col_names, converters = col_converters)
# Leave empty fields empty (otherwise, Pandas specifies NaN)
airline_df.fillna (value = '', method = None, inplace = True)
# Merge num_code and num_code2
airline_df['num_code'] = airline_df \
.apply(lambda r: r['num_code'] if r['num_code'] != '' else r['num_code_2'],
axis = 1)
# DEBUG
#print (str(airline_df.head()))
#print (str(airline_df.dtypes))
#
return (airline_df)
def dump_to_csv (airline_df, airline_output_file):
"""
Dump a sub-set of the Pandas data-frame into a CSV file.
The field delimiter is the hat symbol ('^').
"""
subcol_names = ['2char_code', '3char_code', 'num_code', 'name', 'type']
# DEBUG
#airline_spec_df = airline_df[airline_df['2char_code'] == 'LH'][subcol_names]
#print (str(airline_spec_df))
# Sort by IATA and ICAO codes
airline_df.sort_values(['2char_code', '3char_code', 'num_code', 'name'],
ascending = True, inplace = True)
# Dump the data-frame into a CSV file
airline_df.to_csv (airline_output_file, sep = '^', columns = subcol_names,
header = True, index = False, doublequote = False,
quotechar = '|')
#
# Main
#
def main():
"""
Main
"""
# Parse command options
(verboseFlag, airline_input_filepath, airline_output_file) = handle_opt()
# DEBUG
#print ("Type of file: '{}'".format(type(airline_input_filepath)))
# Parse the fixed width data file of airline details
airline_df = extract_df (airline_input_filepath)
# Dump the Pandas data-frame into a CSV file
dump_to_csv (airline_df, airline_output_file)
#
# Entry point, when launched as a standalone script
#
if __name__ == "__main__":
main()
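# Illustrative invocation (the file names below are placeholders, not taken from the
# OpenTravelData project):
#   python prepare_iata_airline_dump_file.py -i airline_dump.fwf -o airline_dump.csv
# The output is a hat-separated ('^') CSV restricted to the columns selected in
# dump_to_csv(), i.e. a header line of: 2char_code^3char_code^num_code^name^type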
|
tests/test_functions/http_log_exception/main.py | KaylaNguyen/functions-framework-python | 479 | 26194 | <filename>tests/test_functions/http_log_exception/main.py
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Function used in Worker tests of legacy GCF Python 3.7 logging."""
import logging
X_GOOGLE_FUNCTION_NAME = "gcf-function"
X_GOOGLE_ENTRY_POINT = "function"
HOME = "/tmp"
def function(request):
"""Test function which logs exceptions.
Args:
request: The HTTP request which triggered this function.
"""
try:
raise Exception
except:
logging.exception("log")
return None
|
swift/internal/swift_c_module.bzl | BalestraPatrick/rules_swift | 215 | 26212 | # Copyright 2018 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implementation of the `swift_c_module` rule."""
load(":swift_common.bzl", "swift_common")
load(":utils.bzl", "merge_runfiles")
def _swift_c_module_impl(ctx):
module_map = ctx.file.module_map
deps = ctx.attr.deps
cc_infos = [dep[CcInfo] for dep in deps]
data_runfiles = [dep[DefaultInfo].data_runfiles for dep in deps]
default_runfiles = [dep[DefaultInfo].default_runfiles for dep in deps]
if cc_infos:
cc_info = cc_common.merge_cc_infos(cc_infos = cc_infos)
compilation_context = cc_info.compilation_context
else:
cc_info = None
compilation_context = cc_common.create_compilation_context()
providers = [
# We must repropagate the dependencies' DefaultInfos, otherwise we
# will lose runtime dependencies that the library expects to be
# there during a test (or a regular `bazel run`).
DefaultInfo(
data_runfiles = merge_runfiles(data_runfiles),
default_runfiles = merge_runfiles(default_runfiles),
files = depset([module_map]),
),
swift_common.create_swift_info(
modules = [
swift_common.create_module(
name = ctx.attr.module_name,
clang = swift_common.create_clang_module(
compilation_context = compilation_context,
module_map = module_map,
# TODO(b/142867898): Precompile the module and place it
# here.
precompiled_module = None,
),
),
],
),
]
if cc_info:
providers.append(cc_info)
return providers
swift_c_module = rule(
attrs = {
"module_map": attr.label(
allow_single_file = True,
doc = """\
The module map file that should be loaded to import the C library dependency
into Swift.
""",
mandatory = True,
),
"module_name": attr.string(
doc = """\
The name of the top-level module in the module map that this target represents.
A single `module.modulemap` file can define multiple top-level modules. When
building with implicit modules, the presence of that module map allows any of
the modules defined in it to be imported. When building explicit modules,
however, there is a one-to-one correspondence between top-level modules and
BUILD targets and the module name must be known without reading the module map
file, so it must be provided directly. Therefore, one may have multiple
`swift_c_module` targets that reference the same `module.modulemap` file but
with different module names and headers.
""",
mandatory = True,
),
"deps": attr.label_list(
allow_empty = False,
doc = """\
A list of C targets (or anything propagating `CcInfo`) that are dependencies of
this target and whose headers may be referenced by the module map.
""",
mandatory = True,
providers = [[CcInfo]],
),
},
doc = """\
Wraps one or more C targets in a new module map that allows it to be imported
into Swift to access its C interfaces.
The `cc_library` rule in Bazel does not produce module maps that are compatible
with Swift. In order to make interop between Swift and C possible, users have
one of two options:
1. **Use an auto-generated module map.** In this case, the `swift_c_module`
rule is not needed. If a `cc_library` is a direct dependency of a
`swift_{binary,library,test}` target, a module map will be automatically
generated for it and the module's name will be derived from the Bazel target
label (in the same fashion that module names for Swift targets are derived).
The module name can be overridden by setting the `swift_module` tag on the
`cc_library`; e.g., `tags = ["swift_module=MyModule"]`.
2. **Use a custom module map.** For finer control over the headers that are
exported by the module, use the `swift_c_module` rule to provide a custom
module map that specifies the name of the module, its headers, and any other
module information. The `cc_library` targets that contain the headers that
you wish to expose to Swift should be listed in the `deps` of your
`swift_c_module` (and by listing multiple targets, you can export multiple
libraries under a single module if desired). Then, your
`swift_{binary,library,test}` targets should depend on the `swift_c_module`
target, not on the underlying `cc_library` target(s).
NOTE: Swift at this time does not support interop directly with C++. Any headers
referenced by a module map that is imported into Swift must have only C features
visible, often by using preprocessor conditions like `#if __cplusplus` to hide
any C++ declarations.
""",
implementation = _swift_c_module_impl,
)
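# Minimal usage sketch (all target and module names below are invented for
# illustration; they only exercise the mandatory attributes documented above):
#
#   swift_c_module(
#       name = "CLibraryModule",
#       module_map = "module.modulemap",
#       module_name = "CLibrary",
#       deps = [":c_library"],
#   )
#
# A `swift_library` or `swift_binary` would then depend on ":CLibraryModule" instead
# of the underlying `cc_library`, as described in the rule documentation.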
|
mamonsu/plugins/system/linux/__init__.py | sgrinko/mamonsu | 188 | 26278 | __all__ = [
'proc_stat', 'disk_stats', 'disk_sizes',
'memory', 'uptime', 'open_files', 'net', 'la'
,'pg_probackup'
]
from . import *
|
src/ralph/networks/migrations/0010_auto_20170216_1230.py | DoNnMyTh/ralph | 1,668 | 26296 | <gh_stars>1000+
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('networks', '0009_auto_20160823_0921'),
]
operations = [
migrations.AlterField(
model_name='network',
name='gateway',
field=models.ForeignKey(to='networks.IPAddress', blank=True, on_delete=django.db.models.deletion.SET_NULL, verbose_name='Gateway address', null=True, related_name='gateway_network'),
),
]
|
tacker/api/views/__init__.py | takahashi-tsc/tacker | 116 | 26300 | # Copyright (C) 2020 NTT DATA
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tacker.api.common import attribute_filter
from tacker.common import exceptions as exception
class BaseViewBuilder(object):
@classmethod
def validate_filter(cls, filters=None):
if not filters:
return
return attribute_filter.parse_filter_rule(filters,
target=cls.FLATTEN_ATTRIBUTES)
@classmethod
def validate_attribute_fields(cls, all_fields=None, fields=None,
exclude_fields=None, exclude_default=None):
if all_fields and (fields or exclude_fields or exclude_default):
msg = ("Invalid query parameter combination: 'all_fields' "
"cannot be combined with 'fields' or 'exclude_fields' "
"or 'exclude_default'")
raise exception.ValidationError(msg)
if fields and (all_fields or exclude_fields):
msg = ("Invalid query parameter combination: 'fields' "
"cannot be combined with 'all_fields' or 'exclude_fields' ")
raise exception.ValidationError(msg)
if exclude_fields and (all_fields or fields or exclude_default):
msg = ("Invalid query parameter combination: 'exclude_fields' "
"cannot be combined with 'all_fields' or 'fields' "
"or 'exclude_default'")
raise exception.ValidationError(msg)
if exclude_default and (all_fields or exclude_fields):
msg = ("Invalid query parameter combination: 'exclude_default' "
"cannot be combined with 'all_fields' or 'exclude_fields' ")
raise exception.ValidationError(msg)
def _validate_complex_attributes(query_parameter, fields):
msg = ("Invalid query parameter '%(query_parameter)s'. "
"Value: %(field)s")
for field in fields:
if field in cls.COMPLEX_ATTRIBUTES:
continue
elif '*' in field:
# Field should never contain '*' as it's reserved for
# special purpose for handling key-value pairs.
raise exception.ValidationError(msg %
{"query_parameter": query_parameter,
"field": field})
elif field not in cls.FLATTEN_COMPLEX_ATTRIBUTES:
# Special case for field with key-value pairs.
# In this particular case, key will act as an attribute
# in structure so you need to treat it differently than
# other fields. All key-value pair field will be post-fix
# with '*' in FLATTEN_COMPLEX_ATTRIBUTES. Request
# with field which contains '*' will be treated as an
# error.
special_field = False
for attribute in cls.FLATTEN_COMPLEX_ATTRIBUTES:
if '*' in attribute and field.startswith(
attribute.split('*')[0]):
special_field = True
if not special_field:
raise exception.ValidationError(msg %
{"query_parameter": query_parameter,
"field": field})
if fields:
_validate_complex_attributes("fields", fields.split(','))
elif exclude_fields:
_validate_complex_attributes("exclude_fields",
exclude_fields.split(","))
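# Illustrative sketch (not taken from the Tacker test suite): the mutual-exclusion
# checks above reject combinations such as
#   BaseViewBuilder.validate_attribute_fields(all_fields=True, fields="name")
# which raises exception.ValidationError, whereas a subclass that defines
# FLATTEN_ATTRIBUTES, COMPLEX_ATTRIBUTES and FLATTEN_COMPLEX_ATTRIBUTES can accept
# fields="name" on its own.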
|
vkwave/vkscript/__init__.py | krasnovmv/vkwave | 222 | 26312 | <reponame>krasnovmv/vkwave<gh_stars>100-1000
import vkwave.vkscript.handlers.assignments
import vkwave.vkscript.handlers.blocks
import vkwave.vkscript.handlers.calls
import vkwave.vkscript.handlers.expressions
import vkwave.vkscript.handlers.statements
import vkwave.vkscript.handlers.types
from .converter import VKScriptConverter
from .execute import Execute
from .execute import execute
__all__ = ("execute", "Execute", "VKScriptConverter")
|
validation_tests/case_studies/merewether/plot_results.py | samcom12/anuga_core | 136 | 26319 | from anuga.utilities import plot_utils as util
from matplotlib import pyplot as pyplot
import numpy
verbose= True
swwfile = 'merewether_1m.sww'
p=util.get_output(swwfile)
p2=util.get_centroids(p)
# Time index at last time
tindex = len(p2.time)-1
if verbose: print('calculating experimental transect')
x_data = [ 0.0, 3.0, 6.0, 9.0, 12.0, 15.0, 18.0, 21.0, 24.0, 27.0, 30.0, 33.0]
#vel = [ 0.0, 0.0, 1.1, 3.2, 3.4, 2.4, 3.2, 3.2, 3.7, 3.1, 0.4, 0.0]
vel_data = [ 0.0, 0.4, 3.1, 3.7, 3.2, 3.2, 2.4, 3.4, 3.2, 1.1, 0.0, 0.0]
#depth = [ 0.0, 0.0, 0.1, 0.5, 0.45, 0.4, 0.55, 0.1, 0.1, 0.05, 0.04, 0.0]
depth_data = [ 0.0, 0.04, 0.05, 0.1, 0.1, 0.55, 0.4, 0.45, 0.5, 0.1, 0.0, 0.0]
from scipy import interpolate
fvel = interpolate.interp1d(x_data, vel_data)
fdepth = interpolate.interp1d(x_data, depth_data)
if verbose: print('calculating model heights at observation points')
# Get nearest wet points to 'point observations'
point_observations = numpy.genfromtxt(
'Observations/ObservationPoints.csv',
delimiter=",",skip_header=1)
nearest_points = point_observations[:,0]*0. - 1
for i in range(len(nearest_points)):
# Compute distance of ANUGA points to observation, and
# if the ANUGA point is dry then add a large value
# Then find index of minimum
n = ( (p2.x+p2.xllcorner-point_observations[i,0])**2 + \
(p2.y+p2.yllcorner-point_observations[i,1])**2 + \
(p2.stage[tindex,:] <= p2.elev)*1.0e+06).argmin()
nearest_points[i] = n
f = open('Stage_point_comparison.csv','w')
f.writelines( 'Field, ANUGA, TUFLOW, ANUGA minus Field, ANUGA minus TUFLOW \n' )
if verbose: print(nearest_points.tolist())
for i in range(len(nearest_points)):
po = point_observations[i,-2]
tu = point_observations[i,-1]
anuga_data = p2.stage[tindex, nearest_points.tolist()[i]]
newline = str(round(po,2)) + ', ' + str(round(anuga_data,2)) + ', ' + str(tu) + ', ' + \
str(round(anuga_data - po,2)) + ', ' + str(round(anuga_data - tu,2)) + '\n'
f.writelines(newline)
f.flush()
f.close()
if verbose: print('Plot transect')
## Plot transect 1 [need to guess appropriate end points as these are not so
## clear from the report]
xx=util.near_transect(p2,[103, 100.], [130.,80.],tol=0.5)
xx2=xx[0]
pyplot.clf()
pyplot.figure(figsize=(16,10.5))
pyplot.subplot(121)
pyplot.scatter(p2.x, p2.y, c=p2.elev,edgecolors='none')
# Add nice elevation data
colVals = numpy.maximum(numpy.minimum(p2.elev, 25.), 19.)
util.plot_triangles(p, values = colVals, edgecolors='none')
pyplot.gca().set_aspect('equal')
pyplot.scatter(p2.x[xx2],p2.y[xx2],color='green')
pyplot.xlim( (40., 160.))
pyplot.ylim( (0.,140.))
pyplot.title('Transect points in green')
pyplot.subplot(222)
pyplot.scatter(xx[1],p2.vel[tindex,xx[0]],color='green',label='model')
pyplot.scatter(xx[1],fvel(xx[1]),color='blue',label='data')
pyplot.legend(loc='upper left')
#pyplot.xlim(0,25)
pyplot.title('Final flow speed along the transect')
pyplot.subplot(224)
pyplot.scatter(xx[1],p2.stage[tindex,xx[0]]-p2.elev[xx[0]],color='green',label='model')
pyplot.scatter(xx[1],fdepth(xx[1]),color='blue',label='data')
pyplot.legend(loc='upper left')
#pyplot.xlim(0,25)
pyplot.title('Final depth along the transect')
pyplot.savefig('Transect1.png', bbox_inches='tight')
if verbose: print('Plot velocity field')
pyplot.clf()
# Velocity vector plot
pyplot.figure(figsize=(16,22))
pyplot.scatter(p2.x,p2.y,c=(p2.elev>24.),edgecolors='none', s=0.2)
pyplot.gca().set_aspect('equal')
pyplot.xlim((100,180))
pyplot.ylim((100,210))
#k=range(0,len(p2.x),2) # Thin out the vectors for easier viewing
colVals = numpy.maximum(numpy.minimum(p2.elev, 25.), 19.)
util.plot_triangles(p, values = colVals, edgecolors='white')
k = range(len(p2.x))
# Thin out the triangles
#k = (((10.*(p2.x - p2.x.round())).round()%2 == 0.0)*((10.*(p2.y - p2.y.round())).round()%2 == 0.0)).nonzero()[0]
pyplot.quiver(p2.x[k],p2.y[k],p2.xvel[tindex,k], p2.yvel[tindex,k],
scale_units='xy',units='xy',width=0.1,
color='black',scale=1.0)
pyplot.savefig('velocity_stationary.png',dpi=100, bbox_inches='tight')
## Froude number plot
if verbose: print('Plot Froude number plot')
pyplot.clf()
pyplot.figure(figsize=(6,8))
froude_number = p2.vel[tindex]/(numpy.maximum(p2.height[tindex], 1.0e-03)*9.8)**0.5
froude_category = (froude_number>1.).astype(float) + (froude_number > 0.).astype(float)
pyplot.scatter(p2.x,p2.y,edgecolors='none', s=0.2)
## Fake additions to plot to hack matplotlib legend
pyplot.scatter(0.,0., color='FireBrick',label='>1', marker='s')
pyplot.scatter(0.,0., color='PaleGreen',label='0-1', marker='s')
pyplot.scatter(0.,0., color='blue',label='0',marker='s')
pyplot.gca().set_aspect('equal')
util.plot_triangles(p, values = froude_category, edgecolors='none')
pyplot.xlim((p.x.min(), p.x.max()))
pyplot.ylim((p.y.min(), p.y.max()))
pyplot.title("Froude Number zones: 0, (0,1], or >1")
import matplotlib.patches as mpatches
#red_patch = mpatches.Patch(color='red', label='>1')
#green_patch = mpatches.Patch(color='green', label='(0-1]')
#blue_patch = mpatches.Patch(color='blue', label='0.')
#pyplot.legend(handles=[red_patch, green_patch, blue_patch], labels=['>1', '(0-1]', '0.'], loc='best')
pyplot.legend(loc='upper left')
pyplot.savefig('froudeNumber.png',dpi=100,bbox_inches='tight')
|
test/test_conf.py | nandub/Limnoria | 476 | 26329 | ##
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions, and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions, and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the author of this software nor the name of
# contributors to this software may be used to endorse or promote products
# derived from this software without specific prior written consent.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
###
from supybot.test import *
import supybot.conf as conf
import supybot.registry as registry
import supybot.ircutils as ircutils
class SupyConfTestCase(SupyTestCase):
def testJoinToOneChannel(self):
orig = conf.supybot.networks.test.channels()
channels = ircutils.IrcSet()
channels.add("#bar")
conf.supybot.networks.test.channels.setValue(channels)
msgs = conf.supybot.networks.test.channels.joins()
self.assertEqual(msgs[0].args, ("#bar",))
conf.supybot.networks.test.channels.setValue(orig)
def testJoinToManyChannels(self):
orig = conf.supybot.networks.test.channels()
channels = ircutils.IrcSet()
input_list = []
for x in range(1, 30):
name = "#verylongchannelname" + str(x)
channels.add(name)
input_list.append(name)
conf.supybot.networks.test.channels.setValue(channels)
msgs = conf.supybot.networks.test.channels.joins()
# Double check we split the messages
self.assertEqual(len(msgs), 2)
# Ensure all channel names are present
chan_list = (msgs[0].args[0] + ',' + msgs[1].args[0]).split(',')
self.assertCountEqual(input_list, chan_list)
conf.supybot.networks.test.channels.setValue(orig)
|
qf_lib/documents_utils/document_exporting/element/custom.py | webclinic017/qf-lib | 198 | 26352 | # Copyright 2016-present CERN – European Organization for Nuclear Research
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from qf_lib.common.enums.grid_proportion import GridProportion
from qf_lib.documents_utils.document_exporting.document import Document
from qf_lib.documents_utils.document_exporting.element import Element
class CustomElement(Element):
def __init__(self, html: str, grid_proportion=GridProportion.Eight):
"""
An element containing custom HTML.
"""
super().__init__(grid_proportion)
self.html = html
def generate_html(self, document: Document) -> str:
"""
Generates the HTML that represents the underlying element.
"""
return self.html
|
electronics/scripts/generate_svg.py | chrisdearman/splitflap | 2,138 | 26370 | <filename>electronics/scripts/generate_svg.py<gh_stars>1000+
#!/usr/bin/env python3
# Copyright 2015-2016 <NAME> and the splitflap contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import logging
import os
import pcbnew
import shutil
import subprocess
import pcb_util
from svg_processor import SvgProcessor
electronics_root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger(__name__)
def color_with_alpha(base_color, alpha):
return (base_color & ~(0xFF << 24)) | ((alpha & 0xFF) << 24)
def run(pcb_file):
output_directory = os.path.join(electronics_root, 'build')
temp_dir = os.path.join(output_directory, 'temp_layers')
shutil.rmtree(temp_dir, ignore_errors=True)
try:
os.makedirs(temp_dir)
plot_to_directory(pcb_file, output_directory, temp_dir)
finally:
shutil.rmtree(temp_dir, ignore_errors=True)
def plot_to_directory(pcb_file, output_directory, temp_dir):
board_name = os.path.splitext(os.path.basename(pcb_file))[0]
layers = [
{
'layer': pcbnew.B_SilkS,
'color': '#CC00CC',
'alpha': 0.8,
},
{
'layer': pcbnew.B_Cu,
'color': '#33EE33',
'alpha': 0.5,
},
{
'layer': pcbnew.F_Cu,
'color': '#CC0000',
'alpha': 0.5,
},
{
'layer': pcbnew.F_SilkS,
'color': '#00CCCC',
'alpha': 0.8,
},
{
'layer': pcbnew.Cmts_User,
'color': '#333333',
'alpha': 0.8,
},
{
'layer': pcbnew.Edge_Cuts,
'color': '#3333CC',
'alpha': 0.8,
},
]
with pcb_util.get_plotter(pcb_file, temp_dir) as plotter:
plotter.plot_options.SetExcludeEdgeLayer(True)
processed_svg_files = []
for i, layer in enumerate(layers):
output_filename = plotter.plot(layer['layer'], pcbnew.PLOT_FORMAT_SVG)
logger.info('Post-processing %s...', output_filename)
processor = SvgProcessor(output_filename)
def colorize(original):
if original.lower() == '#000000':
return layer['color']
return original
processor.apply_color_transform(colorize)
processor.wrap_with_group({
'opacity': str(layer['alpha']),
})
output_filename2 = os.path.join(temp_dir, 'processed-' + os.path.basename(output_filename))
processor.write(output_filename2)
processed_svg_files.append((output_filename2, processor))
# Plot the paste layer to its own SVG
logger.info('Plotting paste SVG')
output_filename = plotter.plot(pcbnew.F_Paste, pcbnew.PLOT_FORMAT_SVG)
processor = SvgProcessor(output_filename)
def colorize(original):
if original.lower() == '#000000':
return '#FF0000'
return original
processor.apply_group_style_transforms({
'fill-opacity': lambda _: '0',
'stroke': lambda _: '#FF0000',
'stroke-opacity': lambda _: '1',
'stroke-width': lambda _: '20',
})
paste_filename = os.path.join(output_directory, '%s_paste.svg' % board_name)
processor.write(paste_filename)
logger.info('Merging layers...')
final_svg = os.path.join(output_directory, '%s_merged.svg' % board_name)
shutil.copyfile(processed_svg_files[0][0], final_svg)
output_processor = SvgProcessor(final_svg)
for _, processor in processed_svg_files:
output_processor.import_groups(processor)
output_processor.write(final_svg)
logger.info('Rasterizing...')
raster_width = 1280
final_png = os.path.join(output_directory, '%s_merged.png' % board_name)
subprocess.check_call([
'inkscape',
'--export-area-drawing',
'--export-width', str(raster_width),
'--export-png', final_png,
'--export-background', '#FFFFFF',
final_svg,
])
if __name__ == '__main__':
parser = argparse.ArgumentParser('Generate an SVG rendering of the PCB')
parser.add_argument('pcb_file')
args = parser.parse_args()
run(args.pcb_file)
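# Example invocation (the board file path is a placeholder):
#   python generate_svg.py electronics/splitflap.kicad_pcb
# This writes <board>_merged.svg, <board>_merged.png and <board>_paste.svg into
# electronics/build/, as assembled in plot_to_directory() above.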
|
examples/deco_add_param.py | scholer/clize | 390 | 26390 | <gh_stars>100-1000
from sigtools.wrappers import decorator
from clize import run
@decorator
def with_uppercase(wrapped, *args, uppercase=False, **kwargs):
"""
Formatting options:
:param uppercase: Print output in capitals
"""
ret = wrapped(*args, **kwargs)
if uppercase:
return str(ret).upper()
else:
return ret
@with_uppercase
def hello_world(name=None):
"""Says hello world
:param name: Who to say hello to
"""
if name is not None:
return 'Hello ' + name
else:
return 'Hello world!'
if __name__ == '__main__':
run(hello_world)
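# Expected command-line behaviour (a sketch, assuming clize's usual mapping of
# keyword-only parameters to flags; output not verified here):
#   $ python deco_add_param.py --uppercase
#   HELLO WORLD!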
|
mealpy/utils/visualize/linechart.py | thieu1995/mealpy | 162 | 26424 | #!/usr/bin/env python
# ------------------------------------------------------------------------------------------------------%
# Created by "Thieu" at 17:12, 09/07/2021 %
# %
# Email: <EMAIL> %
# Homepage: https://www.researchgate.net/profile/Nguyen_Thieu2 %
# Github: https://github.com/thieu1995 %
# ------------------------------------------------------------------------------------------------------%
import platform
from matplotlib import pyplot as plt
from numpy import arange
from pathlib import Path
import re
LIST_LINESTYLES = [
'-', # solid line style
'--', # dashed line style
'-.', # dash-dot line style
':', # dotted line style
's', # square marker
'*', # star marker
'p', # pentagon marker
'+', # plus marker
'x', # x marker
'd', # thin diamond marker
]
LIST_COLORS = ['#1f77b4', '#ff7f0e', '#2ca02c', '#d62728',
'#9467bd', '#8c564b', '#e377c2', '#7f7f7f',
'#bcbd22', '#17becf']
def __clean_filename__(filename):
chars_to_remove = ["`", "~", "!", "@", "#", "$", "%", "^", "&", "*", ":", ",", "<", ">", ";", "+", "|"]
regular_expression = '[' + re.escape(''.join(chars_to_remove)) + ']'
temp = filename.encode("ascii", "ignore")
fname = temp.decode() # Removed all non-ascii characters
fname = re.sub(regular_expression, '', fname) # Removed all special characters
fname = fname.replace("_", "-") # Replace _ by -
return fname
def __check_filepath__(filename):
filename = filename.replace("\\", "/") # Normalize backslashes for better handling of the parent folder
if "/" in filename:
list_names = filename.split("/")[:-1] # Remove last element because it is filename
filepath = "/".join(list_names)
print(f"Fucking for real? {filepath}")
Path(filepath).mkdir(parents=True, exist_ok=True)
return filename
def _draw_line_(data=None, title=None, linestyle='-', color='b', x_label="#Iteration", y_label="Function Value",
filename=None, exts=(".png", ".pdf"), verbose=True):
x = arange(0, len(data))
y = data
plt.title(title)
plt.xlabel(x_label)
plt.ylabel(y_label)
plt.plot(x, y, linestyle=linestyle, color=color,)
plt.legend() # show a legend on the plot
if filename is not None:
filepath = __check_filepath__(__clean_filename__(filename))
for idx, ext in enumerate(exts):
plt.savefig(f"{filepath}{ext}", bbox_inches='tight')
if platform.system() != "Linux" and verbose:
plt.show()
plt.close()
def _draw_multi_line_(data=None, title=None, list_legends=None, list_styles=None, list_colors=None,
x_label="#Iteration", y_label="Function Value", filename=None, exts=(".png", ".pdf"), verbose=True):
x = arange(0, len(data[0]))
for idx, y in enumerate(data):
plt.plot(x, y, label=list_legends[idx], markerfacecolor=list_colors[idx], linestyle=list_styles[idx])
plt.title(title)
plt.xlabel(x_label)
plt.ylabel(y_label)
plt.legend() # show a legend on the plot
if filename is not None:
filepath = __check_filepath__(__clean_filename__(filename))
for idx, ext in enumerate(exts):
plt.savefig(f"{filepath}{ext}", bbox_inches='tight')
if platform.system() != "Linux" and verbose:
plt.show()
plt.close()
def _draw_multi_line_in_same_figure_(data=None, title=None, list_legends=None, list_styles=None, list_colors=None,
x_label="#Iteration", y_label="Objective", filename=None, exts=(".png", ".pdf"), verbose=True):
n_lines = len(data)
len_lines = len(data[0])
x = arange(0, len_lines)
if n_lines == 1:
fig, ax = plt.subplots()
if list_legends is None:
ax.plot(x, data[0])
else:
ax.plot(x, data[0], label=list_legends[0])
ax.set_title(title)
elif n_lines > 1:
fig, ax_list = plt.subplots(n_lines, sharex=True)
fig.suptitle(title)
for idx, ax in enumerate(ax_list):
if list_legends is None:
ax.plot(x, data[idx], markerfacecolor=list_colors[idx], linestyle=list_styles[idx])
else:
ax.plot(x, data[idx], label=list_legends[idx], markerfacecolor=list_colors[idx], linestyle=list_styles[idx])
ax.set_ylabel(f"Objective {idx + 1}")
if idx == (n_lines - 1):
ax.set_xlabel(x_label)
if filename is not None:
filepath = __check_filepath__(__clean_filename__(filename))
for idx, ext in enumerate(exts):
plt.savefig(f"{filepath}{ext}", bbox_inches='tight')
if platform.system() != "Linux" and verbose:
plt.show()
plt.close()
def export_convergence_chart(data=None, title="Convergence Chart", linestyle='-', color='b', x_label="#Iteration",
y_label="Function Value", filename="convergence_chart", exts=(".png", ".pdf"), verbose=True):
_draw_line_(data, title=title, linestyle=linestyle, color=color, x_label=x_label, y_label=y_label,
filename=filename, exts=exts, verbose=verbose)
def export_explore_exploit_chart(data=None, title="Exploration vs Exploitation Percentages", list_legends=("Exploration %", "Exploitation %"),
list_styles=('-', '-'), list_colors=('blue', 'orange'), x_label="#Iteration", y_label="Percentage",
filename="explore_exploit_chart", exts=(".png", ".pdf"), verbose=True):
_draw_multi_line_(data=data, title=title, list_legends=list_legends, list_styles=list_styles, list_colors=list_colors,
x_label=x_label, y_label=y_label, filename=filename, exts=exts, verbose=verbose)
def export_diversity_chart(data=None, title='Diversity Measurement Chart', list_legends=None,
list_styles=None, list_colors=None, x_label="#Iteration", y_label="Diversity Measurement",
filename="diversity_chart", exts=(".png", ".pdf"), verbose=True):
if list_styles is None:
list_styles = LIST_LINESTYLES[:len(data)]
if list_colors is None:
list_colors = LIST_COLORS[:len(data)]
_draw_multi_line_(data=data, title=title, list_legends=list_legends, list_styles=list_styles, list_colors=list_colors,
x_label=x_label, y_label=y_label, filename=filename, exts=exts, verbose=verbose)
def export_objectives_chart(data=None, title="Objectives chart", list_legends=None, list_styles=None, list_colors=None,
x_label="#Iteration", y_label="Function Value", filename="Objective-chart", exts=(".png", ".pdf"), verbose=True):
if list_styles is None:
list_styles = LIST_LINESTYLES[:len(data)]
if list_colors is None:
list_colors = LIST_COLORS[:len(data)]
_draw_multi_line_in_same_figure_(data=data, title=title, list_legends=list_legends, list_styles=list_styles, list_colors=list_colors,
x_label=x_label, y_label=y_label, filename=filename, exts=exts, verbose=verbose)
def export_trajectory_chart(data=None, n_dimensions=1, title="Trajectory of some first agents after generations", list_legends=None,
list_styles=None, list_colors=None, x_label="#Iteration", y_label="X1",
filename="1d_trajectory", exts=(".png", ".pdf"), verbose=True):
if list_styles is None:
list_styles = LIST_LINESTYLES[:len(data)]
if list_colors is None:
list_colors = LIST_COLORS[:len(data)]
if n_dimensions == 1:
x = arange(0, len(data[0]))
for idx, y in enumerate(data):
plt.plot(x, y, label=list_legends[idx], markerfacecolor=list_colors[idx], linestyle=list_styles[idx])
elif n_dimensions == 2:
for idx, point in enumerate(data):
plt.plot(point[0], point[1], label=list_legends[idx], markerfacecolor=list_colors[idx], linestyle=list_styles[idx])
plt.title(title)
plt.xlabel(x_label)
plt.ylabel(y_label)
plt.legend() # show a legend on the plot
if filename is not None:
filepath = __check_filepath__(__clean_filename__(filename))
for idx, ext in enumerate(exts):
plt.savefig(f"{filepath}{ext}", bbox_inches='tight')
if platform.system() != "Linux" and verbose:
plt.show()
plt.close()
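# Minimal usage sketch (synthetic data; relies only on the defaults defined above):
#   export_convergence_chart(data=[12.5, 9.1, 7.8, 7.7, 7.7],
#                            filename="convergence_chart", verbose=False)
# This writes convergence_chart.png and convergence_chart.pdf relative to the
# working directory, using the default (".png", ".pdf") extensions.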
|
rules/tamper/wordpress.py | lavon321/Kunlun-M | 1,059 | 26452 | # -*- coding: utf-8 -*-
"""
wordpress
~~~~
tamper for wordpress
:author: LoRexxar <<EMAIL>>
:homepage: https://github.com/LoRexxar/Kunlun-M
:license: MIT, see LICENSE for more details.
:copyright: Copyright (c) 2017 LoRexxar. All rights reserved
"""
wordpress = {
"esc_url": [1000, 10001, 10002],
"esc_js": [1000, 10001, 10002],
"esc_html": [1000, 10001, 10002],
"esc_attr": [1000, 10001, 10002],
"esc_textarea": [1000, 10001, 10002],
"tag_escape": [1000, 10001, 10002],
"esc_sql": [1004, 1005, 1006],
"_real_escape": [1004, 1005, 1006],
}
wordpress_controlled = [] |
tests/links_tests/model_tests/yolo_tests/test_yolo_v3.py | souravsingh/chainercv | 1,600 | 26460 | <reponame>souravsingh/chainercv<filename>tests/links_tests/model_tests/yolo_tests/test_yolo_v3.py
import numpy as np
import unittest
import chainer
from chainer import testing
from chainer.testing import attr
from chainercv.links import YOLOv3
@testing.parameterize(*testing.product({
'n_fg_class': [1, 5, 20],
}))
class TestYOLOv3(unittest.TestCase):
def setUp(self):
self.link = YOLOv3(n_fg_class=self.n_fg_class)
self.insize = 416
self.n_bbox = (13 * 13 + 26 * 26 + 52 * 52) * 3
def _check_call(self):
x = self.link.xp.array(
np.random.uniform(-1, 1, size=(1, 3, self.insize, self.insize)),
dtype=np.float32)
locs, objs, confs = self.link(x)
self.assertIsInstance(locs, chainer.Variable)
self.assertIsInstance(locs.array, self.link.xp.ndarray)
self.assertEqual(locs.shape, (1, self.n_bbox, 4))
self.assertIsInstance(objs, chainer.Variable)
self.assertIsInstance(objs.array, self.link.xp.ndarray)
self.assertEqual(objs.shape, (1, self.n_bbox))
self.assertIsInstance(confs, chainer.Variable)
self.assertIsInstance(confs.array, self.link.xp.ndarray)
self.assertEqual(confs.shape, (1, self.n_bbox, self.n_fg_class))
@attr.slow
def test_call_cpu(self):
self._check_call()
@attr.gpu
@attr.slow
def test_call_gpu(self):
self.link.to_gpu()
self._check_call()
@testing.parameterize(*testing.product({
'n_fg_class': [None, 10, 20],
'pretrained_model': ['voc0712'],
}))
class TestYOLOv3Pretrained(unittest.TestCase):
@attr.slow
def test_pretrained(self):
kwargs = {
'n_fg_class': self.n_fg_class,
'pretrained_model': self.pretrained_model,
}
if self.pretrained_model == 'voc0712':
valid = self.n_fg_class in {None, 20}
if valid:
YOLOv3(**kwargs)
else:
with self.assertRaises(ValueError):
YOLOv3(**kwargs)
testing.run_module(__name__, __file__)
|
Plug-and-play module/attention/CBAM/cbam.py | riciche/SimpleCVReproduction | 923 | 26464 | <filename>Plug-and-play module/attention/CBAM/cbam.py
import torch
import torch.nn as nn
def conv3x3(in_planes, out_planes, stride=1):
"3x3 convolution with padding"
return nn.Conv2d(in_planes,
out_planes,
kernel_size=3,
stride=stride,
padding=1,
bias=False)
class ChannelAttention(nn.Module):
def __init__(self, in_planes, ratio=4):
super(ChannelAttention, self).__init__()
self.avg_pool = nn.AdaptiveAvgPool2d(1)
self.max_pool = nn.AdaptiveMaxPool2d(1)
self.sharedMLP = nn.Sequential(
nn.Conv2d(in_planes, in_planes // ratio, 1, bias=False), nn.ReLU(),
nn.Conv2d(in_planes // ratio, in_planes, 1, bias=False))
self.sigmoid = nn.Sigmoid()
def forward(self, x):
avgout = self.sharedMLP(self.avg_pool(x))
maxout = self.sharedMLP(self.max_pool(x))
return self.sigmoid(avgout + maxout)
class SpatialAttention(nn.Module):
def __init__(self, kernel_size=7):
super(SpatialAttention, self).__init__()
assert kernel_size in (3, 7), "kernel size must be 3 or 7"
padding = 3 if kernel_size == 7 else 1
self.conv = nn.Conv2d(2, 1, kernel_size, padding=padding, bias=False)
self.sigmoid = nn.Sigmoid()
def forward(self, x):
avgout = torch.mean(x, dim=1, keepdim=True)
maxout, _ = torch.max(x, dim=1, keepdim=True)
x = torch.cat([avgout, maxout], dim=1)
x = self.conv(x)
return self.sigmoid(x)
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(BasicBlock, self).__init__()
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = nn.BatchNorm2d(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes)
self.bn2 = nn.BatchNorm2d(planes)
self.ca = ChannelAttention(planes)
self.sa = SpatialAttention()
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.ca(out) * out # channel attention, applied via broadcasting
out = self.sa(out) * out # spatial attention, applied via broadcasting
if self.downsample is not None:
print("downsampling")
residual = self.downsample(x)
print(out.shape, residual.shape)
out += residual
out = self.relu(out)
return out
if __name__ == "__main__":
downsample = nn.Sequential(
nn.Conv2d(16, 32, kernel_size=1, stride=1, bias=False),
nn.BatchNorm2d(32))
x = torch.ones(3, 16, 32, 32)
model = BasicBlock(16, 32, stride=1, downsample=downsample)
print(model(x).shape) |
diffrax/solver/kvaerno5.py | FedericoV/diffrax | 377 | 26472 | import numpy as np
from ..local_interpolation import ThirdOrderHermitePolynomialInterpolation
from .runge_kutta import AbstractESDIRK, ButcherTableau
γ = 0.26
a21 = γ
a31 = 0.13
a32 = 0.84033320996790809
a41 = 0.22371961478320505
a42 = 0.47675532319799699
a43 = -0.06470895363112615
a51 = 0.16648564323248321
a52 = 0.10450018841591720
a53 = 0.03631482272098715
a54 = -0.13090704451073998
a61 = 0.13855640231268224
a62 = 0
a63 = -0.04245337201752043
a64 = 0.02446657898003141
a65 = 0.61943039072480676
a71 = 0.13659751177640291
a72 = 0
a73 = -0.05496908796538376
a74 = -0.04118626728321046
a75 = 0.62993304899016403
a76 = 0.06962479448202728
# Predictors taken from
# https://github.com/SciML/OrdinaryDiffEq.jl/blob/54fb35870fa402fc95d665cd5f9502e2759ea436/src/tableaus/sdirk_tableaus.jl#L1444 # noqa: E501
# https://github.com/SciML/OrdinaryDiffEq.jl/blob/54fb35870fa402fc95d665cd5f9502e2759ea436/src/perform_step/kencarp_kvaerno_perform_step.jl#L1123 # noqa: E501
# This is with the exception of α21, which is mistakenly set to zero.
#
# See also /devdocs/predictor_dirk.md
α21 = 1.0
α31 = -1.366025403784441
α32 = 2.3660254037844357
α41 = -0.19650552613122207
α42 = 0.8113579546496623
α43 = 0.38514757148155954
α51 = 0.10375304369958693
α52 = 0.937994698066431
α53 = -0.04174774176601781
α61 = -0.17281112873898072
α62 = 0.6235784481025847
α63 = 0.5492326806363959
α71 = a61
α72 = a62
α73 = a63
α74 = a64
α75 = a65
α76 = γ
_kvaerno5_tableau = ButcherTableau(
a_lower=(
np.array([a21]),
np.array([a31, a32]),
np.array([a41, a42, a43]),
np.array([a51, a52, a53, a54]),
np.array([a61, a62, a63, a64, a65]),
np.array([a71, a72, a73, a74, a75, a76]),
),
a_diagonal=np.array([0, γ, γ, γ, γ, γ, γ]),
a_predictor=(
np.array([α21]),
np.array([α31, α32]),
np.array([α41, α42, α43]),
np.array([α51, α52, α53, 0]),
np.array([α61, α62, α63, 0, 0]),
np.array([α71, α72, α73, α74, α75, α76]),
),
b_sol=np.array([a71, a72, a73, a74, a75, a76, γ]),
b_error=np.array(
[a71 - a61, a72 - a62, a73 - a63, a74 - a64, a75 - a65, a76 - γ, γ]
),
c=np.array(
[0.52, 1.230333209967908, 0.8957659843500759, 0.43639360985864756, 1.0, 1.0]
),
)
class Kvaerno5(AbstractESDIRK):
r"""Kvaerno's 5/4 method.
A-L stable stiffly accurate 5th order ESDIRK method. Has an embedded 4th order
method for adaptive step sizing. Uses 7 stages.
When solving an ODE over the interval $[t_0, t_1]$, note that this method will make
some evaluations slightly past $t_1$.
??? cite "Reference"
```bibtex
@article{kvaerno2004singly,
title={Singly diagonally implicit Runge--Kutta methods with an explicit first
stage},
author={Kv{\ae}rn{\o}, Anne},
journal={BIT Numerical Mathematics},
volume={44},
number={3},
pages={489--502},
year={2004},
publisher={Springer}
}
```
"""
tableau = _kvaerno5_tableau
interpolation_cls = ThirdOrderHermitePolynomialInterpolation.from_k
def order(self, terms):
return 5
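# Usage sketch (hedged: the solver and step-size controller names follow Diffrax's
# public API, but the stiff ODE below is an invented example):
#
#   import diffrax
#   term = diffrax.ODETerm(lambda t, y, args: -50.0 * y)
#   sol = diffrax.diffeqsolve(
#       term, diffrax.Kvaerno5(), t0=0.0, t1=1.0, dt0=0.01, y0=1.0,
#       stepsize_controller=diffrax.PIDController(rtol=1e-5, atol=1e-5),
#   )
#
# The embedded 4th-order estimate (b_error in the tableau above) is what drives the
# adaptive step-size controller.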
|
Hinting/Remove Zero Deltas in Selected Glyphs.py | justanotherfoundry/Glyphs-Scripts | 283 | 26473 | #MenuTitle: Remove Zero Deltas in Selected Glyphs
# -*- coding: utf-8 -*-
from __future__ import division, print_function, unicode_literals
__doc__="""
Goes through all layers of each selected glyph, and deletes all TT Delta Hints with an offset of zero. Detailed Report in Macro Window.
"""
def process( Layer ):
try:
count = 0
for i in reversed(range(len(Layer.hints))):
hint = Layer.hints[i]
if hint.type == TTDELTA:
elementDict = hint.elementDict()
if "settings" in elementDict:
settings = elementDict["settings"]
if settings:
for deltaType in ("deltaH","deltaV"):
if deltaType in settings:
for transformType in settings[deltaType]:
deltas = settings[deltaType][transformType]
for ppmSize in deltas:
if deltas[ppmSize] == 0:
del deltas[ppmSize]
count += 1
# clean up delta PPMs:
if len(settings[deltaType][transformType]) == 0:
del settings[deltaType][transformType]
# clean up delta directions:
if len(settings[deltaType]) == 0:
del settings[deltaType]
# clean up hints:
if not elementDict["settings"]:
del Layer.hints[i]
print(" Deleted %i zero delta%s on layer '%s'." % (
count,
"" if count == 1 else "s",
Layer.name,
))
return count
except Exception as e:
Glyphs.showMacroWindow()
import traceback
print(traceback.format_exc())
print()
print(e)
thisFont = Glyphs.font # frontmost font
selectedLayers = thisFont.selectedLayers # active layers of selected glyphs
Glyphs.clearLog() # clears log in Macro window
totalCount = 0
for selectedLayer in selectedLayers:
thisGlyph = selectedLayer.parent
print("%s:" % thisGlyph.name)
thisGlyph.beginUndo() # begin undo grouping
for thisLayer in thisGlyph.layers:
totalCount += process( thisLayer )
thisGlyph.endUndo() # end undo grouping
if totalCount:
Message(
title="%i Zero Delta%s Deleted" % (
totalCount,
"" if totalCount == 1 else "s",
),
message="Deleted %i TT delta hint%s with zero offset in %i selected glyph%s (%s%s). Detailed report in Macro Window." % (
totalCount,
"" if totalCount == 1 else "s",
len(selectedLayers),
"" if len(selectedLayers) == 1 else "s",
", ".join([l.parent.name for l in selectedLayers[:min(20,len(selectedLayers))]]),
",..." if len(selectedLayers) > 20 else "",
),
OKButton=u"👍🏻 OK",
)
else:
Message(
title="No Zero Deltas",
message="No TT delta hints with zero offset were found in selected glyph%s (%s%s)." % (
"" if len(selectedLayers) == 1 else "s",
", ".join([l.parent.name for l in selectedLayers[:min(20,len(selectedLayers))]]),
",..." if len(selectedLayers) > 20 else "",
),
OKButton=u"🍸 Cheers")
|
torchvision/prototype/utils/__init__.py | yoshitomo-matsubara/vision | 12,063 | 26476 | <reponame>yoshitomo-matsubara/vision<filename>torchvision/prototype/utils/__init__.py
from . import _internal
|
annotation/application/document.py | seal-git/chABSA-dataset | 107 | 26519 | <gh_stars>100-1000
import os
import shutil
import json
class Document():
def __init__(self,
doc_id, doc_text, edi_id, company_name,
body, topic):
self.doc_id = doc_id
self.doc_text = doc_text
self.edi_id = edi_id
self.company_name = company_name
self.body = body
self.topic = topic
def get_header(self):
return {
"document_id": self.document_id,
"document_name": self.document_name,
"doc_text": self.doc_text,
"edi_id": self.edi_id
}
@property
def document_id(self):
return self.edi_id
@property
def document_name(self):
return self.company_name
@classmethod
def load(cls, file_path):
if not os.path.isfile(file_path):
raise Exception("File {} does not found.".format(file_path))
with open(file_path, encoding="utf-8") as f:
doc = json.load(f)
doc_id = doc["doc_id"]
doc_text = doc["doc_text"]
edi_id = doc["edi_id"]
company_name = doc["company_name"]
body = doc["body"]
topic = doc["topic"]
return cls(doc_id, doc_text, edi_id, company_name, body, topic)
class Label():
def __init__(self, label, label_group="", display_name="", display_style=""):
self.label = label
self.label_group = label_group
self.display_name = display_name
self.display_style = display_style
def dumps(self):
return {
"label": self.label,
"label_group": self.label_group,
"display_name": self.display_name,
"display_style": self.display_style
}
class Annotation():
def __init__(self, target_id, target, label, label_target="", position=(), annotator="anonymous"):
self.target_id = int(target_id)
self.target = target
self.label = label
self.label_target = label_target
self.position = position
if len(self.position) > 0:
self.position = [int(i) for i in self.position]
self.annotator = annotator
def dumps(self):
a = {
"target_id": self.target_id,
"target": self.target,
"label": self.label,
"label_target": self.label_target,
"position": self.position,
"annotator": self.annotator
}
return a
@classmethod
def loads(cls, obj):
a = Annotation(
obj["target_id"],
obj["target"],
obj["label"],
obj["label_target"],
obj["position"] if "position" in obj else ()
)
if "annotator" in obj:
a.annotator = obj["annotator"]
return a
class AnnotationTask():
ANNOTATION_CLASS = Annotation
def __init__(self, document, annotations=()):
self.document = document
self.annotations = {} if len(annotations) == 0 else annotations
def get_targets(self):
raise Exception("Sub class have to specify texts for annotation")
def get_labels(self):
raise Exception("Sub class have to define label candidates")
def get_dataset(self):
dataset = {}
for target_id, target in self.get_targets():
a_s = []
if target_id in self.annotations:
a_s = [a.dumps() for a in self.annotations[target_id]]
dataset[target_id] = {
"target": target,
"annotations": a_s
}
return dataset
def save_annotations(self, target_dir, annotation_objs, annotator):
_dir = os.path.join(target_dir, self.document.document_id)
annotations = [self.ANNOTATION_CLASS.loads(a_obj)
for a_obj in annotation_objs]
if annotator:
for a in annotations:
a.annotator = annotator
if os.path.exists(_dir):
for f in os.listdir(_dir):
if f.startswith("ann__") and f.endswith("__{}.json".format(annotator)):
os.remove(os.path.join(_dir, f))
save_bucket = {}
for a in annotations:
key = (a.target_id, a.annotator)
if key not in save_bucket:
save_bucket[key] = []
save_bucket[key].append(a)
if len(save_bucket) > 0 and not os.path.exists(_dir):
os.mkdir(_dir)
for key in save_bucket:
file_name = self._make_annotation_file_name(*key)
body = {
"annotations": [a.dumps() for a in save_bucket[key]]
}
file_path = os.path.join(_dir, file_name)
with open(file_path, mode="w", encoding="utf-8") as f:
json.dump(body, f, ensure_ascii=False, indent=2)
def _make_annotation_file_name(self, target_id, annotator):
return "ann__{}__{}__{}.json".format(self.document.document_id, target_id, annotator)
@classmethod
def load(cls, target_dir, document, annotator=""):
annotations = {}
_dir = os.path.join(target_dir, document.document_id)
if os.path.exists(_dir):
for f in sorted(os.listdir(_dir)):
if not f.startswith("ann__"):
continue
if annotator and not f.endswith("__{}.json".format(annotator)):
continue
path = os.path.join(_dir, f)
with open(path, encoding="utf-8") as af:
annotation_objs = json.load(af)["annotations"]
a_list = [cls.ANNOTATION_CLASS.loads(a_obj) for a_obj in annotation_objs]
if len(a_list) > 0:
target_id = a_list[0].target_id
if target_id not in annotations:
annotations[target_id] = a_list
else:
annotations[target_id] += a_list
instance = cls(document, annotations)
return instance
|
__scraping__/just-eat.fr - robobrowser/main.py | whitmans-max/python-examples | 140 | 26538 | <reponame>whitmans-max/python-examples
# date: 2019.05.05
# author: Bartłomiej 'furas' Burek
import robobrowser
br = robobrowser.RoboBrowser(user_agent='Mozilla/5.0 (X11; Linux i586; rv:31.0) Gecko/20100101 Firefox/31.0')
br.parser = 'lxml'
br.open("https://www.just-eat.fr")
print(br.get_forms())
iframe_src = br.select('iframe')[0]['src']
print(iframe_src)
br.open("https://www.just-eat.fr"+iframe_src)
print(br.parsed)
br.open("https://www.just-eat.fr")
print(br.get_forms())
|
nototools/drop_hints.py | RoelN/nototools | 156 | 26541 | <filename>nototools/drop_hints.py
#!/usr/bin/env python
#
# Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Drop hints from a font."""
__author__ = "<EMAIL> (<NAME>)"
import array
import sys
from fontTools import ttLib
def drop_hints_from_glyphs(font):
"""Drops the hints from a font's glyphs."""
glyf_table = font["glyf"]
for glyph_index in range(len(glyf_table.glyphOrder)):
glyph_name = glyf_table.glyphOrder[glyph_index]
glyph = glyf_table[glyph_name]
if glyph.numberOfContours > 0:
if glyph.program.bytecode:
glyph.program.bytecode = array.array("B")
def drop_tables(font, tables):
"""Drops the listed tables from a font."""
for table in tables:
if table in font:
del font[table]
def main(argv):
"""Drop the hints from the first file specified and save as second."""
font = ttLib.TTFont(argv[1])
drop_hints_from_glyphs(font)
drop_tables(font, ["cvt ", "fpgm", "hdmx", "LTSH", "prep", "VDMX"])
font.save(argv[2])
if __name__ == "__main__":
main(sys.argv)
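# Example invocation (font file names are placeholders):
#   python drop_hints.py NotoSans-Regular.ttf NotoSans-Regular-unhinted.ttf
# The first font is read, glyph bytecode and the hinting-related tables listed in
# main() are removed, and the result is saved as the second file.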
|
floss/decoding_manager.py | fireeye/flare-floss | 2,067 | 26545 | <filename>floss/decoding_manager.py
# Copyright (C) 2017 Mandiant, Inc. All Rights Reserved.
import logging
from typing import List, Tuple
from dataclasses import dataclass
import viv_utils
import envi.memory
import viv_utils.emulator_drivers
from envi import Emulator
from . import api_hooks
logger = logging.getLogger("floss")
MAX_MAPS_SIZE = 1024 * 1024 * 100 # 100MB max memory allocated in an emulator instance
def is_import(emu, va):
"""
Return True if the given VA is that of an imported function.
"""
# TODO: also check location type
t = emu.getVivTaint(va)
if t is None:
return False
return t[1] == "import"
# type aliases for envi.memory map
MemoryMapDescriptor = Tuple[
# va
int,
# size
int,
# perms
int,
# name
str,
]
# type aliases for envi.memory map
MemoryMap = Tuple[
# start
int,
# end
int,
# descriptor
MemoryMapDescriptor,
# content
bytes,
]
# type aliases for envi.memory map
Memory = List[MemoryMap]
@dataclass
class Snapshot:
"""
A snapshot of the state of the CPU and memory.
Attributes:
memory: a snapshot of the memory contents
sp: the stack counter
pc: the instruction pointer
"""
memory: Memory
sp: int
pc: int
def get_map_size(emu):
size = 0
for mapva, mapsize, mperm, mfname in emu.getMemoryMaps():
size += mapsize
return size
class MapsTooLargeError(Exception):
pass
def make_snapshot(emu: Emulator) -> Snapshot:
"""
Create a snapshot of the current CPU and memory.
"""
if get_map_size(emu) > MAX_MAPS_SIZE:
logger.debug("emulator mapped too much memory: 0x%x", get_map_size(emu))
raise MapsTooLargeError()
return Snapshot(emu.getMemorySnap(), emu.getStackCounter(), emu.getProgramCounter())
@dataclass
class Delta:
"""
a pair of snapshots from before and after an operation.
facilitates diffing the state of an emulator.
"""
pre: Snapshot
post: Snapshot
class DeltaCollectorHook(viv_utils.emulator_drivers.Hook):
"""
hook that collects Deltas at each imported API call.
"""
def __init__(self, pre_snap: Snapshot):
super(DeltaCollectorHook, self).__init__()
self._pre_snap = pre_snap
self.deltas: List[Delta] = []
def hook(self, callname, driver, callconv, api, argv):
if is_import(driver._emu, driver._emu.getProgramCounter()):
try:
self.deltas.append(Delta(self._pre_snap, make_snapshot(driver._emu)))
except MapsTooLargeError:
logger.debug("despite call to import %s, maps too large, not extracting strings", callname)
pass
def emulate_function(
emu: Emulator, function_index, fva: int, return_address: int, max_instruction_count: int
) -> List[Delta]:
"""
Emulate a function and collect snapshots at each interesting place.
These interesting places include calls to imported API functions
and the final state of the emulator.
Emulation continues until the return address is hit, or
the given max_instruction_count is hit.
Some library functions are shimmed, such as memory allocation routines.
This helps "normal" routines emulate correct using standard library function.
These include:
- GetProcessHeap
- RtlAllocateHeap
- AllocateHeap
- malloc
:type function_index: viv_utils.FunctionIndex
:param fva: The start address of the function to emulate.
:param return_address: The expected return address of the function.
Emulation stops here.
:param max_instruction_count: The max number of instructions to emulate.
This helps avoid unexpected infinite loops.
"""
try:
pre_snap = make_snapshot(emu)
except MapsTooLargeError:
        logger.warning("initial snapshot mapped too much memory, can't extract strings")
return []
delta_collector = DeltaCollectorHook(pre_snap)
try:
logger.debug("Emulating function at 0x%08X", fva)
driver = viv_utils.emulator_drivers.DebuggerEmulatorDriver(emu)
monitor = api_hooks.ApiMonitor(emu.vw, function_index)
driver.add_monitor(monitor)
driver.add_hook(delta_collector)
with api_hooks.defaultHooks(driver):
driver.runToVa(return_address, max_instruction_count)
except viv_utils.emulator_drivers.InstructionRangeExceededError:
logger.debug("Halting as emulation has escaped!")
except envi.InvalidInstruction:
logger.debug("vivisect encountered an invalid instruction. will continue processing.", exc_info=True)
except envi.UnsupportedInstruction:
logger.debug("vivisect encountered an unsupported instruction. will continue processing.", exc_info=True)
except envi.BreakpointHit:
logger.debug(
"vivisect encountered an unexpected emulation breakpoint. will continue processing.", exc_info=True
)
except viv_utils.emulator_drivers.StopEmulation:
pass
except Exception:
logger.debug("vivisect encountered an unexpected exception. will continue processing.", exc_info=True)
logger.debug("Ended emulation at 0x%08X", emu.getProgramCounter())
deltas = delta_collector.deltas
try:
deltas.append(Delta(pre_snap, make_snapshot(emu)))
except MapsTooLargeError:
logger.debug("failed to create final snapshot, emulator mapped too much memory, skipping")
pass
return deltas
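# Usage sketch (hypothetical variable names; assumes a vivisect workspace has
# already been analyzed and an emulator plus viv_utils.FunctionIndex prepared):
#   deltas = emulate_function(emu, function_index, fva, return_address, 20000)
#   # each Delta pairs the pre-call snapshot with the state at an import call,
#   # plus one final snapshot, ready for memory-diff based string extraction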
|
datadog_checks_base/tests/openmetrics/test_interface.py | vbarbaresi/integrations-core | 663 | 26555 | <filename>datadog_checks_base/tests/openmetrics/test_interface.py
# (C) Datadog, Inc. 2020-present
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
import pytest
from datadog_checks.base import OpenMetricsBaseCheckV2
from datadog_checks.base.constants import ServiceCheck
from datadog_checks.dev.testing import requires_py3
from .utils import get_check
pytestmark = [requires_py3, pytest.mark.openmetrics, pytest.mark.openmetrics_interface]
def test_default_config(aggregator, dd_run_check, mock_http_response):
class Check(OpenMetricsBaseCheckV2):
__NAMESPACE__ = 'test'
def get_default_config(self):
return {'metrics': ['.+'], 'rename_labels': {'foo': 'bar'}}
mock_http_response(
"""
# HELP go_memstats_alloc_bytes Number of bytes allocated and still in use.
# TYPE go_memstats_alloc_bytes gauge
go_memstats_alloc_bytes{foo="baz"} 6.396288e+06
"""
)
check = Check('test', {}, [{'openmetrics_endpoint': 'test'}])
dd_run_check(check)
aggregator.assert_metric(
'test.go_memstats_alloc_bytes', 6396288, metric_type=aggregator.GAUGE, tags=['endpoint:test', 'bar:baz']
)
aggregator.assert_all_metrics_covered()
def test_service_check_dynamic_tags(aggregator, dd_run_check, mock_http_response):
mock_http_response(
"""
# HELP go_memstats_alloc_bytes Number of bytes allocated and still in use.
# TYPE go_memstats_alloc_bytes gauge
go_memstats_alloc_bytes{foo="baz"} 6.396288e+06
# HELP state Node state
# TYPE state gauge
state{bar="baz"} 3
"""
)
check = get_check(
{'metrics': ['.+', {'state': {'type': 'service_check', 'status_map': {'3': 'ok'}}}], 'tags': ['foo:bar']}
)
dd_run_check(check)
aggregator.assert_metric(
'test.go_memstats_alloc_bytes',
6396288,
metric_type=aggregator.GAUGE,
tags=['endpoint:test', 'foo:bar', 'foo:baz'],
)
aggregator.assert_service_check('test.state', ServiceCheck.OK, tags=['endpoint:test', 'foo:bar'])
aggregator.assert_service_check('test.openmetrics.health', ServiceCheck.OK, tags=['endpoint:test', 'foo:bar'])
aggregator.assert_all_metrics_covered()
assert len(aggregator.service_check_names) == 2
aggregator.reset()
check.set_dynamic_tags('baz:foo')
dd_run_check(check)
aggregator.assert_metric(
'test.go_memstats_alloc_bytes',
6396288,
metric_type=aggregator.GAUGE,
tags=['endpoint:test', 'foo:bar', 'foo:baz', 'baz:foo'],
)
aggregator.assert_service_check('test.state', ServiceCheck.OK, tags=['endpoint:test', 'foo:bar'])
aggregator.assert_service_check('test.openmetrics.health', ServiceCheck.OK, tags=['endpoint:test', 'foo:bar'])
aggregator.assert_all_metrics_covered()
assert len(aggregator.service_check_names) == 2
|
hbaselines/envs/deeploco/envs.py | reufko/h-baselines | 186 | 26585 | """Script containing the DeepLoco environments."""
import gym
import numpy as np
import os
import sys
import cv2
try:
sys.path.append(os.path.join(os.environ["TERRAINRL_PATH"], "simAdapter"))
import terrainRLSim # noqa: F401
except (KeyError, ImportError, ModuleNotFoundError):
pass
class BipedalSoccer(gym.Env):
"""Bipedal Soccer environment.
In this environment, a bipedal agent is placed in an open field with a
soccer ball. The agent is rewarded for moving to the ball, and additionally
dribbling the ball to the target. The reward function is a weighted sum of
the agent's distance from the ball and the distance of the ball from a
desired goal position. This reward is positive to discourage the agent from
falling prematurely.
Attributes
----------
wrapped_env : gym.Env
        the original environment, which adds more dimensions than wanted here
"""
def __init__(self):
"""Instantiate the environment."""
self.wrapped_env = terrainRLSim.getEnv(
"PD-Biped3D-HLC-Soccer-v1", render=False)
# Add the time horizon.
self.horizon = 512
@property
def observation_space(self):
"""See parent class."""
return self.wrapped_env.observation_space
@property
def action_space(self):
"""See parent class."""
return self.wrapped_env.action_space
def step(self, action):
"""See parent class."""
obs, rew, done, info = self.wrapped_env.step(np.array([action]))
return obs[0], rew[0][0], done, info
def reset(self):
"""See parent class."""
return self.wrapped_env.reset()[0]
def render(self, mode='human'):
"""See parent class."""
return self.wrapped_env.render(mode=mode)
class BipedalObstacles(gym.Env):
"""Bipedal Obstacles environment.
In this environment, a bipedal agent is placed in an open field with
obstacles scattered throughout the world. The goal of the agent is to
walk around the world and reach a goal position.
Attributes
----------
wrapped_env : gym.Env
        the original environment, which adds more dimensions than wanted here
"""
def __init__(self, render):
"""Instantiate the environment.
Parameters
----------
render : bool
whether to render the environment
"""
self.t = 0
if render:
self.wrapped_env = gym.make("PD-Biped3D-HLC-Obstacles-render-v2")
else:
self.wrapped_env = gym.make("PD-Biped3D-HLC-Obstacles-v2")
# Add the time horizon.
self.horizon = 2000
@property
def observation_space(self):
"""See parent class."""
return gym.spaces.Box(
low=20 * self.wrapped_env.observation_space.low[:-2],
high=20 * self.wrapped_env.observation_space.high[:-2],
dtype=np.float32)
@property
def context_space(self):
"""See parent class."""
return gym.spaces.Box(
low=20 * self.wrapped_env.observation_space.low[-2:],
high=20 * self.wrapped_env.observation_space.high[-2:],
dtype=np.float32)
@property
def action_space(self):
"""See parent class."""
return self.wrapped_env.action_space
@property
def current_context(self):
"""See parent class."""
return self.wrapped_env.env.getObservation()[-2:]
def step(self, action):
"""See parent class."""
self.t += 1
obs, rew, done, info = self.wrapped_env.step(action)
done = done or self.t >= self.horizon
return obs[:-2], rew, done, info
def reset(self):
"""See parent class."""
self.t = 0
return self.wrapped_env.reset()[:-2]
def render(self, mode='human'):
"""See parent class."""
image = self.wrapped_env.env.render(
headless_step=True)
if mode == 'human':
f = np.flip(image.astype(np.float32) / 255.0, axis=0)
f = np.flip(f, axis=2)
cv2.imshow("PD-Biped3D-HLC-Obstacles-v2", f)
cv2.waitKey(1)
elif mode == 'rgb_array':
return image
|
river/tree/hoeffding_tree.py | online-ml/creme | 1,105 | 26589 | <reponame>online-ml/creme<filename>river/tree/hoeffding_tree.py
import collections
import functools
import io
import math
import typing
from abc import ABC, abstractmethod
from river import base
from river.utils.skmultiflow_utils import (
calculate_object_size,
normalize_values_in_dict,
)
from .nodes.branch import (
DTBranch,
NominalBinaryBranch,
NominalMultiwayBranch,
NumericBinaryBranch,
NumericMultiwayBranch,
)
from .nodes.leaf import HTLeaf
try:
import graphviz
GRAPHVIZ_INSTALLED = True
except ImportError:
GRAPHVIZ_INSTALLED = False
class HoeffdingTree(ABC):
"""Base class for Hoeffding Decision Trees.
This is an **abstract class**, so it cannot be used directly. It defines base operations
and properties that all the Hoeffding decision trees must inherit or implement according to
their own design.
Parameters
----------
max_depth
The maximum depth a tree can reach. If `None`, the tree will grow indefinitely.
binary_split
If True, only allow binary splits.
max_size
The max size of the tree, in Megabytes (MB).
memory_estimate_period
Interval (number of processed instances) between memory consumption checks.
stop_mem_management
If True, stop growing as soon as memory limit is hit.
remove_poor_attrs
If True, disable poor attributes to reduce memory usage.
merit_preprune
If True, enable merit-based tree pre-pruning.
"""
def __init__(
self,
max_depth: int = None,
binary_split: bool = False,
max_size: float = 100.0,
memory_estimate_period: int = 1000000,
stop_mem_management: bool = False,
remove_poor_attrs: bool = False,
merit_preprune: bool = True,
):
# Properties common to all the Hoeffding trees
self._split_criterion: str = ""
self._leaf_prediction: str = ""
self.max_depth: float = max_depth if max_depth is not None else math.inf
self.binary_split: bool = binary_split
self._max_size: float = max_size
self._max_byte_size: float = self._max_size * (2**20) # convert to byte
self.memory_estimate_period: int = memory_estimate_period
self.stop_mem_management: bool = stop_mem_management
self.remove_poor_attrs: bool = remove_poor_attrs
self.merit_preprune: bool = merit_preprune
self._root: typing.Union[DTBranch, HTLeaf, None] = None
self._n_active_leaves: int = 0
self._n_inactive_leaves: int = 0
self._inactive_leaf_size_estimate: float = 0.0
self._active_leaf_size_estimate: float = 0.0
self._size_estimate_overhead_fraction: float = 1.0
self._growth_allowed = True
self._train_weight_seen_by_model: float = 0.0
@staticmethod
def _hoeffding_bound(range_val, confidence, n):
r"""Compute the Hoeffding bound, used to decide how many samples are necessary at each
node.
Notes
-----
The Hoeffding bound is defined as:
$\\epsilon = \\sqrt{\\frac{R^2\\ln(1/\\delta))}{2n}}$
where:
$\\epsilon$: Hoeffding bound.
$R$: Range of a random variable. For a probability the range is 1, and for an
information gain the range is log *c*, where *c* is the number of classes.
$\\delta$: Confidence. 1 minus the desired probability of choosing the correct
attribute at any given node.
$n$: Number of samples.
Parameters
----------
range_val
Range value.
confidence
Confidence of choosing the correct attribute.
n
Number of processed samples.
"""
return math.sqrt(
(range_val * range_val * math.log(1.0 / confidence)) / (2.0 * n)
)
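    # Worked example (illustrative, not taken from the library's docs): for a
    # probability-ranged merit (R = 1), confidence delta = 1e-7 and n = 200
    # observed samples the bound is
    #   sqrt((1 ** 2 * ln(1 / 1e-7)) / (2 * 200)) ~= 0.2007
    # so a split is only taken once the merit gap between the two best
    # attributes exceeds roughly 0.2 (or a tie-breaking threshold applies).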
@property
def max_size(self):
"""Max allowed size tree can reach (in MB)."""
return self._max_size
@max_size.setter
def max_size(self, size):
self._max_size = size
self._max_byte_size = self._max_size * (2**20)
@property
def height(self) -> int:
if self._root:
return self._root.height
@property
def n_nodes(self):
if self._root:
return self._root.n_nodes
@property
def n_branches(self):
if self._root:
return self._root.n_branches
@property
def n_leaves(self):
if self._root:
return self._root.n_leaves
@property
def n_active_leaves(self):
return self._n_active_leaves
@property
def n_inactive_leaves(self):
return self._n_inactive_leaves
@property
def summary(self):
"""Collect metrics corresponding to the current status of the tree
in a string buffer.
"""
summary = {
"n_nodes": self.n_nodes,
"n_branches": self.n_branches,
"n_leaves": self.n_leaves,
"n_active_leaves": self.n_active_leaves,
"n_inactive_leaves": self.n_inactive_leaves,
"height": self.height,
"total_observed_weight": self._train_weight_seen_by_model,
}
return summary
def to_dataframe(self):
"""Return a representation of the current tree structure organized in a
`pandas.DataFrame` object.
In case the tree is empty or it only contains a single node (a leaf), `None` is returned.
Returns
-------
df
A `pandas.DataFrame` depicting the tree structure.
"""
if self._root is not None and isinstance(self._root, DTBranch):
return self._root.to_dataframe()
def _branch_selector(
self, numerical_feature=True, multiway_split=False
) -> typing.Type[DTBranch]:
"""Create a new split node."""
if numerical_feature:
if not multiway_split:
return NumericBinaryBranch
else:
return NumericMultiwayBranch
else:
if not multiway_split:
return NominalBinaryBranch
else:
return NominalMultiwayBranch
@abstractmethod
def _new_leaf(
self, initial_stats: dict = None, parent: typing.Union[HTLeaf, DTBranch] = None
) -> HTLeaf:
"""Create a new learning node.
The characteristics of the learning node depends on the tree algorithm.
Parameters
----------
initial_stats
Target statistics set from the parent node.
parent
Parent node to inherit from.
Returns
-------
A new learning node.
"""
@property
def split_criterion(self) -> str:
"""Return a string with the name of the split criterion being used by the tree."""
return self._split_criterion
@split_criterion.setter
@abstractmethod
def split_criterion(self, split_criterion):
"""Define the split criterion to be used by the tree."""
@property
def leaf_prediction(self) -> str:
"""Return the prediction strategy used by the tree at its leaves."""
return self._leaf_prediction
@leaf_prediction.setter
@abstractmethod
def leaf_prediction(self, leaf_prediction):
"""Define the prediction strategy used by the tree in its leaves."""
def _enforce_size_limit(self):
"""Track the size of the tree and disable/enable nodes if required.
This memory-management routine shared by all the Hoeffding Trees is based on [^1].
References
----------
[^1]: <NAME>., 2007. Improving hoeffding trees (Doctoral dissertation,
The University of Waikato).
"""
tree_size = self._size_estimate_overhead_fraction * (
self._active_leaf_size_estimate
+ self._n_inactive_leaves * self._inactive_leaf_size_estimate
)
if self._n_inactive_leaves > 0 or tree_size > self._max_byte_size:
if self.stop_mem_management:
self._growth_allowed = False
return
leaves = self._find_leaves()
leaves.sort(key=lambda leaf: leaf.calculate_promise())
max_active = 0
while max_active < len(leaves):
max_active += 1
if (
(
max_active * self._active_leaf_size_estimate
+ (len(leaves) - max_active) * self._inactive_leaf_size_estimate
)
* self._size_estimate_overhead_fraction
) > self._max_byte_size:
max_active -= 1
break
cutoff = len(leaves) - max_active
for i in range(cutoff):
if leaves[i].is_active():
leaves[i].deactivate()
self._n_inactive_leaves += 1
self._n_active_leaves -= 1
for i in range(cutoff, len(leaves)):
if not leaves[i].is_active() and leaves[i].depth < self.max_depth:
leaves[i].activate()
self._n_active_leaves += 1
self._n_inactive_leaves -= 1
def _estimate_model_size(self):
"""Calculate the size of the model and trigger tracker function
if the actual model size exceeds the max size in the configuration.
This memory-management routine shared by all the Hoeffding Trees is based on [^1].
References
----------
[^1]: <NAME>., 2007. Improving hoeffding trees (Doctoral dissertation,
The University of Waikato).
"""
leaves = self._find_leaves()
total_active_size = 0
total_inactive_size = 0
for leaf in leaves:
if leaf.is_active():
total_active_size += calculate_object_size(leaf)
else:
total_inactive_size += calculate_object_size(leaf)
if total_active_size > 0:
self._active_leaf_size_estimate = total_active_size / self._n_active_leaves
if total_inactive_size > 0:
self._inactive_leaf_size_estimate = (
total_inactive_size / self._n_inactive_leaves
)
actual_model_size = calculate_object_size(self)
estimated_model_size = (
self._n_active_leaves * self._active_leaf_size_estimate
+ self._n_inactive_leaves * self._inactive_leaf_size_estimate
)
self._size_estimate_overhead_fraction = actual_model_size / estimated_model_size
if actual_model_size > self._max_byte_size:
self._enforce_size_limit()
def _deactivate_all_leaves(self):
"""Deactivate all leaves."""
leaves = self._find_leaves()
for leaf in leaves:
leaf.deactivate()
self._n_inactive_leaves += 1
self._n_active_leaves -= 1
def _find_leaves(self) -> typing.List[HTLeaf]:
"""Find learning nodes in the tree.
Returns
-------
List of learning nodes in the tree.
"""
return [leaf for leaf in self._root.iter_leaves()]
# Adapted from creme's original implementation
def debug_one(self, x: dict) -> typing.Union[str, None]:
"""Print an explanation of how `x` is predicted.
Parameters
----------
x
A dictionary of features.
Returns
-------
A representation of the path followed by the tree to predict `x`; `None` if
the tree is empty.
Notes
-----
Currently, Label Combination Hoeffding Tree Classifier (for multi-label
classification) is not supported.
"""
if self._root is None:
return
# We'll redirect all the print statement to a buffer, we'll return the content of the
# buffer at the end
buffer = io.StringIO()
_print = functools.partial(print, file=buffer)
for node in self._root.walk(x, until_leaf=True):
if isinstance(node, HTLeaf):
_print(repr(node))
else:
try:
child_index = node.branch_no(x) # noqa
except KeyError:
child_index, _ = node.most_common_path()
_print(node.repr_branch(child_index)) # noqa
return buffer.getvalue()
def draw(self, max_depth: int = None):
"""Draw the tree using the `graphviz` library.
Since the tree is drawn without passing incoming samples, classification trees
will show the majority class in their leaves, whereas regression trees will
use the target mean.
Parameters
----------
max_depth
Only the root will be drawn when set to `0`. Every node will be drawn when
set to `None`.
Notes
-----
Currently, Label Combination Hoeffding Tree Classifier (for multi-label
classification) is not supported.
Examples
--------
>>> from river import datasets
>>> from river import tree
>>> model = tree.HoeffdingTreeClassifier(
... grace_period=5,
... split_confidence=1e-5,
... split_criterion='gini',
... max_depth=10,
... tie_threshold=0.05,
... )
>>> for x, y in datasets.Phishing():
... model = model.learn_one(x, y)
>>> dot = model.draw()
.. image:: ../../docs/img/dtree_draw.svg
:align: center
"""
counter = 0
def iterate(node=None):
if node is None:
yield None, None, self._root, 0, None
yield from iterate(self._root)
nonlocal counter
parent_no = counter
if isinstance(node, DTBranch):
for branch_index, child in enumerate(node.children):
counter += 1
yield parent_no, node, child, counter, branch_index
if isinstance(child, DTBranch):
yield from iterate(child)
if max_depth is None:
max_depth = math.inf
dot = graphviz.Digraph(
graph_attr={"splines": "ortho", "forcelabels": "true", "overlap": "false"},
node_attr={
"shape": "box",
"penwidth": "1.2",
"fontname": "trebuchet",
"fontsize": "11",
"margin": "0.1,0.0",
},
edge_attr={"penwidth": "0.6", "center": "true", "fontsize": "7 "},
)
if isinstance(self, base.Classifier):
n_colors = len(self.classes) # noqa
else:
n_colors = 1
# Pick a color palette which maps classes to colors
new_color = functools.partial(next, iter(_color_brew(n_colors)))
palette = collections.defaultdict(new_color)
for parent_no, parent, child, child_no, branch_index in iterate():
if child.depth > max_depth:
continue
if isinstance(child, DTBranch):
text = f"{child.feature}" # noqa
else:
text = f"{repr(child)}\nsamples: {int(child.total_weight)}"
# Pick a color, the hue depends on the class and the transparency on the distribution
if isinstance(self, base.Classifier):
class_proba = normalize_values_in_dict(child.stats, inplace=False)
mode = max(class_proba, key=class_proba.get)
p_mode = class_proba[mode]
try:
alpha = (p_mode - 1 / n_colors) / (1 - 1 / n_colors)
fillcolor = str(transparency_hex(color=palette[mode], alpha=alpha))
except ZeroDivisionError:
fillcolor = "#FFFFFF"
else:
fillcolor = "#FFFFFF"
dot.node(f"{child_no}", text, fillcolor=fillcolor, style="filled")
if parent_no is not None:
dot.edge(
f"{parent_no}",
f"{child_no}",
xlabel=parent.repr_branch(branch_index, shorten=True),
)
return dot
# Utility adapted from the original creme's implementation
def _color_brew(n: int) -> typing.List[typing.Tuple[int, int, int]]:
"""Generate n colors with equally spaced hues.
Parameters
----------
n
The number of required colors.
Returns
-------
List of n tuples of form (R, G, B) being the components of each color.
References
----------
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/tree/_export.py
"""
colors = []
# Initialize saturation & value; calculate chroma & value shift
s, v = 0.75, 0.9
c = s * v
m = v - c
for h in [i for i in range(25, 385, int(360 / n))]:
# Calculate some intermediate values
h_bar = h / 60.0
x = c * (1 - abs((h_bar % 2) - 1))
# Initialize RGB with same hue & chroma as our color
rgb = [
(c, x, 0),
(x, c, 0),
(0, c, x),
(0, x, c),
(x, 0, c),
(c, 0, x),
(c, x, 0),
]
r, g, b = rgb[int(h_bar)]
# Shift the initial RGB values to match value and store
colors.append(
((int(255 * (r + m))), (int(255 * (g + m))), (int(255 * (b + m))))
)
return colors
# Utility adapted from the original creme's implementation
def transparency_hex(color: typing.Tuple[int, int, int], alpha: float) -> str:
"""Apply alpha coefficient on hexadecimal color."""
return "#%02x%02x%02x" % tuple(
[int(round(alpha * c + (1 - alpha) * 255, 0)) for c in color]
)
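# Example (values chosen for illustration): blending pure red with white at
# alpha = 0.5 gives transparency_hex((255, 0, 0), 0.5) == "#ff8080".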
|
services/docker/webrecorder/local.py | rachelaus/perma | 317 | 26591 | import hashlib
import logging
import os
import shutil
import traceback
from contextlib import closing
from pywb.utils.loaders import BlockLoader
from webrecorder.rec.storage.base import BaseStorage
from webrecorder.rec.storage.storagepaths import add_local_store_prefix, strip_prefix
logger = logging.getLogger('wr.io')
# ============================================================================
class DirectLocalFileStorage(BaseStorage):
"""Webrecorder storage (local files)."""
def __init__(self):
"""Initialize Webrecorder storage."""
super(DirectLocalFileStorage, self).__init__(os.environ['STORAGE_ROOT'])
def delete_collection_dir(self, dir_path):
"""Delete collection directory.
:param str dir_path: directory path
:returns: whether successful or not
:rtype: bool
"""
local_dir = os.path.join(self.storage_root, dir_path)
try:
logger.debug('Local Store: Deleting Directory: ' + local_dir)
parent_dir = os.path.dirname(local_dir)
shutil.rmtree(local_dir)
os.removedirs(parent_dir)
return True
except Exception as e:
if e.errno != 2:
logger.error(str(e))
return False
def do_upload(self, target_url, full_filename):
"""Upload file into local file storage.
:param str target_url: target URL
:param str full_filename: path
:returns: whether successful or not
:rtype: bool
"""
os.makedirs(os.path.dirname(target_url), exist_ok=True)
try:
if full_filename != target_url:
shutil.copyfile(full_filename, target_url)
else:
logger.debug('Local Store: Same File, No Upload')
return True
except Exception as e:
logger.error(str(e))
return False
def is_valid_url(self, target_url):
"""Return whether given target URL is an existing file.
:param str target_url: target URL
:returns: whether given target URL is an existing file
:rtype: bool
"""
return os.path.isfile(target_url)
def get_client_url(self, target_url):
"""Get client URL.
:param str target_url: target URL
:returns: client URL
:rtype: str
"""
return add_local_store_prefix(target_url.replace(os.path.sep, '/'))
def client_url_to_target_url(self, client_url):
"""Get target URL (from client URL).
:param str client URL: client URL
:returns: target URL
:rtype: str
"""
return strip_prefix(client_url)
def do_delete(self, target_url, client_url):
"""Delete file from storage.
:param str target_url: target URL
:returns: whether successful or not
:rtype: bool
"""
try:
logger.debug('Local Store: Deleting: ' + target_url)
os.remove(target_url)
# if target_url.startswith(self.storage_root):
# os.removedirs(os.path.dirname(target_url))
return True
except Exception as e:
if e.errno != 2:
logger.error(str(e))
return False
# ============================================================================
class LocalFileStorage(DirectLocalFileStorage):
"""Webrecorder storage w/ Redis interface (local files).
:ivar StrictRedis redis: Redis interface
"""
def __init__(self, redis):
"""Initialize Webrecorder storage w/ Redis interface.
:param StrictRedis redis: Redis interface
"""
self.redis = redis
super(LocalFileStorage, self).__init__()
### BEGIN PERMA CUSTOMIZATIONS
### First pass at https://github.com/harvard-lil/perma/issues/2614
def delete_collection(self, collection):
"""Delete collection.
:param collection: collection
:type: n.s.
:returns: whether successful or not
:rtype: bool
"""
path = collection.get_dir_path()
if path:
try:
dirpath = os.path.join(self.storage_root, path)
return (self.redis.publish('handle_delete_dir', dirpath) > 0)
except Exception:
logger.error("Failed attempt to delete collection {}".format(collection), exc_info=True)
return False
return False
### END PERMA CUSTOMIZATIONS
def do_delete(self, target_url, client_url):
"""Delete file.
:param str target_url: target URL
:param str client_url: client URL (unused argument)
:returns: whether successful or not
:rtype: bool
"""
return self.redis.publish('handle_delete_file', target_url) > 0
def get_checksum_and_size(self, filepath_or_url):
"""Returns the checksum of the supplied URL or filepath and the size of the resource
        :param str filepath_or_url: The URL or filepath to the resource for which the checksum and size are desired
:return: A three tuple containing the kind of checksum, the checksum itself, and size
:rtype: tuple[str|None, str|None, int|None]
"""
m = hashlib.md5()
amount = 1024 * 1024
total_size = 0
with closing(BlockLoader().load(filepath_or_url)) as f:
while True:
chunk = f.read(amount)
chunk_size = len(chunk)
if chunk_size == 0:
break
total_size += chunk_size
m.update(chunk)
return 'md5', m.hexdigest(), total_size
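    # Usage sketch (the path below is illustrative only):
    #   kind, digest, size = storage.get_checksum_and_size('/data/warcs/rec-1.warc.gz')
    #   # -> ('md5', '<32-character hex digest>', <resource size in bytes>)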
|
cape_privacy/pandas/transformations/test_utils.py | vismaya-Kalaiselvan/cape-python | 144 | 26612 | <gh_stars>100-1000
import pandas as pd
class PlusN:
"""A sample transform that adds n to a specific field.
Attributes:
field: The field that this transform will be applied to.
n: The value to add to the field.
"""
identifier = "plusN"
type_signature = "col->col"
def __init__(self, n: int = 1) -> None:
self.n = n
def __call__(self, column: pd.Series) -> pd.Series:
return column + self.n
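# Usage sketch (not part of the original test module; assumes pandas is
# available, as imported above):
#   PlusN(n=3)(pd.Series([1, 2, 3]))  # -> pd.Series([4, 5, 6])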
|
ext/testlib/suite.py | mandaltj/gem5_chips | 135 | 26640 | <gh_stars>100-1000
# Copyright (c) 2017 <NAME> and <NAME>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: <NAME>
import helper
import runner as runner_mod
class TestSuite(object):
'''
An object grouping a collection of tests. It provides tags which enable
filtering during list and run selection. All tests held in the suite must
have a unique name.
..note::
The :func:`__new__` method enables collection of test cases, it must
be called in order for test cases to be collected.
..note::
To reduce test definition boilerplate, the :func:`init` method is
forwarded all `*args` and `**kwargs`. This means derived classes can
define init without boilerplate super().__init__(*args, **kwargs).
'''
runner = runner_mod.SuiteRunner
collector = helper.InstanceCollector()
fixtures = []
tests = []
tags = set()
def __new__(klass, *args, **kwargs):
obj = super(TestSuite, klass).__new__(klass, *args, **kwargs)
TestSuite.collector.collect(obj)
return obj
def __init__(self, name=None, fixtures=tuple(), tests=tuple(),
tags=tuple(), **kwargs):
self.fixtures = self.fixtures + list(fixtures)
self.tags = self.tags | set(tags)
self.tests = self.tests + list(tests)
if name is None:
name = self.__class__.__name__
self.name = name
def __iter__(self):
        return iter(self.tests)
|
rotkehlchen/accounting/export/csv.py | rotkehlchenio/rotkehlchen | 137 | 26690 | <reponame>rotkehlchenio/rotkehlchen
import json
import logging
from csv import DictWriter
from pathlib import Path
from tempfile import mkdtemp
from typing import TYPE_CHECKING, Any, Dict, List, Literal, Tuple
from zipfile import ZIP_DEFLATED, ZipFile
from rotkehlchen.accounting.pnl import PnlTotals
from rotkehlchen.constants.misc import ZERO
from rotkehlchen.fval import FVal
from rotkehlchen.logging import RotkehlchenLogsAdapter
from rotkehlchen.types import Timestamp
from rotkehlchen.utils.mixins.customizable_date import CustomizableDateMixin
from rotkehlchen.utils.version_check import get_current_version
if TYPE_CHECKING:
from rotkehlchen.accounting.structures.processed_event import ProcessedAccountingEvent
from rotkehlchen.db.dbhandler import DBHandler
logger = logging.getLogger(__name__)
log = RotkehlchenLogsAdapter(logger)
FILENAME_ALL_CSV = 'all_events.csv'
ETH_EXPLORER = 'https://etherscan.io/tx/'
ACCOUNTING_SETTINGS = (
'include_crypto2crypto',
'taxfree_after_period',
'include_gas_costs',
'account_for_assets_movements',
'calculate_past_cost_basis',
)
CSV_INDEX_OFFSET = 2 # skip title row and since counting starts from 1
class CSVWriteError(Exception):
pass
def _dict_to_csv_file(path: Path, dictionary_list: List) -> None:
"""Takes a filepath and a list of dictionaries representing the rows and writes them
into the file as a CSV
May raise:
    - CSVWriteError if DictWriter.writerow() tried to write a dict that
    contains fields not in fieldnames
"""
if len(dictionary_list) == 0:
        log.debug('Skipping writing empty CSV for {}'.format(path))
return
with open(path, 'w', newline='') as f:
w = DictWriter(f, fieldnames=dictionary_list[0].keys())
w.writeheader()
try:
for dic in dictionary_list:
w.writerow(dic)
except ValueError as e:
raise CSVWriteError(f'Failed to write {path} CSV due to {str(e)}') from e
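# Minimal usage sketch (file name and row contents are illustrative only):
#   _dict_to_csv_file(Path('out.csv'), [{'asset': 'ETH', 'amount': '1.5'}])
# writes a CSV with an "asset,amount" header row followed by the row "ETH,1.5".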
class CSVExporter(CustomizableDateMixin):
def __init__(
self,
database: 'DBHandler',
):
super().__init__(database=database)
self.reset(start_ts=Timestamp(0), end_ts=Timestamp(0))
def reset(self, start_ts: Timestamp, end_ts: Timestamp) -> None:
self.start_ts = start_ts
self.end_ts = end_ts
self.reload_settings()
try:
frontend_settings = json.loads(self.settings.frontend_settings)
if (
'explorers' in frontend_settings and
'ETH' in frontend_settings['explorers'] and
'transaction' in frontend_settings['explorers']['ETH']
):
self.eth_explorer = frontend_settings['explorers']['ETH']['transaction']
else:
self.eth_explorer = ETH_EXPLORER
except (json.decoder.JSONDecodeError, KeyError):
self.eth_explorer = ETH_EXPLORER
def _add_sumif_formula(
self,
check_range: str,
condition: str,
sum_range: str,
actual_value: FVal,
) -> str:
if self.settings.pnl_csv_with_formulas is False:
return str(actual_value)
return f'=SUMIF({check_range};{condition};{sum_range})'
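    # Example (illustrative cell ranges and value): with formulas enabled,
    # self._add_sumif_formula('A2:A10', '"trade"', 'I2:I10', FVal(3)) returns
    # '=SUMIF(A2:A10;"trade";I2:I10)'; with formulas disabled it returns the
    # stringified actual value, '3'.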
def _add_pnl_type(
self,
event: 'ProcessedAccountingEvent',
dict_event: Dict[str, Any],
amount_column: str,
name: Literal['free', 'taxable'],
) -> None:
"""Adds the pnl type value and cost basis to the passed dict event"""
if getattr(event.pnl, name, ZERO) == ZERO:
return
index = event.index + CSV_INDEX_OFFSET
value_formula = f'{amount_column}{index}*H{index}'
total_value_formula = f'(F{index}*H{index}+G{index}*H{index})' # noqa: E501 # formula of both free and taxable
cost_basis_column = 'K' if name == 'taxable' else 'L'
cost_basis = f'{cost_basis_column}{index}'
should_count_entire_spend_formula = (
            (name == 'taxable' and event.timestamp >= self.start_ts) or
            (name == 'free' and event.timestamp < self.start_ts)
)
if event.count_entire_amount_spend and should_count_entire_spend_formula:
equation = (
f'=IF({cost_basis}="",'
f'-{total_value_formula},'
f'-{total_value_formula}+{value_formula}-{cost_basis})'
)
else:
equation = (
f'=IF({cost_basis}="",'
f'{value_formula},'
f'{value_formula}-{cost_basis})'
)
dict_event[f'pnl_{name}'] = equation
cost_basis = ''
if event.cost_basis is not None:
for acquisition in event.cost_basis.matched_acquisitions:
if name == 'taxable' and acquisition.taxable is False:
continue
if name == 'free' and acquisition.taxable is True:
continue
index = acquisition.event.index + CSV_INDEX_OFFSET
if cost_basis == '':
cost_basis = '='
else:
cost_basis += '+'
cost_basis += f'{str(acquisition.amount)}*H{index}'
dict_event[f'cost_basis_{name}'] = cost_basis
def _maybe_add_summary(self, events: List[Dict[str, Any]], pnls: PnlTotals) -> None:
"""Depending on given settings, adds a few summary lines at the end of
the all events PnL report"""
if self.settings.pnl_csv_have_summary is False:
return
length = len(events) + 1
template: Dict[str, Any] = {
'type': '',
'notes': '',
'location': '',
'timestamp': '',
'asset': '',
'free_amount': '',
'taxable_amount': '',
'price': '',
'pnl_taxable': '',
'cost_basis_taxable': '',
'pnl_free': '',
'cost_basis_free': '',
}
events.append(template) # separate with 2 new lines
events.append(template)
entry = template.copy()
entry['taxable_amount'] = 'TAXABLE'
entry['price'] = 'FREE'
events.append(entry)
start_sums_index = length + 4
sums = 0
for name, value in pnls.items():
if value.taxable == ZERO and value.free == ZERO:
continue
sums += 1
entry = template.copy()
entry['free_amount'] = f'{str(name)} total'
entry['taxable_amount'] = self._add_sumif_formula(
check_range=f'A2:A{length}',
condition=f'"{str(name)}"',
sum_range=f'I2:I{length}',
actual_value=value.taxable,
)
entry['price'] = self._add_sumif_formula(
check_range=f'A2:A{length}',
condition=f'"{str(name)}"',
sum_range=f'J2:J{length}',
actual_value=value.free,
)
events.append(entry)
entry = template.copy()
entry['free_amount'] = 'TOTAL'
if sums != 0:
entry['taxable_amount'] = f'=SUM(G{start_sums_index}:G{start_sums_index+sums-1})'
entry['price'] = f'=SUM(H{start_sums_index}:H{start_sums_index+sums-1})'
else:
entry['taxable_amount'] = entry['price'] = 0
events.append(entry)
events.append(template) # separate with 2 new lines
events.append(template)
version_result = get_current_version(check_for_updates=False)
entry = template.copy()
entry['free_amount'] = 'rotki version'
entry['taxable_amount'] = version_result.our_version
events.append(entry)
for setting in ACCOUNTING_SETTINGS:
entry = template.copy()
entry['free_amount'] = setting
entry['taxable_amount'] = str(getattr(self.settings, setting))
events.append(entry)
def create_zip(
self,
events: List['ProcessedAccountingEvent'],
pnls: PnlTotals,
) -> Tuple[bool, str]:
# TODO: Find a way to properly delete the directory after send is complete
dirpath = Path(mkdtemp())
success, msg = self.export(events=events, pnls=pnls, directory=dirpath)
if not success:
return False, msg
files: List[Tuple[Path, str]] = [
(dirpath / FILENAME_ALL_CSV, FILENAME_ALL_CSV),
]
with ZipFile(file=dirpath / 'csv.zip', mode='w', compression=ZIP_DEFLATED) as csv_zip:
for path, filename in files:
if not path.exists():
continue
csv_zip.write(path, filename)
path.unlink()
success = False
filename = ''
if csv_zip.filename is not None:
success = True
filename = csv_zip.filename
return success, filename
def to_csv_entry(self, event: 'ProcessedAccountingEvent') -> Dict[str, Any]:
dict_event = event.to_exported_dict(
ts_converter=self.timestamp_to_date,
eth_explorer=self.eth_explorer,
for_api=False,
)
# For CSV also convert timestamp to date
dict_event['timestamp'] = self.timestamp_to_date(event.timestamp)
if self.settings.pnl_csv_with_formulas is False:
return dict_event
# else add formulas
self._add_pnl_type(event=event, dict_event=dict_event, amount_column='F', name='free')
self._add_pnl_type(event=event, dict_event=dict_event, amount_column='G', name='taxable')
return dict_event
def export(
self,
events: List['ProcessedAccountingEvent'],
pnls: PnlTotals,
directory: Path,
) -> Tuple[bool, str]:
        serialized_events = [self.to_csv_entry(x) for x in events]
self._maybe_add_summary(events=serialized_events, pnls=pnls)
try:
directory.mkdir(parents=True, exist_ok=True)
_dict_to_csv_file(
directory / FILENAME_ALL_CSV,
serialized_events,
)
except (CSVWriteError, PermissionError) as e:
return False, str(e)
return True, ''
|
src/abundance.py | Ilia-Abolhasani/modify_vamb | 111 | 26706 | import sys
import os
import argparse
import numpy as np
parser = argparse.ArgumentParser(
description="""Command-line bin abundance estimator.
Print the median RPKM abundance for each bin in each sample to STDOUT.
Will read the RPKM file into memory - beware.""",
formatter_class=argparse.RawDescriptionHelpFormatter,
add_help=False)
parser.add_argument('rpkmpath', help='Path to RPKM file')
parser.add_argument('clusterspath', help='Path to clusters.tsv')
parser.add_argument('headerpath', help='Path to list of headers')
if len(sys.argv) == 1:
parser.print_help()
sys.exit()
args = parser.parse_args()
# Check files
for infile in (args.rpkmpath, args.clusterspath, args.headerpath):
if not os.path.isfile(infile):
raise FileNotFoundError(infile)
# Load Vamb
sys.path.append('../vamb')
import vamb
# Load in files
with open(args.headerpath) as file:
indexof = {line.strip():i for i,line in enumerate(file)}
with open(args.clusterspath) as file:
clusters = vamb.vambtools.read_clusters(file)
# Check that all clusters names are in headers:
for cluster in clusters.values():
for header in cluster:
if header not in indexof:
raise KeyError("Header not found in headerlist: {}".format(header))
# Load RPKM and check it
rpkm = vamb.vambtools.read_npz(args.rpkmpath)
nsamples = rpkm.shape[1]
if len(indexof) != len(rpkm):
raise ValueError("Not the same number of headers as rows in RPKM file")
# Now estimate abundances
for clustername, cluster in clusters.items():
depths = np.empty((len(cluster), nsamples), dtype=np.float32)
for row, header in enumerate(cluster):
index = indexof[header]
depths[row] = rpkm[index]
median_depths = np.median(depths, axis=0)
print(clustername, end='\t')
print('\t'.join([str(i) for i in median_depths]))
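# Example invocation (file names are illustrative):
#   python abundance.py rpkm.npz clusters.tsv headers.txt > bin_abundances.tsv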
|
pony/orm/tests/test_f_strings.py | luckydonald/pony | 2,628 | 26762 | <filename>pony/orm/tests/test_f_strings.py
from sys import version_info
if version_info[:2] >= (3, 6):
from pony.orm.tests.py36_test_f_strings import * |
applications/MultilevelMonteCarloApplication/python_scripts/statistical_variable_utilities.py | lkusch/Kratos | 778 | 26808 | <filename>applications/MultilevelMonteCarloApplication/python_scripts/statistical_variable_utilities.py
# Import Python libraries
import os
import numpy as np
# Import distributed framework
from exaqute import *
try:
init()
except:
pass
try:
computing_units_auxiliar_utilities = int(os.environ["computing_units_auxiliar_utilities"])
except:
computing_units_auxiliar_utilities = 1
"""
auxiliary function of UpdateOnePassCentralMoments of the StatisticalVariable class
input: sample: new value that will update the statistics
old_mean : old mean
old_central_moment_1 : old first central moment
compute_M1 : boolean setting if computation is needed
old_central_moment_2 : old second central moment
compute_M2 : boolean setting if computation is needed
old_central_moment_3 : old third central moment
compute_M3 : boolean setting if computation is needed
        old_central_moment_4 : old fourth central moment
        compute_M4 : boolean setting if computation is needed
nsamples : old number of samples computed, starts from 1
output: new_mean : updated mean
new_sample_variance : updated sample variance
new_central_moment_1 : updated central_moment_1
new_central_moment_2 : updated central_moment_2
new_central_moment_3 : updated central_moment_3
new_central_moment_4 : updated central_moment_4
nsamples : updated number of samples
"""
@constraint(computing_units=computing_units_auxiliar_utilities)
@task(keep=True,returns=7,priority=True)
def UpdateOnePassCentralMomentsAux_Task(sample,old_mean,old_central_moment_1,compute_M1,old_central_moment_2,compute_M2,old_central_moment_3,compute_M3,old_central_moment_4,compute_M4,nsamples):
old_M1 = old_central_moment_1 * nsamples
old_M2 = old_central_moment_2 * nsamples
old_M3 = old_central_moment_3 * nsamples
old_M4 = old_central_moment_4 * nsamples
nsamples = nsamples + 1
if nsamples == 1:
new_mean = sample
new_M1 = 0.0
new_M2 = 0.0
new_sample_variance = 0.0
new_M3 = 0.0
new_M4 = 0.0
else:
delta = np.subtract(sample,old_mean)
new_mean = old_mean + np.divide(delta,nsamples)
if (compute_M1):
new_M1 = old_M1 # we are not updating, first central moment = 0.0
else:
new_M1 = old_M1 # we are not updating, first central moment = 0.0
if (compute_M2):
new_M2 = old_M2 + delta*np.subtract(sample,new_mean)
else:
raise Exception ("Not computing StatisticalVariable.central_moment_2, set StatisticalVariable.central_moment_2_to_compute to True")
new_sample_variance = np.divide(new_M2,np.subtract(nsamples,1))
if (compute_M3):
new_M3 = old_M3 - 3.0*old_M2*np.divide(delta,nsamples) + np.divide(np.multiply((nsamples-1)*(nsamples-2),(delta**3)),(nsamples**2))
else:
new_M3 = old_M3 # we are not updating
if (compute_M4):
new_M4 = old_M4 - 4.0*old_M3*np.divide(delta,nsamples) + 6.0*old_M2*np.divide(delta,nsamples)**2 + np.multiply((nsamples-1)*(nsamples**2-3*nsamples+3),np.divide(delta**4,nsamples**3))
else:
new_M4 = old_M4 # we are not updating
new_central_moment_1 = new_M1 / nsamples
new_central_moment_2 = new_M2 / nsamples
new_central_moment_3 = new_M3 / nsamples
new_central_moment_4 = new_M4 / nsamples
return new_mean,new_sample_variance,new_central_moment_1,new_central_moment_2,new_central_moment_3,new_central_moment_4,nsamples
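# Worked example (illustrative): feeding the samples 1, 2, 3 one at a time,
# starting from zero-initialized moments and nsamples = 0, yields
# new_mean = 2.0 and new_sample_variance = 1.0, matching numpy.mean([1, 2, 3])
# and numpy.var([1, 2, 3], ddof=1).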
"""
auxiliary function of UpdateOnePassPowerSums of the StatisticalVariable class
input: sample : new value that will update the statistics
old_S1 : old first power sum
old_S2 : old second power sum
old_S3 : old third power sum
old_S4 : old fourth power sum
nsamples : number of samples, it has already been updated in UpdateOnePassCentralMomentsAux_Task
output: new_S1 : updated first power sum
new_s2 : updated second power sum
new_S3 : updated third power sum
new_S4 : updated fourth power sum
"""
@constraint(computing_units=computing_units_auxiliar_utilities)
@task(keep=True,returns=5,priority=True)
def UpdateOnePassPowerSumsAux_Task(sample,old_S1,old_S2,old_S3,old_S4,nsamples):
nsamples = nsamples + 1
if nsamples == 1:
new_S1 = sample
new_S2 = sample**2
new_S3 = sample**3
new_S4 = sample**4
else:
new_S1 = old_S1 + sample
new_S2 = old_S2 + sample**2
new_S3 = old_S3 + sample**3
new_S4 = old_S4 + sample**4
return new_S1,new_S2,new_S3,new_S4,nsamples
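# Worked example (illustrative): accumulating the samples 1, 2, 3 gives the
# power sums S1 = 6, S2 = 14, S3 = 36, S4 = 98, which the h-statistics task
# below consumes together with the sample count.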
"""
auxiliary function of UpdateGlobalPowerSums of the StatisticalVariable class
input: old_S1 : old first power sum
old_S2 : old second power sum
old_S3 : old third power sum
old_S4 : old fourth power sum
number_samples_level : number of samples, it has already been updated in UpdateOnePassCentralMomentsAux_Task
add_S1 : power sum order one to add
add_S2 : power sum order two to add
add_S3 : power sum order three to add
add_S4 : power sum order four to add
add_number_samples_level : number of samples to add
output: new_S1 : updated first power sum
        new_S2 : updated second power sum
new_S3 : updated third power sum
new_S4 : updated fourth power sum
number_samples_level : number of samples of current level
"""
@constraint(computing_units=computing_units_auxiliar_utilities)
@task(keep=True,returns=5,priority=True)
def UpdateGlobalPowerSumsAux_Task(old_S1,old_S2,old_S3,old_S4,number_samples_level,add_S1,add_S2,add_S3,add_S4,add_number_samples_level):
new_S1 = old_S1 + add_S1
new_S2 = old_S2 + add_S2
new_S3 = old_S3 + add_S3
new_S4 = old_S4 + add_S4
number_samples_level = number_samples_level + add_number_samples_level
return new_S1,new_S2,new_S3,new_S4,number_samples_level
"""
function unfolding values from a list, needed by PyCOMPSs for list of lists
input: sample : the list of lists
output: sample[*] : list position * of the list of lists
"""
@constraint(computing_units=computing_units_auxiliar_utilities)
@task(keep=True,returns=4, priority=True)
def UnfoldValuesAux_Task(sample):
return sample[0], sample[1], sample[2], sample[3]
"""
auxiliary function of UpdateBatchesPassPowerSums
input: samples : list of samples
output: the element-wise sum of the given samples' components (the caller invokes this task on mini_batch-sized groups of samples)
"""
@constraint(computing_units=computing_units_auxiliar_utilities)
@task(keep=True,returns=1,priority=True)
def UpdateBatchesPassPowerSumsAux_Task(*samples):
samples_list = np.array(list(samples))
return np.sum(samples_list, axis = 0)
"""
if nsamples == 0:
new_S1 = samples[0]
new_S2 = samples[0]**2
new_S3 = samples[0]**3
new_S4 = samples[0]**4
old_S1 = new_S1
old_S2 = new_S2
old_S3 = new_S3
old_S4 = new_S4
nsamples = 1
samples=samples[1:]
for sample in samples:
nsamples = nsamples + 1
new_S1 = old_S1 + sample
new_S2 = old_S2 + sample**2
new_S3 = old_S3 + sample**3
new_S4 = old_S4 + sample**4
old_S1 = new_S1
old_S2 = new_S2
old_S3 = new_S3
old_S4 = new_S4
return new_S1,new_S2,new_S3,new_S4,nsamples
"""
"""
auxiliary function of UpdateHStatistics of the StatisticalVariable class
input: S1_level : first power sum at defined level
S2_level : second power sum at defined level
S3_level : third power sum at defined level
S4_level : fourth power sum at defined level
number_samples_level : number of samples (already update) for defined level
output: h1_level : first h statistics for defined level
h2_level : second h statistics for defined level
h3_level : third h statistics for defined level
h4_level : fourth h statistics for defined level
"""
@constraint(computing_units=computing_units_auxiliar_utilities)
@task(keep=True,returns=4,priority=True)
def ComputeHStatisticsAux_Task(S1_level,S2_level,S3_level,S4_level,number_samples_level):
h1_level = S1_level / number_samples_level
h2_level = (number_samples_level*S2_level-S1_level**2) / ((number_samples_level-1)*number_samples_level)
h3_level = (number_samples_level**2*S3_level-3*number_samples_level*S2_level*S1_level+2*S1_level**3) / \
((number_samples_level-2)*(number_samples_level-1)*number_samples_level)
h4_level = ((-4*number_samples_level**2+8*number_samples_level-12)*S3_level*S1_level+ \
(number_samples_level**3-2*number_samples_level**2+3*number_samples_level)*S4_level+ \
6*number_samples_level*S2_level*S1_level**2+(9-6*number_samples_level)*S2_level**2-3*S1_level**4) / \
((number_samples_level-3)*(number_samples_level-2)*(number_samples_level-1)*number_samples_level)
return h1_level,h2_level,h3_level,h4_level
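# Worked example (illustrative): with S1 = 6, S2 = 14, S3 = 36, S4 = 98 and
# number_samples_level = 3 (the samples 1, 2, 3), h1 = 2.0 and
# h2 = (3 * 14 - 6 ** 2) / (2 * 3) = 1.0, i.e. the sample mean and the unbiased
# sample variance; h3 and h4 are the analogous unbiased estimators of the third
# and fourth central moments (h4 requires at least four samples).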
"""
auxiliary function of ComputeSkewnessKurtosis of the StatisticalVariable class
input: h2_level : second h statistics for defined level
h3_level : third h statistics for defined level
h4_level : fourth h statistics for defined level
output: skewness_level : skewness for defined level
kurtosis_level : kurtosis for defined level
"""
@constraint(computing_units=computing_units_auxiliar_utilities)
@task(keep=True,returns=2,priority=True)
def ComputeSkewnessKurtosisAux_Task(h2_level,h3_level,h4_level):
skewness_level = h3_level / (np.sqrt(h2_level**3))
kurtosis_level = h4_level / (h2_level**2)
return skewness_level,kurtosis_level
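# Note (illustrative): these are the standardized third and fourth moments, so
# for approximately normal data skewness_level is close to 0 and kurtosis_level
# is close to 3 (plain kurtosis, not excess kurtosis).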
"""
auxiliary function of ComputeSampleCentralMomentsFromScratch of the StatisticalVariable class
input: sample: new value that will update the statistics
number_samples_level : number of samples for defined level
central_moment_from_scratch_1_to_compute : boolean setting if computation is needed
central_moment_from_scratch_2_to_compute : boolean setting if computation is needed
central_moment_from_scratch_3_to_compute : boolean setting if computation is needed
central_moment_from_scratch_3_absolute_to_compute : boolean setting if computation is needed
central_moment_from_scratch_4_to_compute : boolean setting if computation is needed
central_moment_from_scratch_1 : old first central moment
central_moment_from_scratch_2 : old second central moment
central_moment_from_scratch_3 : old third central moment
central_moment_from_scratch_3_absolute : old third central moment absolute value
central_moment_from_scratch_4 : old fourth central moment
output: central_moment_from_scratch_1 : updated first central moment
central_moment_from_scratch_2 : updated second central moment
central_moment_from_scratch_3 : updated third central moment
central_moment_from_scratch_3_absolute : updated third central moment absolute value
central_moment_from_scratch_4 : update fourth central moment
"""
@constraint(computing_units=computing_units_auxiliar_utilities)
@task(keep=True,returns=5,priority=True)
def ComputeSampleCentralMomentsFromScratchAux_Task(number_samples_level,central_moment_from_scratch_1_to_compute,central_moment_from_scratch_2_to_compute, \
central_moment_from_scratch_3_to_compute,central_moment_from_scratch_3_absolute_to_compute,central_moment_from_scratch_4_to_compute, \
central_moment_from_scratch_1,central_moment_from_scratch_2,central_moment_from_scratch_3,central_moment_from_scratch_3_absolute,central_moment_from_scratch_4, \
samples):
# generate a single list from a list of lists
samples = [item for sublist in samples for item in sublist]
# compute the mean
auxiliary_mean = 0.0
for sample in samples:
auxiliary_mean = auxiliary_mean + sample
curr_mean = auxiliary_mean / number_samples_level
for sample in samples:
if (central_moment_from_scratch_1_to_compute):
central_moment_from_scratch_1 = central_moment_from_scratch_1 + ((sample - curr_mean)**1) / number_samples_level
if (central_moment_from_scratch_2_to_compute):
central_moment_from_scratch_2 = central_moment_from_scratch_2 + ((sample - curr_mean)**2) / number_samples_level
if (central_moment_from_scratch_3_to_compute):
central_moment_from_scratch_3 = central_moment_from_scratch_3 + ((sample - curr_mean)**3) / number_samples_level
if (central_moment_from_scratch_3_absolute_to_compute):
central_moment_from_scratch_3_absolute = central_moment_from_scratch_3_absolute + (np.abs(sample - curr_mean)**3) / number_samples_level
if (central_moment_from_scratch_4_to_compute):
central_moment_from_scratch_4 = central_moment_from_scratch_4 + ((sample - curr_mean)**4) / number_samples_level
return central_moment_from_scratch_1,central_moment_from_scratch_2,central_moment_from_scratch_3,central_moment_from_scratch_3_absolute,central_moment_from_scratch_4
class StatisticalVariable(object):
"""The base class for statistical variables"""
def __init__(self):
"""constructor of the class
Keyword arguments:
self : an instance of a class
"""
# values of the variable, organized per level
self.values = []
# mean of the variable per each level
self.raw_moment_1 = []
# sample variance of the variable per each level
self.unbiased_central_moment_2 = []
# moments of the variable per each level M_p = n * mu_p
# mu_p = p-th central moment
# n = number of values
self.central_moment_1 = []
self.central_moment_2 = []
self.central_moment_3 = []
self.central_moment_4 = []
# set which central moments will be computed (moment_2 is mandatory to be computed because it is exploited in the mean evaluation)
self.central_moment_1_to_compute = True
self.central_moment_2_to_compute = True
self.central_moment_3_to_compute = True
self.central_moment_4_to_compute = True
# bias error of the variable
self.bias_error = None
# statistical error of the variable
self.statistical_error = None
# type of variable: scalar or field
self.type = None
# number of samples of the variable
self.number_samples = None
self.batches_number_samples = []
# global power sums
# S_p = \sum_{i=1}^{n} Q(sample_i)**p, organized per level
self.power_sum_1 = []
self.power_sum_2 = []
self.power_sum_3 = []
self.power_sum_4 = []
# power sums batches
self.power_sum_batches_1 = []
self.power_sum_batches_2 = []
self.power_sum_batches_3 = []
self.power_sum_batches_4 = []
# sample central moments \mu_p = \sum_{i=1}^{n} (Q(sample_i)-mean_n)**p / n, organized per level
self.central_moment_from_scratch_1 = []
self.central_moment_from_scratch_2 = []
self.central_moment_from_scratch_3 = []
self.central_moment_from_scratch_3_absolute = [] # \mu_p = \sum_{i=1}^{n} abs((Q(sample_i)-mean_n)**p) / n
self.central_moment_from_scratch_4 = []
self.central_moment_from_scratch_1_to_compute = False
self.central_moment_from_scratch_2_to_compute = False
self.central_moment_from_scratch_3_to_compute = False
self.central_moment_from_scratch_3_absolute_to_compute = False
self.central_moment_from_scratch_4_to_compute = False
# h-statistics h_p, the unbiased central moment estimator with minimal variance, organized per level
self.h_statistics_1 = []
self.h_statistics_2 = []
self.h_statistics_3 = []
self.h_statistics_4 = []
self.h_statistics_computed = False
# skewness of the variable per each level
self.skewness = []
# kurtosis of the variable per each level
self.kurtosis = []
# convergence criteria of the algorithm
self.convergence_criteria = None
"""
function initializing variables of the Statistical Variable class in lists given number of levels
input: self : an instance of the class
number_levels : number of levels considered
number_initial_batches : number of batches of iteration zero
"""
def InitializeLists(self,number_levels,number_initial_batches):
self.number_samples = [0 for _ in range (number_levels)]
self.values = [[[] for _ in range (number_levels)] for _ in range (number_initial_batches)]
self.raw_moment_1 = [[] for _ in range (number_levels)]
self.central_moment_1 = [[] for _ in range (number_levels)]
self.central_moment_2 = [[] for _ in range (number_levels)]
self.central_moment_3 = [[] for _ in range (number_levels)]
self.central_moment_4 = [[] for _ in range (number_levels)]
self.unbiased_central_moment_2 = [[] for _ in range (number_levels)]
self.power_sum_1 = [0 for _ in range (number_levels)]
self.power_sum_2 = [0 for _ in range (number_levels)]
self.power_sum_3 = [0 for _ in range (number_levels)]
self.power_sum_4 = [0 for _ in range (number_levels)]
self.power_sum_batches_1 = [[[] for _ in range (number_levels)] for _ in range (number_initial_batches)]
self.power_sum_batches_2 = [[[] for _ in range (number_levels)] for _ in range (number_initial_batches)]
self.power_sum_batches_3 = [[[] for _ in range (number_levels)] for _ in range (number_initial_batches)]
self.power_sum_batches_4 = [[[] for _ in range (number_levels)] for _ in range (number_initial_batches)]
self.h_statistics_1 = [[] for _ in range (number_levels)]
self.h_statistics_2 = [[] for _ in range (number_levels)]
self.h_statistics_3 = [[] for _ in range (number_levels)]
self.h_statistics_4 = [[] for _ in range (number_levels)]
self.skewness = [[] for _ in range (number_levels)]
self.kurtosis = [[] for _ in range (number_levels)]
self.central_moment_from_scratch_1 = [[] for _ in range (number_levels)]
self.central_moment_from_scratch_2 = [[] for _ in range (number_levels)]
self.central_moment_from_scratch_3 = [[] for _ in range (number_levels)]
self.central_moment_from_scratch_3_absolute = [[] for _ in range (number_levels)]
self.central_moment_from_scratch_4 = [[] for _ in range (number_levels)]
self.batches_number_samples = [[0 for _ in range (number_levels)] for _ in range (number_initial_batches)]
"""
function updating statistic moments and number of samples
input: self : an instance of the class
level : defined level
i_sample : defined sample in level
"""
def UpdateOnePassCentralMoments(self,level,i_sample):
number_samples_level = self.number_samples[level]
sample = self.values[level][i_sample]
old_mean = self.raw_moment_1[level]
# old_M1 = self.central_moment_1[level] * number_samples_level
old_central_moment_1 = self.central_moment_1[level]
compute_M1 = self.central_moment_1_to_compute
# old_M2 = self.central_moment_2[level] * number_samples_level
old_central_moment_2 = self.central_moment_2[level]
compute_M2 = self.central_moment_2_to_compute
# old_M3 = self.central_moment_3[level] * number_samples_level
old_central_moment_3 = self.central_moment_3[level]
compute_M3 = self.central_moment_3_to_compute
# old_M4 = self.central_moment_4[level] * number_samples_level
old_central_moment_4 = self.central_moment_4[level]
compute_M4 = self.central_moment_4_to_compute
new_mean,new_sample_variance,new_central_moment_1,new_central_moment_2,new_central_moment_3,new_central_moment_4,number_samples_level \
= UpdateOnePassCentralMomentsAux_Task(sample,old_mean,old_central_moment_1,compute_M1,old_central_moment_2,compute_M2,old_central_moment_3,compute_M3,old_central_moment_4,compute_M4,number_samples_level)
self.raw_moment_1[level] = new_mean
self.unbiased_central_moment_2[level] = new_sample_variance
self.central_moment_1[level] = new_central_moment_1
self.central_moment_2[level] = new_central_moment_2
self.central_moment_3[level] = new_central_moment_3
self.central_moment_4[level] = new_central_moment_4
self.number_samples[level] = number_samples_level
"""
function updating the power sums S_p
input: self : an instance of the class
level : defined level
i_sample : defined sample in level
"""
def UpdateOnePassPowerSums(self,level,i_sample):
sample = self.values[level][i_sample]
old_S1 = self.power_sum_1[level]
old_S2 = self.power_sum_2[level]
old_S3 = self.power_sum_3[level]
old_S4 = self.power_sum_4[level]
number_samples_level = self.number_samples[level]
new_S1,new_S2,new_S3,new_S4,number_samples_level = UpdateOnePassPowerSumsAux_Task(sample,old_S1,old_S2,old_S3,old_S4,number_samples_level)
self.power_sum_1[level] = new_S1
self.power_sum_2[level] = new_S2
self.power_sum_3[level] = new_S3
self.power_sum_4[level] = new_S4
self.number_samples[level] = number_samples_level
"""
function updating the global power sums
input: self : an instance of the class
level : current level
batch_counter : current batch
"""
def UpdateGlobalPowerSums(self,level,batch_counter):
old_S1 = self.power_sum_1[level]
old_S2 = self.power_sum_2[level]
old_S3 = self.power_sum_3[level]
old_S4 = self.power_sum_4[level]
number_samples_level = self.number_samples[level]
add_S1 = self.power_sum_batches_1[batch_counter][level]
add_S2 = self.power_sum_batches_2[batch_counter][level]
add_S3 = self.power_sum_batches_3[batch_counter][level]
add_S4 = self.power_sum_batches_4[batch_counter][level]
add_number_samples_level = self.batches_number_samples[batch_counter][level]
new_S1,new_S2,new_S3,new_S4,number_samples_level = UpdateGlobalPowerSumsAux_Task(old_S1,old_S2,old_S3,old_S4,number_samples_level,add_S1,add_S2,add_S3,add_S4,add_number_samples_level)
self.power_sum_1[level] = new_S1
self.power_sum_2[level] = new_S2
self.power_sum_3[level] = new_S3
self.power_sum_4[level] = new_S4
self.number_samples[level] = number_samples_level
"""
function updating the in-batch power sums
input: self : an instance of the class
level : current level
batch_counter : current batch
mini_batch : size such that we update the in-batch power sums with mini_batch samples
"""
def UpdateBatchesPassPowerSum(self,level,batch_counter,mini_batch=50):
samples = self.values[batch_counter][level]
#for mini_batch in range (0,len(samples)):
while len(samples) > 1:
mini_batches_samples = samples[:mini_batch]
samples = samples[mini_batch:]
new_power_sums = UpdateBatchesPassPowerSumsAux_Task(*mini_batches_samples)
samples.append(new_power_sums)
new_S1, new_S2, new_S3, new_S4 = UnfoldValuesAux_Task(samples[0])
self.power_sum_batches_1[batch_counter][level] = new_S1
self.power_sum_batches_2[batch_counter][level] = new_S2
self.power_sum_batches_3[batch_counter][level] = new_S3
self.power_sum_batches_4[batch_counter][level] = new_S4
"""
function computing the h statistics h_p from the power sums
input: self : an instance of the class
level : defined level
"""
def ComputeHStatistics(self,level):
number_samples_level = self.number_samples[level]
S1_level = self.power_sum_1[level]
S2_level = self.power_sum_2[level]
S3_level = self.power_sum_3[level]
S4_level = self.power_sum_4[level]
self.h_statistics_computed = True
h1_level,h2_level,h3_level,h4_level = ComputeHStatisticsAux_Task(S1_level,S2_level,S3_level,S4_level,number_samples_level)
self.h_statistics_1[level] = h1_level
self.h_statistics_2[level] = h2_level
self.h_statistics_3[level] = h3_level
self.h_statistics_4[level] = h4_level
"""
function computing from scratch the central moments and the absolute third central moment
input: self : an instance of the class
level : defined level
"""
def ComputeSampleCentralMomentsFromScratch(self,level,number_samples_level):
# initialize central moments
central_moment_from_scratch_1 = 0.0
central_moment_from_scratch_2 = 0.0
central_moment_from_scratch_3 = 0.0
central_moment_from_scratch_3_absolute = 0.0
central_moment_from_scratch_4 = 0.0
central_moment_from_scratch_1_to_compute = self.central_moment_from_scratch_1_to_compute
central_moment_from_scratch_2_to_compute = self.central_moment_from_scratch_2_to_compute
central_moment_from_scratch_3_to_compute = self.central_moment_from_scratch_3_to_compute
central_moment_from_scratch_3_absolute_to_compute = self.central_moment_from_scratch_3_absolute_to_compute
central_moment_from_scratch_4_to_compute = self.central_moment_from_scratch_4_to_compute
samples = []
for batch in range (len(self.values)):
for mini_batch_samples in self.values[batch][level]:
samples.append(mini_batch_samples)
central_moment_from_scratch_1,central_moment_from_scratch_2,central_moment_from_scratch_3,central_moment_from_scratch_3_absolute,central_moment_from_scratch_4 = \
ComputeSampleCentralMomentsFromScratchAux_Task(number_samples_level,central_moment_from_scratch_1_to_compute, \
central_moment_from_scratch_2_to_compute,central_moment_from_scratch_3_to_compute,central_moment_from_scratch_3_absolute_to_compute,central_moment_from_scratch_4_to_compute, \
central_moment_from_scratch_1,central_moment_from_scratch_2,central_moment_from_scratch_3,central_moment_from_scratch_3_absolute,central_moment_from_scratch_4, samples)
self.central_moment_from_scratch_1[level] = central_moment_from_scratch_1
self.central_moment_from_scratch_2[level] = central_moment_from_scratch_2
self.central_moment_from_scratch_3[level] = central_moment_from_scratch_3
self.central_moment_from_scratch_3_absolute[level] = central_moment_from_scratch_3_absolute
self.central_moment_from_scratch_4[level] = central_moment_from_scratch_4
"""
function computing the skewness and the kurtosis from the h statistics
skewness = \mu_3 / \sqrt(\mu_2^3)
kurtosis = \mu_4 / \mu_2^2
input: self : an instance of the class
level : defined level
"""
def ComputeSkewnessKurtosis(self,level):
if (self.h_statistics_computed):
h2_level = self.h_statistics_2[level]
h3_level = self.h_statistics_3[level]
h4_level = self.h_statistics_4[level]
skewness_level,kurtosis_level =ComputeSkewnessKurtosisAux_Task(h2_level,h3_level,h4_level)
self.skewness[level] = skewness_level
self.kurtosis[level] = kurtosis_level
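# Illustrative, self-contained sketch (not part of the original class; assumes numpy
# is available): the same skewness and kurtosis definitions quoted in the docstring
# above, evaluated directly from the sample central moments of a toy data set.
if __name__ == "__main__":
    import numpy as np
    toy_samples = np.random.normal(loc=1.0, scale=2.0, size=100000)
    deviations = toy_samples - toy_samples.mean()
    mu_2 = np.mean(deviations**2)
    mu_3 = np.mean(deviations**3)
    mu_4 = np.mean(deviations**4)
    print("skewness =", mu_3 / np.sqrt(mu_2**3))  # close to 0 for a normal sample
    print("kurtosis =", mu_4 / mu_2**2)           # close to 3 for a normal sample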
|
contrib/experiments/interpretation/penobscot/local/default.py | elmajdma/seismic-deeplearning | 270 | 26829 | <gh_stars>100-1000
# ------------------------------------------------------------------------------
# Copyright (c) Microsoft
# Licensed under the MIT License.
# ------------------------------------------------------------------------------
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from yacs.config import CfgNode as CN
_C = CN()
_C.OUTPUT_DIR = "output" # This will be the base directory for all output, such as logs and saved models
_C.LOG_DIR = "" # This will be a subdirectory inside OUTPUT_DIR
_C.GPUS = (0,)
_C.WORKERS = 4
_C.PRINT_FREQ = 20
_C.AUTO_RESUME = False
_C.PIN_MEMORY = True
_C.LOG_CONFIG = "logging.conf"
_C.SEED = 42
_C.OPENCV_BORDER_CONSTANT = 0
# size of voxel cube: WINDOW_SIZE x WINDOW_SIZE x WINDOW_SIZE; used for 3D models only
_C.WINDOW_SIZE = 65
# Cudnn related params
_C.CUDNN = CN()
_C.CUDNN.BENCHMARK = True
_C.CUDNN.DETERMINISTIC = False
_C.CUDNN.ENABLED = True
# DATASET related params
_C.DATASET = CN()
_C.DATASET.ROOT = ""
_C.DATASET.NUM_CLASSES = 7
_C.DATASET.CLASS_WEIGHTS = [
0.02630481,
0.05448931,
0.0811898,
0.01866496,
0.15868563,
0.0875993,
0.5730662,
]
_C.DATASET.INLINE_HEIGHT = 1501
_C.DATASET.INLINE_WIDTH = 481
# common params for NETWORK
_C.MODEL = CN()
_C.MODEL.NAME = "resnet_unet"
_C.MODEL.IN_CHANNELS = 1
_C.MODEL.PRETRAINED = ""
_C.MODEL.EXTRA = CN(new_allowed=True)
# training
_C.TRAIN = CN()
_C.TRAIN.COMPLETE_PATCHES_ONLY = True
_C.TRAIN.MIN_LR = 0.001
_C.TRAIN.MAX_LR = 0.01
_C.TRAIN.MOMENTUM = 0.9
_C.TRAIN.BEGIN_EPOCH = 0
_C.TRAIN.END_EPOCH = 300
_C.TRAIN.BATCH_SIZE_PER_GPU = 32
_C.TRAIN.WEIGHT_DECAY = 0.0001
_C.TRAIN.SNAPSHOTS = 5
_C.TRAIN.MODEL_DIR = "models" # This will be a subdirectory inside OUTPUT_DIR
_C.TRAIN.AUGMENTATION = True
_C.TRAIN.STRIDE = 64
_C.TRAIN.PATCH_SIZE = 128
_C.TRAIN.MEAN = [-0.0001777, 0.49, -0.0000688] # 0.0009996710808862074
_C.TRAIN.STD = [0.14076, 0.2717, 0.06286] # 0.20976548783479299
_C.TRAIN.MAX = 1
_C.TRAIN.DEPTH = "patch" # Options are none, patch, and section
# None adds no depth information and the num of channels remains at 1
# Patch adds depth per patch so is simply the height of that patch from 0 to 1, channels=3
# Section adds depth per section so contains depth information for the whole section, channels=3
_C.TRAIN.AUGMENTATIONS = CN()
_C.TRAIN.AUGMENTATIONS.RESIZE = CN()
_C.TRAIN.AUGMENTATIONS.RESIZE.HEIGHT = 256
_C.TRAIN.AUGMENTATIONS.RESIZE.WIDTH = 256
_C.TRAIN.AUGMENTATIONS.PAD = CN()
_C.TRAIN.AUGMENTATIONS.PAD.HEIGHT = 256
_C.TRAIN.AUGMENTATIONS.PAD.WIDTH = 256
# validation
_C.VALIDATION = CN()
_C.VALIDATION.BATCH_SIZE_PER_GPU = 32
_C.VALIDATION.COMPLETE_PATCHES_ONLY = True
# TEST
_C.TEST = CN()
_C.TEST.MODEL_PATH = ""
_C.TEST.COMPLETE_PATCHES_ONLY = True
_C.TEST.AUGMENTATIONS = CN()
_C.TEST.AUGMENTATIONS.RESIZE = CN()
_C.TEST.AUGMENTATIONS.RESIZE.HEIGHT = 256
_C.TEST.AUGMENTATIONS.RESIZE.WIDTH = 256
_C.TEST.AUGMENTATIONS.PAD = CN()
_C.TEST.AUGMENTATIONS.PAD.HEIGHT = 256
_C.TEST.AUGMENTATIONS.PAD.WIDTH = 256
def update_config(cfg, options=None, config_file=None):
cfg.defrost()
if config_file:
cfg.merge_from_file(config_file)
if options:
cfg.merge_from_list(options)
cfg.freeze()
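# Minimal usage sketch (illustrative; the YAML path and the override pair below are
# placeholders, not files or settings shipped with this config): clone the defaults,
# then let an experiment YAML and command-line style overrides take precedence.
def example_config(config_file="configs/penobscot_example.yaml"):
    cfg = _C.clone()
    update_config(cfg, options=["TRAIN.BATCH_SIZE_PER_GPU", 16], config_file=config_file)
    return cfg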
if __name__ == "__main__":
import sys
with open(sys.argv[1], "w") as f:
print(_C, file=f)
|
src/backend/common/sitevars/flask_secrets.py | ofekashery/the-blue-alliance | 266 | 26831 | from typing import TypedDict
from backend.common.sitevars.sitevar import Sitevar
class ContentType(TypedDict):
secret_key: str
class FlaskSecrets(Sitevar[ContentType]):
DEFAULT_SECRET_KEY: str = "thebluealliance"
@staticmethod
def key() -> str:
return "flask.secrets"
@staticmethod
def description() -> str:
return "Secret key for Flask session"
@staticmethod
def default_value() -> ContentType:
return ContentType(secret_key=FlaskSecrets.DEFAULT_SECRET_KEY)
@classmethod
def secret_key(cls) -> str:
secret_key = cls.get().get("secret_key")
return secret_key if secret_key else FlaskSecrets.DEFAULT_SECRET_KEY
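# Usage sketch (illustrative; `app` stands for an assumed Flask application object,
# which is not part of this module): pull the sitevar-backed secret into the app so
# sessions can be signed.
def configure_session_key(app):
    app.secret_key = FlaskSecrets.secret_key()
    return app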
|
mmdet/ops/nms_rotated/nms_rotated_wrapper.py | vpeopleonatank/OBBDetection | 274 | 26841 | <reponame>vpeopleonatank/OBBDetection
import BboxToolkit as bt
import numpy as np
import torch
from . import nms_rotated_ext
def obb2hbb(obboxes):
center, w, h, theta = torch.split(obboxes, [2, 1, 1, 1], dim=1)
Cos, Sin = torch.cos(theta), torch.sin(theta)
x_bias = torch.abs(w/2 * Cos) + torch.abs(h/2 * Sin)
y_bias = torch.abs(w/2 * Sin) + torch.abs(h/2 * Cos)
bias = torch.cat([x_bias, y_bias], dim=1)
return torch.cat([center-bias, center+bias], dim=1)
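# Quick sanity sketch for obb2hbb (illustrative helper, not used by the wrappers
# below): for an axis-aligned oriented box (theta = 0) the enclosing horizontal box
# is simply [cx - w/2, cy - h/2, cx + w/2, cy + h/2].
def _obb2hbb_example():
    obb = torch.tensor([[10.0, 20.0, 4.0, 6.0, 0.0]])  # (cx, cy, w, h, theta)
    return obb2hbb(obb)  # expected: tensor([[ 8., 17., 12., 23.]])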
def obb_nms(dets, iou_thr, device_id=None):
if isinstance(dets, torch.Tensor):
is_numpy = False
dets_th = dets
elif isinstance(dets, np.ndarray):
is_numpy = True
device = 'cpu' if device_id is None else f'cuda:{device_id}'
dets_th = torch.from_numpy(dets).to(device)
else:
        raise TypeError('dets must be either a Tensor or numpy array, '
f'but got {type(dets)}')
if dets_th.numel() == 0:
inds = dets_th.new_zeros(0, dtype=torch.int64)
else:
        # the same bug will happen when bboxes are too small
too_small = dets_th[:, [2, 3]].min(1)[0] < 0.001
if too_small.all():
inds = dets_th.new_zeros(0, dtype=torch.int64)
else:
ori_inds = torch.arange(dets_th.size(0))
ori_inds = ori_inds[~too_small]
dets_th = dets_th[~too_small]
bboxes, scores = dets_th[:, :5], dets_th[:, 5]
inds = nms_rotated_ext.nms_rotated(bboxes, scores, iou_thr)
inds = ori_inds[inds]
if is_numpy:
inds = inds.cpu().numpy()
return dets[inds, :], inds
def poly_nms(dets, iou_thr, device_id=None):
if isinstance(dets, torch.Tensor):
is_numpy = False
dets_th = dets
elif isinstance(dets, np.ndarray):
is_numpy = True
device = 'cpu' if device_id is None else f'cuda:{device_id}'
dets_th = torch.from_numpy(dets).to(device)
else:
        raise TypeError('dets must be either a Tensor or numpy array, '
f'but got {type(dets)}')
if dets_th.device == torch.device('cpu'):
raise NotImplementedError
inds = nms_rotated_ext.nms_poly(dets_th.float(), iou_thr)
if is_numpy:
inds = inds.cpu().numpy()
return dets[inds, :], inds
def BT_nms(dets, iou_thr, device_id=None):
if isinstance(dets, torch.Tensor):
is_tensor = True
device = dets.device
dets_np = dets.cpu().numpy()
elif isinstance(dets, np.ndarray):
is_tensor = False
dets_np = dets
else:
        raise TypeError('dets must be either a Tensor or numpy array, '
f'but got {type(dets)}')
bboxes, scores = dets_np[:, :-1], dets_np[:, -1]
inds = bt.bbox_nms(bboxes, scores, iou_thr=iou_thr, score_thr=0)
if is_tensor:
inds = torch.from_numpy(inds).to(device)
return dets[inds, :], inds
def arb_batched_nms(bboxes, scores, inds, nms_cfg, class_agnostic=False):
nms_cfg_ = nms_cfg.copy()
class_agnostic = nms_cfg_.pop('class_agnostic', class_agnostic)
if class_agnostic:
bboxes_for_nms = bboxes
else:
hbboxes = obb2hbb(bboxes) if bboxes.size(-1) == 5 else bboxes
max_coordinate = hbboxes.max() - hbboxes.min()
offsets = inds.to(bboxes) * (max_coordinate + 1)
if bboxes.size(-1) == 5:
bboxes_for_nms = bboxes.clone()
bboxes_for_nms[:, :2] = bboxes_for_nms[:, :2] + offsets[:, None]
else:
bboxes_for_nms = bboxes + offsets[:, None]
nms_type = nms_cfg_.pop('type', 'BT_nms')
try:
nms_op = eval(nms_type)
except NameError:
from ..nms import nms_wrapper
nms_op = getattr(nms_wrapper, nms_type)
dets, keep = nms_op(
torch.cat([bboxes_for_nms, scores[:, None]], -1), **nms_cfg_)
bboxes = bboxes[keep]
scores = dets[:, -1]
return torch.cat([bboxes, scores[:, None]], -1), keep
|
stronghold/tests/testmixins.py | davitovmasyan/django-stronghold | 252 | 26846 | from stronghold.views import StrongholdPublicMixin
import django
from django.views.generic import View
from django.views.generic.base import TemplateResponseMixin
if django.VERSION[:2] < (1, 9):
from django.utils import unittest
else:
import unittest
class StrongholdMixinsTests(unittest.TestCase):
def test_public_mixin_sets_attr(self):
class TestView(StrongholdPublicMixin, View):
pass
self.assertTrue(TestView.dispatch.STRONGHOLD_IS_PUBLIC)
def test_public_mixin_sets_attr_with_multiple_mixins(self):
class TestView(StrongholdPublicMixin, TemplateResponseMixin, View):
template_name = 'dummy.html'
self.assertTrue(TestView.dispatch.STRONGHOLD_IS_PUBLIC)
|
pydis_site/apps/api/migrations/0055_reminder_mentions.py | Numerlor/site | 700 | 26851 | <filename>pydis_site/apps/api/migrations/0055_reminder_mentions.py
# Generated by Django 2.2.14 on 2020-07-15 07:37
import django.contrib.postgres.fields
import django.core.validators
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('api', '0054_user_invalidate_unknown_role'),
]
operations = [
migrations.AddField(
model_name='reminder',
name='mentions',
field=django.contrib.postgres.fields.ArrayField(base_field=models.BigIntegerField(validators=[django.core.validators.MinValueValidator(limit_value=0, message='Mention IDs cannot be negative.')]), blank=True, default=list, help_text='IDs of roles or users to ping with the reminder.', size=None),
),
]
|
hata/ext/extension_loader/client_extension.py | Multiface24111/hata | 173 | 26853 | <gh_stars>100-1000
__all__ = ()
from ...backend.utils import KeepType
from ...discord.client import Client
from .extension import EXTENSIONS, EXTENSION_STATE_LOADED
@KeepType(Client)
class Client:
@property
def extensions(self):
"""
Returns a list of extensions added to the client. Added by the `extension_loader` extension.
Returns
-------
extensions : `list` of ``Extension``
"""
extensions = []
for extension in EXTENSIONS.values():
if extension._state == EXTENSION_STATE_LOADED:
snapshot_difference = extension._snapshot_difference
if (snapshot_difference is not None):
for client, client_snapshot_difference in snapshot_difference:
if (self is client) and client_snapshot_difference:
extensions.append(extension)
break
return extensions
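# Usage sketch (illustrative; `client` stands for an already constructed Client, and
# the `name` attribute of ``Extension`` is an assumption based on the extension
# loader's registry being keyed by extension name):
def _loaded_extension_names(client):
    return [extension.name for extension in client.extensions]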
|
MANN/Utils/similarities.py | jgyllinsky/How-to-Learn-from-Little-Data | 161 | 26859 | import tensorflow as tf
def cosine_similarity(x, y, eps=1e-6):
z = tf.batch_matmul(x, tf.transpose(y, perm=[0,2,1]))
z /= tf.sqrt(tf.multiply(tf.expand_dims(tf.reduce_sum(tf.multiply(x,x), 2), 2),tf.expand_dims(tf.reduce_sum(tf.multiply(y,y), 2), 1)) + eps)
return z
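# Reference sketch (illustrative; assumes numpy is available): the same batched cosine
# similarity written with plain numpy, handy for checking the graph op above.
def cosine_similarity_np(x, y, eps=1e-6):
    import numpy as np
    z = np.matmul(x, np.transpose(y, (0, 2, 1)))
    norms = np.sqrt(np.sum(x * x, axis=2)[:, :, None] * np.sum(y * y, axis=2)[:, None, :] + eps)
    return z / norms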
|
data/synthetic/analyze.py | thonic/pyhawkes | 221 | 26877 | import gzip
import pickle
import os
def analyze(data_path):
"""
Run the comparison on the given data file
:param data_path:
:return:
"""
if data_path.endswith(".gz"):
with gzip.open(data_path, 'r') as f:
S, true_model = pickle.load(f)
else:
        with open(data_path, 'rb') as f:
S, true_model = pickle.load(f)
print("True model:")
print(true_model)
T = float(S.shape[0])
N = S.sum(axis=0)
print("lambda0: ", true_model.bias_model.lambda0.mean())
print("Average event count: ", N.mean(), " +- ", N.std())
print("Average event count: ", (N/T).mean(), " +- ", (N/T).std())
# seed = 2650533028
K = 50
C = 5
T = 100000
data_path = os.path.join("data", "synthetic", "synthetic_K%d_C%d_T%d.pkl.gz" % (K,C,T))
analyze(data_path)
|
rl_coach/graph_managers/hrl_graph_manager.py | jl45621/coach | 1,960 | 26882 | <reponame>jl45621/coach
#
# Copyright (c) 2017 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from typing import List, Union, Tuple
from rl_coach.base_parameters import AgentParameters, VisualizationParameters, TaskParameters, \
PresetValidationParameters
from rl_coach.core_types import EnvironmentSteps
from rl_coach.environments.environment import EnvironmentParameters, Environment
from rl_coach.graph_managers.graph_manager import GraphManager, ScheduleParameters
from rl_coach.level_manager import LevelManager
from rl_coach.utils import short_dynamic_import
class HRLGraphManager(GraphManager):
"""
A simple HRL graph manager creates a deep hierarchy with a single composite agent per hierarchy level, and a single
environment which is interacted with.
"""
def __init__(self, agents_params: List[AgentParameters], env_params: EnvironmentParameters,
schedule_params: ScheduleParameters, vis_params: VisualizationParameters,
consecutive_steps_to_run_each_level: Union[EnvironmentSteps, List[EnvironmentSteps]],
preset_validation_params: PresetValidationParameters = PresetValidationParameters()):
"""
:param agents_params: the parameters of all the agents in the hierarchy starting from the top level of the
hierarchy to the bottom level
:param env_params: the parameters of the environment
:param schedule_params: the parameters for scheduling the graph
:param vis_params: the visualization parameters
:param consecutive_steps_to_run_each_level: the number of time steps that each level is ran.
for example, when the top level gives the bottom level a goal, the bottom level can act for
consecutive_steps_to_run_each_level steps and try to reach that goal. This is expected to be either
an EnvironmentSteps which will be used for all levels, or an EnvironmentSteps for each level as a list.
"""
super().__init__('hrl_graph', schedule_params, vis_params)
self.agents_params = agents_params
self.env_params = env_params
self.preset_validation_params = preset_validation_params
if isinstance(consecutive_steps_to_run_each_level, list):
if len(consecutive_steps_to_run_each_level) != len(self.agents_params):
raise ValueError("If the consecutive_steps_to_run_each_level is given as a list, it should match "
"the number of levels in the hierarchy. Alternatively, it is possible to use a single "
"value for all the levels, by passing an EnvironmentSteps")
elif isinstance(consecutive_steps_to_run_each_level, EnvironmentSteps):
self.consecutive_steps_to_run_each_level = [consecutive_steps_to_run_each_level] * len(self.agents_params)
for agent_params in agents_params:
agent_params.visualization = self.visualization_parameters
if agent_params.input_filter is None:
agent_params.input_filter = self.env_params.default_input_filter()
if agent_params.output_filter is None:
agent_params.output_filter = self.env_params.default_output_filter()
if len(self.agents_params) < 2:
raise ValueError("The HRL graph manager must receive the agent parameters for at least two levels of the "
"hierarchy. Otherwise, use the basic RL graph manager.")
def _create_graph(self, task_parameters: TaskParameters) -> Tuple[List[LevelManager], List[Environment]]:
self.env_params.seed = task_parameters.seed
env = short_dynamic_import(self.env_params.path)(**self.env_params.__dict__,
visualization_parameters=self.visualization_parameters)
for agent_params in self.agents_params:
agent_params.task_parameters = task_parameters
# we need to build the hierarchy in reverse order (from the bottom up) in order for the spaces of each level
# to be known
level_managers = []
current_env = env
# out_action_space = env.action_space
for level_idx, agent_params in reversed(list(enumerate(self.agents_params))):
# TODO: the code below is specific for HRL on observation scale
# in action space
# if level_idx == 0:
# # top level agents do not get directives
# in_action_space = None
# else:
# pass
# attention_size = (env.state_space['observation'].shape - 1)//4
# in_action_space = AttentionActionSpace(shape=2, low=0, high=env.state_space['observation'].shape - 1,
# forced_attention_size=attention_size)
# agent_params.output_filter.action_filters['masking'].set_masking(0, attention_size)
agent_params.name = "agent_{}".format(level_idx)
agent_params.is_a_highest_level_agent = level_idx == 0
agent = short_dynamic_import(agent_params.path)(agent_params)
level_manager = LevelManager(
agents=agent,
environment=current_env,
real_environment=env,
steps_limit=self.consecutive_steps_to_run_each_level[level_idx],
should_reset_agent_state_after_time_limit_passes=level_idx > 0,
name="level_{}".format(level_idx)
)
current_env = level_manager
level_managers.insert(0, level_manager)
# out_action_space = in_action_space
return level_managers, [env]
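# Preset-style usage sketch (illustrative; the agent/environment/schedule parameter
# objects are assumed to be built elsewhere, as in the rl_coach HRL presets):
def _example_hrl_graph_manager(top_agent_params, bottom_agent_params, env_params,
                               schedule_params, vis_params):
    # the bottom level acts for 40 environment steps on every goal handed down by the top level
    return HRLGraphManager(agents_params=[top_agent_params, bottom_agent_params],
                           env_params=env_params,
                           schedule_params=schedule_params,
                           vis_params=vis_params,
                           consecutive_steps_to_run_each_level=EnvironmentSteps(40))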
|
sphinx-sources/Examples/ComputerPrac/FresnelPlane.py | jccmak/lightpipes | 132 | 26893 | #!/usr/bin/env python
"""
Computer practical 6.1. Fresnel diffraction, plane wavefront.
=============================================================
This is part of the 'computer practical' set of assignments.
Demonstrates Fresnel diffraction when a plane wavefront enters
a round hole.
Measure the values of z and d for which minima and/or maxima on-axis occur
and apply the Fresnel-zone theory to find the wavelength of the light.
"""
import matplotlib
matplotlib.use("TkAgg")
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
import matplotlib.pyplot as plt
import sys
import webbrowser
if sys.version_info[0] < 3:
from Tkinter import *
import Tkinter as Tk
else:
from tkinter import *
import tkinter as Tk
from LightPipes import *
root = Tk.Tk()
root.wm_title("Computer practical: 6.1 Fresnel plane wavefront. LP-version = " + LPversion)
root.wm_protocol("WM_DELETE_WINDOW", root.quit)
wavelength=530*nm;
size=5*mm;
N=200; N2=int(N/2)
z=20*cm
R=0.5*mm
D=DoubleVar()
Z=DoubleVar()
D.set(2*R/mm)
Z.set(z/cm)
fig=plt.figure(figsize=(8,8))
ax1 = fig.add_subplot(111)
canvas = FigureCanvasTkAgg(fig, master=root)
canvas._tkcanvas.pack(side=Tk.LEFT, fill=Tk.BOTH, expand=1)
v=StringVar()
def TheExample(event):
global I
F=Begin(size,wavelength,N)
z=Z.get()*cm
R=D.get()/2*mm
F=CircAperture(R,0,0,F)
FN=R*R/z/wavelength
if (FN >= 15.0):
F=Forvard(z,F)
else:
F=Fresnel(z,F)
I=Intensity(0,F)
ax1.clear()
ax1.contourf(I,50,cmap='hot'); ax1.axis('off'); ax1.axis('equal')
str='Intensity distribution\ncenter-irradiance = %3.3f [a.u.]' %I[N2][N2]
ax1.set_title(str)
canvas.draw()
def motion(event):
x=event.xdata;y=event.ydata
if (x and y is not None and x>0 and x<N and y>0 and y<N):
v.set('x=%3.2f mm, y=%3.2f mm\n I=%3.3f [a.u.]' %((-size/2+x*size/N)/mm,(-size/2+y*size/N)/mm,I[int(x)][int(y)]))
root.configure(cursor='crosshair')
else:
v.set('')
root.configure(cursor='arrow')
def openbrowser(event):
webbrowser.open_new(r"https://opticspy.github.io/lightpipes/FresnelDiffraction.html")
def _quit():
root.quit()
Scale( root,
takefocus = 1,
orient='horizontal',
label = 'diameter aperture [mm]',
length = 200, from_=0.5, to=size/2/mm,
resolution = 0.001,
variable = D,
cursor="hand2",
command = TheExample).pack()
Scale( root,
takefocus = 1,
orient='horizontal',
label = 'z [cm]',
length = 200,
from_=0.01, to=200.0,
resolution = 0.01,
variable = Z,
cursor="hand2",
command = TheExample).pack()
Button( root,
width = 24,
text='Quit',
cursor="hand2",
command=_quit).pack(pady=10)
link = Label(root, text="help", fg="blue", cursor="hand2")
link.pack()
link.bind("<Button-1>", openbrowser)
Label(root, textvariable=v).pack(pady=50)
cid = fig.canvas.mpl_connect('motion_notify_event', motion)
TheExample(0)
root.mainloop()
root.destroy()
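# Fresnel-zone sketch (illustrative, not used by the GUI above; uses the default
# aperture and distance set earlier): the number of Fresnel zones inside the aperture
# is N = R**2 / (wavelength * z); on-axis maxima are expected for odd N and minima for
# even N, which is how measured (z, d) pairs yield the wavelength.
def fresnel_zone_number(R=0.5*mm, z=20*cm, wavelength=530*nm):
    return R * R / (wavelength * z)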
|
python/parserDev/brothon/live_simulator.py | jzadeh/aktaion | 112 | 26926 | """LiveSimulator: This class reads in various Bro IDS logs. The class utilizes
the BroLogReader and simply loops over the static bro log
file, replaying rows and changing any time stamps
Args:
eps (int): Events Per Second that the simulator will emit events (default = 10)
max_rows (int): The maximum number of rows to generate (default = None (go forever))
"""
from __future__ import print_function
import os
import time
import datetime
import itertools
# Third party
import numpy as np
# Local Imports
from brothon import bro_log_reader
from brothon.utils import file_utils
class LiveSimulator(object):
"""LiveSimulator: This class reads in various Bro IDS logs. The class utilizes the
BroLogReader and simply loops over the static bro log file
replaying rows at the specified EPS and changing timestamps to 'now()'
"""
def __init__(self, filepath, eps=10, max_rows=None):
"""Initialization for the LiveSimulator Class
Args:
eps (int): Events Per Second that the simulator will emit events (default = 10)
max_rows (int): The maximum number of rows to generate (default = None (go forever))
"""
# Compute EPS timer
# Logic:
# - Normal distribution centered around 1.0/eps
# - Make sure never less than 0
# - Precompute 1000 deltas and then just cycle around
self.eps_timer = itertools.cycle([max(0, delta) for delta in np.random.normal(1.0/float(eps), .5/float(eps), size=1000)])
# Initialize the Bro log reader
self.log_reader = bro_log_reader.BroLogReader(filepath, tail=False)
# Store max_rows
self.max_rows = max_rows
def readrows(self):
"""Using the BroLogReader this method yields each row of the log file
replacing timestamps, looping and emitting rows based on EPS rate
"""
# Loop forever or until max_rows is reached
num_rows = 0
while True:
# Yield the rows from the internal reader
for row in self.log_reader.readrows():
yield self.replace_timestamp(row)
# Sleep and count rows
time.sleep(next(self.eps_timer))
num_rows += 1
# Check for max_rows
if self.max_rows and (num_rows >= self.max_rows):
return
@staticmethod
def replace_timestamp(row):
"""Replace the timestamp with now()"""
if 'ts' in row:
row['ts'] = datetime.datetime.utcnow()
return row
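# Standalone sketch (illustrative, not used by the class): the same EPS timing trick
# as in __init__ above -- precompute jittered sleep intervals around 1.0/eps and
# cycle through them forever.
def _eps_timer_example(eps=10, size=5):
    deltas = itertools.cycle([max(0, d) for d in np.random.normal(1.0/float(eps), .5/float(eps), size=size)])
    return [next(deltas) for _ in range(size)]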
def test():
"""Test for LiveSimulator Python Class"""
# Grab a test file
data_path = file_utils.relative_dir(__file__, '../data')
test_path = os.path.join(data_path, 'conn.log')
print('Opening Data File: {:s}'.format(test_path))
# Create a LiveSimulator reader
reader = LiveSimulator(test_path, max_rows=10)
for line in reader.readrows():
print(line)
print('Read with max_rows Test successful!')
if __name__ == '__main__':
# Run the test for easy testing/debugging
test()
|
test/util_test.py | quiet-oceans/libais | 161 | 26943 | #!/usr/bin/env python
"""Tests for ais.util."""
import unittest
from ais import util
import six
class UtilTest(unittest.TestCase):
def testMaybeToNumber(self):
self.assertEqual(util.MaybeToNumber(None), None)
self.assertEqual(util.MaybeToNumber([]), [])
self.assertEqual(util.MaybeToNumber({}), {})
self.assertEqual(util.MaybeToNumber('a'), 'a')
self.assertEqual(util.MaybeToNumber(1), 1)
self.assertEqual(util.MaybeToNumber(-3.12), -3.12)
self.assertEqual(util.MaybeToNumber('-1'), -1)
self.assertIsInstance(util.MaybeToNumber('-1'), int)
self.assertEqual(util.MaybeToNumber('42.0'), 42.0)
self.assertIsInstance(util.MaybeToNumber('42.0'), float)
value = 9999999999999999999999999
value_str = '9999999999999999999999999'
self.assertEqual(util.MaybeToNumber(value_str), value)
self.assertIsInstance(util.MaybeToNumber(value_str), six.integer_types)
self.assertEqual(
util.MaybeToNumber('1e99999999999999999999999'), float('inf'))
self.assertEqual(
util.MaybeToNumber('-1e99999999999999999999999'), float('-inf'))
if __name__ == '__main__':
unittest.main()
|
Chapter8/listing8_1.py | hohsieh/osgeopy-code | 160 | 26945 | <filename>Chapter8/listing8_1.py
# Script to reproject a shapefile.
from osgeo import ogr, osr
# Create an output SRS.
sr = osr.SpatialReference()
sr.ImportFromProj4('''+proj=aea +lat_1=29.5 +lat_2=45.5 +lat_0=23
+lon_0=-96 +x_0=0 +y_0=0 +ellps=GRS80
+datum=NAD83 +units=m +no_defs''')
# Don't forget to change your directory here.
ds = ogr.Open(r'D:\osgeopy-data\US', 1)
# Get the input layer.
in_lyr = ds.GetLayer('us_volcanos')
# Create the empty output layer.
out_lyr = ds.CreateLayer('us_volcanos_aea', sr,
ogr.wkbPoint)
out_lyr.CreateFields(in_lyr.schema)
# Loop through the features in the input layer.
out_feat = ogr.Feature(out_lyr.GetLayerDefn())
for in_feat in in_lyr:
# Clone the geometry, project it, and add it to the feature.
geom = in_feat.geometry().Clone()
geom.TransformTo(sr)
out_feat.SetGeometry(geom)
# Copy attributes.
for i in range(in_feat.GetFieldCount()):
out_feat.SetField(i, in_feat.GetField(i))
# Insert the feature
out_lyr.CreateFeature(out_feat)
|
Validation/RecoTrack/python/customiseMTVForBPix123Holes.py | ckamtsikis/cmssw | 852 | 26947 | <filename>Validation/RecoTrack/python/customiseMTVForBPix123Holes.py
from __future__ import print_function
# This customise file provides an example (in the form of holes in
# BPix L1-L2 and L3-L3) on how to select a subset of generalTracks
# (e.g. by phi and eta) and setup various MTV instances for those
# (selected tracks, built tracks, and seeds in this case). The naming
# of DQM folders is consistent with an example in trackingCompare.py
import FWCore.ParameterSet.Config as cms
def customiseMTVForBPix123Holes(process):
from Validation.RecoTrack.cutsRecoTracks_cfi import cutsRecoTracks as _cutsRecoTracks
import math
_minPhi = process.trackValidatorTrackingOnly.histoProducerAlgoBlock.minPhi.value()
_maxPhi = process.trackValidatorTrackingOnly.histoProducerAlgoBlock.maxPhi.value()
_nPhi = process.trackValidatorTrackingOnly.histoProducerAlgoBlock.nintPhi.value()
_binPhi = (_maxPhi - _minPhi) / _nPhi
process.generalTracksL1L2 = _cutsRecoTracks.clone(
minLayer = 0,
quality = [],
minRapidity = -1.0, # also eta < -1 is affected, but let's start with this
minPhi=_minPhi+_binPhi*14, maxPhi=_minPhi+_binPhi*19) # ~0.7 .. ~0.2
process.generalTracksL2L3 = process.generalTracksL1L2.clone(
minRapidity = -0.9, maxRapidity = 2,
minPhi=_minPhi+_binPhi*33, maxPhi=_minPhi+_binPhi + 2*math.pi) # ~2.6 .. ~3.3
print("L1L2 %f %f" % (process.generalTracksL1L2.minPhi.value(), process.generalTracksL1L2.maxPhi.value()))
print("L2L3 %f %f" % (process.generalTracksL2L3.minPhi.value(), process.generalTracksL2L3.maxPhi.value()))
from CommonTools.RecoAlgos.trackingParticleRefSelector_cfi import trackingParticleRefSelector as _trackingParticleRefSelector
process.trackingParticlesL1L2 = _trackingParticleRefSelector.clone(
signalOnly = False,
chargedOnly = False,
tip = 1e5,
lip = 1e5,
minRapidity = process.generalTracksL1L2.minRapidity.value(),
maxRapidity = process.generalTracksL1L2.maxRapidity.value(),
ptMin = 0,
minPhi = process.generalTracksL1L2.minPhi.value(),
maxPhi = process.generalTracksL1L2.maxPhi.value(),
)
process.trackingParticlesL2L3 = process.trackingParticlesL1L2.clone(
minRapidity = process.generalTracksL2L3.minRapidity.value(),
maxRapidity = process.generalTracksL2L3.maxRapidity.value(),
minPhi = process.generalTracksL2L3.minPhi.value(),
maxPhi = process.generalTracksL2L3.maxPhi.value(),
)
process.tracksPreValidationTrackingOnly += (
process.trackingParticlesL1L2 +
process.trackingParticlesL2L3 +
process.generalTracksL1L2 +
process.generalTracksL2L3
)
process.trackValidatorTrackingOnlyL1L2 = process.trackValidatorTrackingOnly.clone(
dirName = process.trackValidatorTrackingOnly.dirName.value().replace("Track/", "TrackL1L2/"),
label_tp_effic = "trackingParticlesL1L2",
label_tp_effic_refvector = True,
label = ["generalTracksL1L2"],
)
process.trackValidatorTrackingOnlyL2L3 = process.trackValidatorTrackingOnlyL1L2.clone(
dirName = process.trackValidatorTrackingOnlyL1L2.dirName.value().replace("L1L2", "L2L3"),
label_tp_effic = "trackingParticlesL2L3",
label = ["generalTracksL2L3"],
)
process.trackValidatorsTrackingOnly += (
process.trackValidatorTrackingOnlyL1L2 +
process.trackValidatorTrackingOnlyL2L3
)
for trkColl in process.trackValidatorTrackingOnly.label:
if "ByAlgoMask" in trkColl: continue
if "Pt09" in trkColl and not trkColl in ["generalTracksPt09", "cutsRecoTracksPt09Hp"]: continue
if trkColl != "generalTracks":
selL1L2 = getattr(process, trkColl).clone(src="generalTracksL1L2")
selL2L3 = getattr(process, trkColl).clone(src="generalTracksL2L3")
if "Pt09" in trkColl:
selL1L2Name = trkColl.replace("Pt09", "Pt09L1L2")
selL2L3Name = trkColl.replace("Pt09", "Pt09L2L3")
else:
selL1L2Name = trkColl.replace("cutsRecoTracks", "cutsRecoTracksL1L2")
selL2L3Name = trkColl.replace("cutsRecoTracks", "cutsRecoTracksL2L3")
setattr(process, selL1L2Name, selL1L2)
setattr(process, selL2L3Name, selL2L3)
process.tracksPreValidationTrackingOnly += (selL1L2+selL2L3)
process.trackValidatorTrackingOnlyL1L2.label.append(selL1L2Name)
process.trackValidatorTrackingOnlyL2L3.label.append(selL2L3Name)
for midfix in ["Building", "Seeding"]:
label = "trackValidator%sTrackingOnly" % midfix
mtv = getattr(process, label)
mtvL1L2 = mtv.clone(
dirName = mtv.dirName.value()[:-1] + "L1L2/",
label_tp_effic = "trackingParticlesL1L2",
label_tp_effic_refvector = True,
label = [],
mvaLabels = cms.PSet(),
doMVAPlots = False,
)
mtvL2L3 = mtvL1L2.clone(
dirName = mtvL1L2.dirName.value().replace("L1L2", "L2L3"),
label_tp_effic = "trackingParticlesL2L3",
)
setattr(process, label+"L1L2", mtvL1L2)
setattr(process, label+"L2L3", mtvL2L3)
process.trackValidatorsTrackingOnly += (
mtvL1L2 +
mtvL2L3
)
for trkColl in mtv.label:
selL1L2 = process.generalTracksL1L2.clone(src=trkColl)
selL2L3 = process.generalTracksL2L3.clone(src=trkColl)
selL1L2Name = trkColl+"L1L2"
selL2L3Name = trkColl+"L2L3"
setattr(process, selL1L2Name, selL1L2)
setattr(process, selL2L3Name, selL2L3)
process.tracksPreValidationTrackingOnly += (selL1L2+selL2L3)
mtvL1L2.label.append(selL1L2Name)
mtvL2L3.label.append(selL2L3Name)
return process
|
examples/hist.py | RyanAugust/geoplotlib | 1,021 | 26959 | <reponame>RyanAugust/geoplotlib
"""
Example of 2D histogram
"""
import geoplotlib
from geoplotlib.utils import read_csv, BoundingBox
data = read_csv('data/opencellid_dk.csv')
geoplotlib.hist(data, colorscale='sqrt', binsize=8)
geoplotlib.set_bbox(BoundingBox.DK)
geoplotlib.show()
|
recipes/Python/578871_Simple_Tkinter_strip_chart/recipe-578871.py | tdiprima/code | 2,023 | 26969 | # (c) MIT License Copyright 2014 <NAME>
# Please reuse, modify or distribute freely.
from collections import OrderedDict
import tkinter as tk
class StripChart( tk.Frame ):
def __init__( self, parent, scale, historySize, trackColors, *args, **opts ):
# Initialize
super().__init__( parent, *args, **opts )
self._trackHist = OrderedDict() # Map: TrackName -> list of canvas objID
self._trackColor = trackColors # Map: Track Name -> color
self._chartHeight = scale + 1
self._chartLength = historySize * 2 # Stretch for readability
self._canvas = tk.Canvas( self, height=self._chartHeight + 17,
width=self._chartLength, background='black' )
self._canvas.grid( sticky=tk.N+tk.S+tk.E+tk.W )
# Draw horizontal to divide plot from tick labels
x, y = 0, self._chartHeight + 2
x2, y2 = self._chartLength, y
self._baseLine = self._canvas.create_line( x, y, x2, y2, fill='white' )
# Init track def and histories lists
self._trackColor.update( { 'tick':'white', 'tickline':'white',
'ticklabel':'white' } )
for trackName in self._trackColor.keys():
self._trackHist[ trackName ] = [ None for x in range(historySize) ]
def plotValues( self, **vals ):
for trackName, trackHistory in self._trackHist.items():
# Scroll left-wards
self._canvas.delete( trackHistory.pop(0) )
# Remove left-most canvas objs
self._canvas.move( trackName, -2, 0 )
# Scroll canvas objs 2 pixels left
# Plot the new values
try:
val = vals[ trackName ]
x = self._chartLength
y = self._chartHeight - val
color = self._trackColor[ trackName ]
objId = self._canvas.create_line( x, y, x+1, y, fill=color,
width=3, tags=trackName )
trackHistory.append( objId )
except:
trackHistory.append( None )
def drawTick( self, text=None, **lineOpts ):
# draw vertical tick line
x = self._chartLength
y = 1
x2 = x
y2 = self._chartHeight
color = self._trackColor[ 'tickline' ]
objId = self._canvas.create_line( x, y, x2, y2, fill=color,
tags='tick', **lineOpts )
self._trackHist[ 'tickline' ].append( objId )
# draw tick label
if text is not None:
x = self._chartLength
y = self._chartHeight + 10
color = self._trackColor[ 'ticklabel' ]
objId = self._canvas.create_text( x, y, text=text,
fill=color, tags='tick' )
self._trackHist[ 'ticklabel' ].append( objId )
def configTrackColors( self, **trackColors ):
# Change plotted data color
for trackName, colorName in trackColors.items( ):
self._canvas.itemconfigure( trackName, fill=colorName )
# Change settings so future data has the new color
self._trackColor.update( trackColors )
if __name__ == '__main__':
top = tk.Tk( )
graph = StripChart( top, 100, 300, { 'A':'blue', 'B':'green', 'C':'red' } )
graph.grid( )
val_A = 0
val_B = 0
val_C = 0
delta = [ -3, -2, -1, 0, 1, 2, 3 ] # randomly vary the values by one of these
tickCount = 0
def nextVal( current, lowerBound, upperBound ):
from random import choice
current += choice( delta )
if current < lowerBound:
return lowerBound
elif current > upperBound:
return upperBound
else:
return current
def plotNextVals( ):
global val_A, val_B, val_C, tickCount
if tickCount % 50 == 0:
graph.drawTick( text=str(tickCount), dash=(1,4) )
tickCount += 1
val_A = nextVal( val_A, 0, 99 )
val_B = nextVal( val_B, 0, 99 )
val_C = nextVal( val_C, 0, 99 )
graph.plotValues( A=val_A, B=val_B, C=val_C )
#changeColor = { 800: 'black',
#1200: 'yellow',
#1600: 'orange',
#2000: 'white',
#2400: 'brown',
#2800: 'blue' }
#if tickCount in changeColor:
#graph.configTrackColors( A=changeColor[tickCount] )
top.after( 1, plotNextVals )
top.after( 1, plotNextVals )
top.mainloop( )
|
graph_explorer/structured_metrics/plugins/vmstat.py | farheenkaifee/dashboard_3 | 284 | 26977 | from . import Plugin
class VmstatPlugin(Plugin):
targets = [
{
'match': '^servers\.(?P<server>[^\.]+)\.vmstat\.(?P<type>.*)$',
'target_type': 'rate',
'tags': {'unit': 'Page'}
}
]
def sanitize(self, target):
target['tags']['type'] = target['tags']['type'].replace('pgpg', 'paging_')
target['tags']['type'] = target['tags']['type'].replace('pswp', 'swap_')
# vim: ts=4 et sw=4:
|
SimCalorimetry/EcalSelectiveReadoutProducers/python/ecalDigis_craft_cfi.py | ckamtsikis/cmssw | 852 | 26991 | import FWCore.ParameterSet.Config as cms
simEcalDigis = cms.EDProducer("EcalSelectiveReadoutProducer",
# Label of input EB and EE digi collections
digiProducer = cms.string('simEcalUnsuppressedDigis'),
# Instance name of input EB digi collections
EBdigiCollection = cms.string(''),
# Instance name of input EB digi collections
EEdigiCollection = cms.string(''),
# Instance name of output EB SR flags collection
EBSrFlagCollection = cms.string('ebSrFlags'),
# Instance name of output EE SR flags collection
EESrFlagCollection = cms.string('eeSrFlags'),
# Instance name of output EB digis collection
EBSRPdigiCollection = cms.string('ebDigis'),
# Instance name of output EE digis collection
EESRPdigiCollection = cms.string('eeDigis'),
# Label name of input ECAL trigger primitive collection
trigPrimProducer = cms.string('simEcalTriggerPrimitiveDigis'),
# Instance name of ECAL trigger primitive collection
trigPrimCollection = cms.string(''),
# Neighbour eta range, neighborhood: (2*deltaEta+1)*(2*deltaPhi+1)
deltaEta = cms.int32(1),
# Neighbouring eta range, neighborhood: (2*deltaEta+1)*(2*deltaPhi+1)
deltaPhi = cms.int32(1),
# Index of time sample (staring from 1) the first DCC weights is implied
ecalDccZs1stSample = cms.int32(3),
# ADC to GeV conversion factor used in ZS filter for EB
ebDccAdcToGeV = cms.double(0.035),
# ADC to GeV conversion factor used in ZS filter for EE
eeDccAdcToGeV = cms.double(0.06),
#DCC ZS FIR weights.
    # default value set of DCC firmware used in CRUZET and CRAFT
dccNormalizedWeights = cms.vdouble(-1.1865, 0.0195, 0.2900, 0.3477, 0.3008,
0.2266),
# Switch to use a symetric zero suppression (cut on absolute value). For
# studies only, for time being it is not supported by the hardware.
symetricZS = cms.bool(False),
# ZS energy threshold in GeV to apply to low interest channels of barrel
srpBarrelLowInterestChannelZS = cms.double(3*.035),
# ZS energy threshold in GeV to apply to low interest channels of endcap
srpEndcapLowInterestChannelZS = cms.double(3*0.06),
# ZS energy threshold in GeV to apply to high interest channels of barrel
srpBarrelHighInterestChannelZS = cms.double(-1.e9),
# ZS energy threshold in GeV to apply to high interest channels of endcap
srpEndcapHighInterestChannelZS = cms.double(-1.e9),
#switch to run w/o trigger primitive. For debug use only
trigPrimBypass = cms.bool(False),
#for debug mode only:
trigPrimBypassLTH = cms.double(1.0),
#for debug mode only:
trigPrimBypassHTH = cms.double(1.0),
#for debug mode only
trigPrimBypassWithPeakFinder = cms.bool(True),
# Mode selection for "Trig bypass" mode
# 0: TT thresholds applied on sum of crystal Et's
# 1: TT thresholds applies on compressed Et from Trigger primitive
# @ee trigPrimByPass_ switch
trigPrimBypassMode = cms.int32(0),
#number of events whose TT and SR flags must be dumped (for debug purpose):
dumpFlags = cms.untracked.int32(0),
#logical flag to write out SrFlags
writeSrFlags = cms.untracked.bool(True),
#switch to apply selective readout decision on the digis and produce
#the "suppressed" digis
produceDigis = cms.untracked.bool(True),
#Trigger Tower Flag to use when a flag is not found from the input
#Trigger Primitive collection. Must be one of the following values:
# 0: low interest, 1: mid interest, 3: high interest
# 4: forced low interest, 5: forced mid interest, 7: forced high interest
defaultTtf_ = cms.int32(4),
# SR->action flag map
actions = cms.vint32(1, 3, 3, 3, 5, 7, 7, 7)
)
|
plugin/AssemblerSPAdes/bin/RunAssembler.py | konradotto/TS | 125 | 27024 | #!/usr/bin/env python
import json
import os
import subprocess
import sys
def fileExistsAndNonEmpty(filename):
if not os.path.exists(filename):
return False
return os.stat(filename).st_size > 0
class AssemblerRunner(object):
def __init__(self, sample_id, sample_seq, bam_file):
with open("startplugin.json", "r") as fh:
self.config = json.load(fh)
self.params = self.config['pluginconfig']
# launch.sh creates a symlink to the input BAM file in this directory
self.output_dir = self.config['runinfo']['results_dir']
self.sample_id = sample_id
self.sample_seq = sample_seq
self.sample_name = sample_id + "." + sample_seq
self.sample_output_dir = os.path.join(self.output_dir, self.sample_name)
self.bam_file = bam_file
self.bam_rel_path = os.path.join(self.sample_name, self.bam_file)
# relative path to the input bam file
self.bam_to_assemble = os.path.join(self.output_dir, self.bam_rel_path)
# how much to downsample (the step is skipped if it equals to 1)
        if self.params.has_key('fraction_of_reads'):
            self.fraction_of_reads = float(self.params['fraction_of_reads'])
        else:
            self.fraction_of_reads = 1.0  # no downsampling when the option is absent
# all executables are located in bin/ subdirectory
self.assembler_path = os.path.join(os.environ['DIRNAME'], 'bin')
# where to output HTML with results
self.url_root = self.config['runinfo']['url_root']
# skip assembly (and run only QUAST) if contigs exist
self.quast_only = self.params.has_key('quastOnly')
# information will be printed to "info.json"
self.info = { 'params' : self.params, 'executedCommands' : [] }
if sample_id != '' and sample_seq != '':
self.info['sampleId'] = sample_id
self.info['sampleSeq'] = sample_seq
self.info['sampleName'] = self.sample_name
# Prints 'pluginconfig' section of 'startplugin.json'
def printAssemblyParameters(self):
print("AssemblerSPAdes run parameters:")
print(self.params)
def writeInfo(self, json_filename):
with open(json_filename, 'w+') as f:
json.dump(self.info, f, indent=4)
def runCommand(self, command, description=None):
if description:
print(description)
else:
print(command)
sys.stdout.flush()
os.system(command)
self.info['executedCommands'].append(command)
def runDownsampling(self):
print("\nSubsampling using Picard")
# downsampler = os.path.join(self.assembler_path, 'DownsampleSam.jar')
downsampler = "/opt/picard/picard-tools-current/picard.jar"
out = os.path.join(self.sample_output_dir, self.bam_file + "_scaled")
cmd = ("java -Xmx2g -jar {downsampler} "
"DownsampleSam "
"INPUT={self.bam_to_assemble} OUTPUT={out} "
"PROBABILITY={self.fraction_of_reads}").format(**locals())
self.runCommand(cmd)
cmd = ("mv {out} {self.bam_to_assemble}").format(**locals())
self.runCommand(cmd)
def execute(self):
self.printAssemblyParameters()
read_count_cmd = "samtools view -c " + self.bam_rel_path
read_count_process = subprocess.Popen(read_count_cmd, shell=True,
stdout=subprocess.PIPE)
num_reads = int(read_count_process.communicate()[0])
def tooFewReads():
if not self.params.has_key('min_reads'):
return False
self.min_reads = int(self.params['min_reads'])
return num_reads <= self.min_reads
print("%d reads in %s" % (num_reads, self.bam_file))
if tooFewReads():
print(("\tDoes not have more than %d reads. "
"Skipping this file") % (self.min_reads,))
return
if self.fraction_of_reads < 1:
self.runDownsampling()
# if self.params.has_key('runSpades'):
self.runSPAdes()
def runSPAdes(self):
if self.params.has_key('spadesversion'):
version = self.params['spadesversion']
else:
version = "3.1.0"
assert(version >= "3.0.0")
rel_path = os.path.join("SPAdes-%s-Linux" % version, "bin", "spades.py")
spades_path = os.path.join(self.assembler_path, rel_path)
output_dir = os.path.join(self.sample_name, "spades")
contigs_fn = os.path.join(output_dir, "contigs.fasta")
scaffolds_fn = os.path.join(output_dir, "scaffolds.fasta")
log_fn = os.path.join(output_dir, "spades.log")
skip_assembly = self.quast_only and fileExistsAndNonEmpty(contigs_fn)
if self.params.has_key('spadesOptions'):
user_options = self.params['spadesOptions']
else:
user_options = "-k 21,33,55,77,99"
spades_info = {'contigs' : contigs_fn,
'scaffolds' : scaffolds_fn,
'log' : log_fn,
'userOptions' : user_options,
'version' : version }
pid = os.getpid()
if not skip_assembly:
cmd = ("{spades_path} --iontorrent --tmp-dir /tmp/{pid} "
"-s {self.bam_to_assemble} -o {output_dir} "
"{user_options} > /dev/null").format(**locals())
print("Running AssemblerSPAdes - SPAdes %s" % version)
self.runCommand(cmd)
report_dir = self.createQuastReport(contigs_fn, output_dir)
spades_info['quastReportDir'] = report_dir
self.info['spades'] = spades_info
def createQuastReport(self, contigs_fn, output_dir):
version = "2.3"
rel_path = os.path.join("quast-%s" % version, "quast.py")
quast_path = os.path.join(self.assembler_path, rel_path)
# quast_reference = self.params['bgenome']
quast_reference = "None"
quast_results_dir = os.path.join(output_dir, "quast_results")
print("Running QUAST %s" % version)
reference_param = ("-R " + quast_reference) if quast_reference!="None" else " "
cmd = ("{quast_path} -o {quast_results_dir} "
"{reference_param} {contigs_fn}").format(**locals())
self.runCommand(cmd)
try:
if os.path.isfile(os.path.join(quast_results_dir, "report.html")):
return os.path.abspath(quast_results_dir)
else:
return None
except:
return None
import sys
if __name__ == "__main__":
if len(sys.argv) == 5:
sample_id = sys.argv[1]
sample_seq = sys.argv[2]
bam_file = sys.argv[3]
out_dir = sys.argv[4]
runner = AssemblerRunner(sample_id, sample_seq, bam_file)
runner.execute()
runner.writeInfo("%s/info_%s.%s.json" % (out_dir,sample_id, sample_seq))
else:
assert(len(sys.argv) == 3) # not a barcode run
bam_file = sys.argv[1]
out_dir = sys.argv[2]
# HACK: sample_name = '.' => essentially vanishes from all paths
runner = AssemblerRunner('', '', bam_file)
runner.execute()
runner.writeInfo("%s/info.json" % (out_dir))
|
blogs/views/feed.py | daaawx/bearblog | 657 | 27051 | from django.http.response import Http404
from django.http import HttpResponse
from blogs.helpers import unmark, clean_text
from blogs.views.blog import resolve_address
from feedgen.feed import FeedGenerator
import mistune
def feed(request):
blog = resolve_address(request)
if not blog:
raise Http404("Blog does not exist")
all_posts = blog.post_set.filter(publish=True, is_page=False).order_by('-published_date')
fg = FeedGenerator()
fg.id(blog.useful_domain())
fg.author({'name': blog.subdomain, 'email': 'hidden'})
fg.title(blog.title)
fg.subtitle(blog.meta_description or clean_text(unmark(blog.content)[:160]) or blog.title)
fg.link(href=f"{blog.useful_domain()}/", rel='alternate')
for post in all_posts:
fe = fg.add_entry()
fe.id(f"{blog.useful_domain()}/{post.slug}/")
fe.title(post.title)
fe.author({'name': blog.subdomain, 'email': 'hidden'})
fe.link(href=f"{blog.useful_domain()}/{post.slug}/")
fe.content(clean_text(mistune.html(post.content)), type="html")
fe.published(post.published_date)
fe.updated(post.published_date)
if request.GET.get('type') == 'rss':
fg.link(href=f"{blog.useful_domain()}/feed/?type=rss", rel='self')
rssfeed = fg.rss_str(pretty=True)
return HttpResponse(rssfeed, content_type='application/rss+xml')
else:
fg.link(href=f"{blog.useful_domain()}/feed/", rel='self')
atomfeed = fg.atom_str(pretty=True)
return HttpResponse(atomfeed, content_type='application/atom+xml')
|
tests/functional/test_tagged_unions_unknown.py | karim7262/botocore | 1,063 | 27054 | <gh_stars>1000+
# Copyright 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from botocore.session import Session
from tests import unittest
class TestTaggedUnionsUnknown(unittest.TestCase):
def test_tagged_union_member_name_does_not_coincide_with_unknown_key(self):
        # This test ensures that operation models do not use SDK_UNKNOWN_MEMBER
        # as a member name, thereby reserving SDK_UNKNOWN_MEMBER for the parser to
        # set as a key on the response object. This is necessary when the client
        # encounters a member that it is unaware of or that is not modeled.
session = Session()
for service_name in session.get_available_services():
service_model = session.get_service_model(service_name)
for shape_name in service_model.shape_names:
shape = service_model.shape_for(shape_name)
if hasattr(shape, 'is_tagged_union') and shape.is_tagged_union:
self.assertNotIn('SDK_UNKNOWN_MEMBER', shape.members)
|
tools/launcher.py | agentx-cgn/Hannibal | 189 | 27105 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
https://docs.python.org/2/library/subprocess.html#popen-objects
http://stackoverflow.com/questions/1606795/catching-stdout-in-realtime-from-subprocess
http://askubuntu.com/questions/458041/find-x-window-name
http://stackoverflow.com/questions/9681959/how-can-i-use-xdotool-from-within-a-python-module-script
http://manpages.ubuntu.com/manpages/trusty/en/man1/avconv.1.html
http://stackoverflow.com/questions/287871/print-in-terminal-with-colors-using-python
xwininfo gives window info: xwininfo: Window id: 0x2800010 "0 A.D."
xdotool:
sudo apt-get install libx11-dev libxtst-dev libXinerama-dev
make
make install
https://github.com/nullkey/glc/wiki/Capture
glc-capture --start --fps=30 --resize=1.0 --disable-audio --out=pyro.glc ./launcher.py
glc-play pyro.glc -o - -y 1 | avconv -i - -an -y pyro.mp4
avconv -i pyro.mp4 -codec copy -ss 15 -y pyro01.mp4
qt-faststart pyro01.mp4 pyro02.mp4
mplayer pyro02.mp4
'''
VERSION = "0.2.0"
import os, sys, subprocess, time, json
from time import sleep
sys.dont_write_bytecode = True
## maps etc.
from data import data
bcolors = {
"Bold": "\033[1m",
"Header" : "\033[95m",
"LBlue" : "\033[94m", ## light blue
"DBlue" : "\033[34m", ## dark blue
"OKGreen" : "\033[32m", ## dark Green
"Green" : "\033[92m", ## light green
"Warn" : "\033[33m", ## orange
"Fail" : "\033[91m",
"End" : "\033[0m",
# orange='\033[33m'
}
def printc(color, text) :
print (bcolors[color] + text + bcolors["End"])
def stdc(color, text) :
sys.stdout.write (bcolors[color] + text + bcolors["End"])
folders = {
"pro" : "/home/noiv/Desktop/0ad", ## project
"rel" : "/usr/games/0ad", ## release
"trunk" : "/Daten/Projects/Osiris/ps/trunk", ## svn
"share" : "/home/noiv/.local/share", ## user mod
}
## the game binary
locations = {
"rel" : folders["rel"], ## release
"svn" : folders["trunk"] + "/binaries/system/pyrogenesis", ## svn
"hbl" : folders["share"] + "/0ad/mods/hannibal/simulation/ai/hannibal/", ## bot folder
"deb" : folders["share"] + "/0ad/mods/hannibal/simulation/ai/hannibal/_debug.js", ## bot folder
"log" : folders["pro"] + "/last.log", ## log file
"ana" : folders["pro"] + "/analysis/", ## analysis csv file
}
## Hannibal log/debug options + data, readable by JS and Python
DEBUG = {
## default map
"map": "scenarios/Arcadia 02",
## counter
"counter": [],
## num: 0=no numerus
## xdo: move window, sim speed
## fil can use files
## log: 0=silent, 1+=errors, 2+=warnings, 3+=info, 4=all
## col: log colors
## sup: suppress, bot does not intialize (saves startup time)
## tst: activate tester
"bots": {
"0" : {"num": 0, "xdo": 0, "fil": 0, "log": 4, "sup": 1, "tst": 0, "col": "" },
"1" : {"num": 1, "xdo": 1, "fil": 1, "log": 4, "sup": 0, "tst": 1, "col": "" },
"2" : {"num": 0, "xdo": 0, "fil": 0, "log": 3, "sup": 0, "tst": 1, "col": "" },
"3" : {"num": 0, "xdo": 0, "fil": 0, "log": 2, "sup": 1, "tst": 0, "col": "" },
"4" : {"num": 0, "xdo": 0, "fil": 0, "log": 2, "sup": 1, "tst": 0, "col": "" },
"5" : {"num": 0, "xdo": 0, "fil": 0, "log": 2, "sup": 1, "tst": 0, "col": "" },
"6" : {"num": 0, "xdo": 0, "fil": 0, "log": 2, "sup": 1, "tst": 0, "col": "" },
"7" : {"num": 0, "xdo": 0, "fil": 0, "log": 2, "sup": 1, "tst": 0, "col": "" },
"8" : {"num": 0, "xdo": 0, "fil": 0, "log": 2, "sup": 1, "tst": 0, "col": "" },
}
}
## keep track of open file handles
files = {}
## civs to choose from at start
civs = [
"athen",
"brit",
"cart",
"celt",
"gaul",
"hele",
"iber",
"mace",
"maur",
"pers",
"ptol",
"rome",
"sele",
"spart",
]
def buildCmd(typ="rel", map="Arcadia 02", bots=2) :
## see /ps/trunk/binaries/system/readme.txt
cmd = [
locations[typ],
"-quickstart", ## load faster (disables audio and some system info logging)
"-autostart=" + map, ## enables autostart and sets MAPNAME; TYPEDIR is skirmishes, scenarios, or random
"-mod=public", ## start the game using NAME mod
"-mod=charts",
"-mod=hannibal",
"-autostart-seed=0", ## sets random map SEED value (default 0, use -1 for random)
"-autostart-size=192", ## sets random map size in TILES (default 192)
# "-autostart-players=2", ## sets NUMBER of players on random map (default 2)
# "-autostart-ai=1:hannibal",
# "-autostart-civ=1:athen", ## sets PLAYER's civilisation to CIV (skirmish and random maps only)
# "-autostart-ai=2:hannibal", ## sets the AI for PLAYER (e.g. 2:petra)
# "-autostart-civ=2:cart", ## sets PLAYER's civilisation to CIV (skirmish and random maps only)
]
## svn does not autoload /user
if typ == "svn" : cmd.append("-mod=user")
## set # of players
cmd.append("-autostart-players=" + str(bots))
## add bots with civ
for bot in range(1, bots +1) :
cmd.append("-autostart-ai=" + str(bot) + ":hannibal")
cmd.append("-autostart-civ=" + str(bot) + ":" + civs[bot -1])
return cmd
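## Illustrative example (not executed): with the default "rel" install above,
## buildCmd("rel", "Arcadia 02", 2) returns roughly
##   ["/usr/games/0ad", "-quickstart", "-autostart=Arcadia 02", "-mod=public", "-mod=charts",
##    "-mod=hannibal", "-autostart-seed=0", "-autostart-size=192", "-autostart-players=2",
##    "-autostart-ai=1:hannibal", "-autostart-civ=1:athen", "-autostart-ai=2:hannibal", "-autostart-civ=2:brit"]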
def findWindow(title) :
process = subprocess.Popen("xdotool search --name '%s'" % (title), stdout=subprocess.PIPE, shell="FALSE")
windowid = process.stdout.readlines()[0].strip()
process.stdout.close()
return windowid
def xdotool(command) :
subprocess.call(("xdotool %s" % command).split(" "))
def cleanup() :
for k, v in files.iteritems() : v.close()
def writeDEBUG():
fTest = open(locations["deb"], 'w')
fTest.truncate()
fTest.write("var HANNIBAL_DEBUG = " + json.dumps(DEBUG, indent=2) + ";")
fTest.close()
def killDEBUG():
fTest = open(locations["deb"], 'w')
fTest.truncate()
fTest.close()
def processMaps():
proc0AD = None
DEBUG["OnUpdate"] = "print('#! terminate');"
for mp in data["testMaps"] :
DEBUG["map"] = mp
writeDEBUG()
        cmd0AD = [locations["rel"], "-quickstart", "-autostart=" + mp, "-mod=public", "-mod=hannibal", "-autostart-ai=1:hannibal"]
proc0AD = subprocess.Popen(cmd0AD, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
print " > " + " ".join(cmd0AD)
try:
for line in iter(proc0AD.stdout.readline, b'') :
sline = line.strip()
if sline.startswith("#! terminate") :
proc0AD.terminate()
sleep(2)
if proc0AD : proc0AD.wait()
if proc0AD : proc0AD.kill()
break
else :
pass
# sys.stdout.write(line)
except KeyboardInterrupt, e :
if proc0AD : proc0AD.terminate()
break
print "done."
def launch(typ="rel", map="Arcadia 02", bots=2):
winX = 1520; winY = 20
doWrite = False
curFileNum = None
idWindow = None
proc0AD = None
def terminate() :
if proc0AD : proc0AD.terminate()
files["log"] = open(locations["log"], 'w')
files["log"].truncate()
DEBUG['map'] = map
writeDEBUG()
cmd0AD = buildCmd(typ, map, bots)
print (" cmd: %s" % " ".join(cmd0AD));
proc0AD = subprocess.Popen(cmd0AD, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
try:
for line in iter(proc0AD.stdout.readline, b'') :
## line has everything
## sline is stripped
## bline is active bot line after colon
sline = line.strip() ## removes nl and wp
bline = ""
id = 0
bot = DEBUG["bots"]["0"]
## detect bot id
if len(sline) >= 2 and sline[1:3] == "::" :
id = sline[0]
bot = DEBUG["bots"][id]
bline = "" if bot["log"] == 0 else sline[3:]
files["log"].write(line)
## terminate everything
if sline.startswith("#! terminate") :
if bot["xdo"] :
print(sline)
terminate()
return
## clear console
elif bline.startswith("#! clear") :
print(sline)
sys.stderr.write("\x1b[2J\x1b[H") ## why not ??
## xdo init
elif bot["xdo"] and bline.startswith("#! xdotool init") :
idWindow = findWindow("0 A.D")
printc("DBlue", " xdo: window id: %s" % idWindow)
xdotool("windowmove %s %s %s" % (idWindow, winX, winY))
## xdo command with echo
elif bot["xdo"] and bline.startswith("#! xdotool ") :
params = " ".join(bline.split(" ")[2:])
printc("DBlue", " X11: " + params)
xdotool(params)
## xdo command without echo
elif bot["xdo"] and bline.startswith("## xdotool ") : ## same, no echo
params = " ".join(bline.split(" ")[2:])
xdotool(params)
## xdo command suppress
elif not bot["xdo"] and bline.startswith("## xdotool ") :
pass
## file open
elif bot["fil"] and bline.startswith("#! open ") :
filenum = bline.split(" ")[2]
filename = bline.split(" ")[3]
files[filenum] = open(filename, 'w')
files[filenum].truncate()
## file append
elif bot["fil"] and bline.startswith("#! append ") :
filenum = bline.split(" ")[2]
dataLine = ":".join(bline.split(":")[1:])
files[filenum].write(dataLine + "\n")
## file write
elif bot["fil"] and bline.startswith("#! write ") :
print(bline)
filenum = bline.split(" ")[2]
filename = bline.split(" ")[3]
files[filenum] = open(filename, 'w')
files[filenum].truncate()
curFileNum = filenum
## file close
elif bot["fil"] and bline.startswith("#! close ") :
filenum = bline.split(" ")[2]
files[filenum].close()
print("#! closed %s at %s" % (filenum, os.stat(filename).st_size))
## bot output
elif bot["log"] > 0 and bline :
if bline.startswith("ERROR :") : stdc("Fail", id + "::" + bline + "\n")
elif bline.startswith("WARN :") : stdc("Warn", id + "::" + bline + "\n")
elif bline.startswith("INFO :") : stdc("OKGreen", id + "::" + bline + "\n")
else : sys.stdout.write("" + bline + "\n")
## suppressed bots - no output
elif bot["log"] == 0:
pass
## hannibal or map or 0AD output
elif line :
if line.startswith("ERROR :") : stdc("Fail", line + "\n")
elif line.startswith("WARN :") : stdc("Warn", line + "\n")
elif line.startswith("INFO :") : stdc("OKGreen", line + "\n")
elif line.startswith("TIMER| ") : pass ## suppress 0AD debugs
elif line.startswith("sys_cursor_create:") : pass
elif line.startswith("AL lib:") : pass
elif line.startswith("Sound:") : pass
else :
sys.stdout.write("" + line)
except KeyboardInterrupt, e :
terminate()
if __name__ == '__main__':
args = sys.argv[1:]
if args[0] == "maps" :
print (" processing maps...")
processMaps(args)
else:
typ = args[0] if len(args) > 0 else "rel"
map = args[1] if len(args) > 1 else "Arcadia 02"
bots = args[2] if len(args) > 2 else "2"
launch(typ, map, int(bots))
cleanup()
print ("\nBye\n")
|
src/jarvis/jarvis/skills/collection/remember.py | jameswynn/Python-ai-assistant | 424 | 27125 | # MIT License
# Copyright (c) 2019 <NAME>
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from jarvis.skills.skill import AssistantSkill
from jarvis.utils.mongoDB import db
from jarvis.utils import input
header = """
-----------------------------------------------------------------------------------------------
I would like to learn, tell me the right answer!
-----------------------------------------------------------------------------------------------
* Note: Create new skill! Write your question and the appropriate answer.
\n
"""
class RememberSkills(AssistantSkill):
@classmethod
def remember(cls, **kwargs):
cls.console(header)
continue_add = True
while continue_add:
cls.console(text='Question: ')
tags = cls.user_input()
cls.console(text='Suggested Response: ')
response = cls.user_input()
new_skill = {'name': 'learned_skill',
'enable': True,
'func': cls.tell_response.__name__,
'response': response,
'tags': tags,
},
cls.response('Add more? ', refresh_console=False)
continue_add = input.check_input_to_continue()
db.insert_many_documents(collection='learned_skills', documents=new_skill)
@classmethod
def tell_response(cls, **kwargs):
cls.response(kwargs.get('skill').get('response'))
@classmethod
def clear_learned_skills(cls, **kwargs):
if db.is_collection_empty(collection='learned_skills'):
cls.response("I can't find learned skills in my database")
else:
cls.response('I found learned skills..')
cls.response('Are you sure to remove learned skills? ', refresh_console=False)
user_answer = input.check_input_to_continue()
if user_answer:
db.drop_collection(collection='learned_skills')
cls.response("Perfect I have deleted them all")
|
tests/test_inheritance_and_pydantic_generation/test_validators_in_generated_pydantic.py | ivangirko/ormar | 905 | 27151 | <filename>tests/test_inheritance_and_pydantic_generation/test_validators_in_generated_pydantic.py<gh_stars>100-1000
import enum
import databases
import pydantic
import pytest
import sqlalchemy
from pydantic import ValidationError
import ormar
from tests.settings import DATABASE_URL
metadata = sqlalchemy.MetaData()
database = databases.Database(DATABASE_URL)
class BaseMeta(ormar.ModelMeta):
database = database
metadata = metadata
class EnumExample(str, enum.Enum):
A = "A"
B = "B"
C = "C"
class ModelExample(ormar.Model):
class Meta(ormar.ModelMeta):
database = database
metadata = metadata
tablename = "examples"
id: int = ormar.Integer(primary_key=True)
str_field: str = ormar.String(min_length=5, max_length=10, nullable=False)
enum_field: str = ormar.String(
max_length=1, nullable=False, choices=list(EnumExample)
)
@pydantic.validator("str_field")
def validate_str_field(cls, v):
if " " not in v:
raise ValueError("must contain a space")
return v
ModelExampleCreate = ModelExample.get_pydantic(exclude={"id"})
def test_ormar_validator():
ModelExample(str_field="a aaaaaa", enum_field="A")
with pytest.raises(ValidationError) as e:
ModelExample(str_field="aaaaaaa", enum_field="A")
assert "must contain a space" in str(e)
with pytest.raises(ValidationError) as e:
ModelExample(str_field="a aaaaaaa", enum_field="Z")
assert "not in allowed choices" in str(e)
def test_pydantic_validator():
ModelExampleCreate(str_field="a aaaaaa", enum_field="A")
with pytest.raises(ValidationError) as e:
ModelExampleCreate(str_field="aaaaaaa", enum_field="A")
assert "must contain a space" in str(e)
with pytest.raises(ValidationError) as e:
ModelExampleCreate(str_field="a aaaaaaa", enum_field="Z")
assert "not in allowed choices" in str(e)
|
torchbnn/functional.py | Harry24k/bayesian-neural-network-pytorch | 178 | 27192 | <reponame>Harry24k/bayesian-neural-network-pytorch
import math
import torch
from .modules import *
def _kl_loss(mu_0, log_sigma_0, mu_1, log_sigma_1) :
"""
An method for calculating KL divergence between two Normal distribtuion.
Arguments:
mu_0 (Float) : mean of normal distribution.
log_sigma_0 (Float): log(standard deviation of normal distribution).
mu_1 (Float): mean of normal distribution.
log_sigma_1 (Float): log(standard deviation of normal distribution).
"""
kl = log_sigma_1 - log_sigma_0 + \
(torch.exp(log_sigma_0)**2 + (mu_0-mu_1)**2)/(2*math.exp(log_sigma_1)**2) - 0.5
return kl.sum()
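# The expression above is the closed-form KL divergence between two Gaussians,
#   KL(N(mu_0, sigma_0^2) || N(mu_1, sigma_1^2))
#     = log(sigma_1/sigma_0) + (sigma_0^2 + (mu_0 - mu_1)^2) / (2 * sigma_1^2) - 1/2,
# applied elementwise and summed. A minimal sanity check (illustrative, doctest-style):
#   >>> _kl_loss(torch.zeros(3), torch.zeros(3), 0.0, 0.0)   # identical distributions
#   tensor(0.)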
def bayesian_kl_loss(model, reduction='mean', last_layer_only=False) :
"""
An method for calculating KL divergence of whole layers in the model.
Arguments:
model (nn.Module): a model to be calculated for KL-divergence.
reduction (string, optional): Specifies the reduction to apply to the output:
``'mean'``: the sum of the output will be divided by the number of
elements of the output.
``'sum'``: the output will be summed.
last_layer_only (Bool): True for return only the last layer's KL divergence.
"""
device = torch.device("cuda" if next(model.parameters()).is_cuda else "cpu")
kl = torch.Tensor([0]).to(device)
kl_sum = torch.Tensor([0]).to(device)
n = torch.Tensor([0]).to(device)
for m in model.modules() :
if isinstance(m, (BayesLinear, BayesConv2d)):
kl = _kl_loss(m.weight_mu, m.weight_log_sigma, m.prior_mu, m.prior_log_sigma)
kl_sum += kl
n += len(m.weight_mu.view(-1))
if m.bias :
kl = _kl_loss(m.bias_mu, m.bias_log_sigma, m.prior_mu, m.prior_log_sigma)
kl_sum += kl
n += len(m.bias_mu.view(-1))
if isinstance(m, BayesBatchNorm2d):
if m.affine :
kl = _kl_loss(m.weight_mu, m.weight_log_sigma, m.prior_mu, m.prior_log_sigma)
kl_sum += kl
n += len(m.weight_mu.view(-1))
kl = _kl_loss(m.bias_mu, m.bias_log_sigma, m.prior_mu, m.prior_log_sigma)
kl_sum += kl
n += len(m.bias_mu.view(-1))
if last_layer_only or n == 0 :
return kl
if reduction == 'mean' :
return kl_sum/n
elif reduction == 'sum' :
return kl_sum
else :
raise ValueError(reduction + " is not valid")
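# A minimal usage sketch (illustrative; `model`, `x`, `y` and the 0.1 weight are placeholders):
#   task_loss = torch.nn.functional.cross_entropy(model(x), y)   # data-fit term
#   kl_loss = bayesian_kl_loss(model, reduction='mean')          # complexity term
#   (task_loss + 0.1 * kl_loss).backward()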
|
Python3/537.py | rakhi2001/ecom7 | 854 | 27201 | <reponame>rakhi2001/ecom7<filename>Python3/537.py
__________________________________________________________________________________________________
sample 24 ms submission
class Solution:
def complexNumberMultiply(self, a: str, b: str) -> str:
A = [int(x) for x in a.replace('i','').split('+')]
B = [int(x) for x in b.replace('i','').split('+')]
return str(A[0]*B[0]-A[1]*B[1])+'+'+str(A[0]*B[1]+A[1]*B[0])+'i'
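# The one-liner above parses each operand's real and imaginary parts and applies
# (a+bi)(c+di) = (ac - bd) + (ad + bc)i; e.g. "1+1i" times "1+1i" gives "0+2i".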
__________________________________________________________________________________________________
sample 13124 kb submission
class Solution:
def getrc(self, strs):
val = ''
r, c = 0, 0
positive = True
for char in strs:
if char == '-':
positive = False
elif char != '+' and char != 'i':
val += char
else:
val = int(val)
if not positive: val = -val
if char == '+': r = val
else: c = val
val = ''
positive = True
return (r, c)
def complexNumberMultiply(self, a: str, b: str) -> str:
ra, ca = self.getrc(a)
rb, cb = self.getrc(b)
r = ra*rb-ca*cb
c = ra*cb+rb*ca
if r >= 0: r = str(r)
else: r = '-' + str(-r)
if c >= 0: c = str(c)
else: c = '-' + str(-c)
return r + '+' + c + 'i'
__________________________________________________________________________________________________
|
metaworld/policies/sawyer_push_wall_v2_policy.py | yiwc/robotics-world | 681 | 27212 | <reponame>yiwc/robotics-world<filename>metaworld/policies/sawyer_push_wall_v2_policy.py
import numpy as np
from metaworld.policies.action import Action
from metaworld.policies.policy import Policy, assert_fully_parsed, move
class SawyerPushWallV2Policy(Policy):
@staticmethod
@assert_fully_parsed
def _parse_obs(obs):
return {
'hand_pos': obs[:3],
'unused_1': obs[3],
'obj_pos': obs[4:7],
'unused_2': obs[7:-3],
'goal_pos': obs[-3:],
}
def get_action(self, obs):
o_d = self._parse_obs(obs)
action = Action({
'delta_pos': np.arange(3),
'grab_effort': 3
})
action['delta_pos'] = move(o_d['hand_pos'], to_xyz=self.desired_pos(o_d), p=10.)
action['grab_effort'] = self.grab_effort(o_d)
return action.array
@staticmethod
def desired_pos(o_d):
pos_curr = o_d['hand_pos']
pos_obj = o_d['obj_pos'] + np.array([-0.005, 0, 0])
# If error in the XY plane is greater than 0.02, place end effector above the puck
if np.linalg.norm(pos_curr[:2] - pos_obj[:2]) > 0.02:
return pos_obj + np.array([0., 0., 0.2])
# Once XY error is low enough, drop end effector down on top of obj
elif abs(pos_curr[2] - pos_obj[2]) > 0.04:
return pos_obj + np.array([0., 0., 0.03])
# Move to the goal
else:
#if the wall is between the puck and the goal, go around the wall
if(-0.1 <= pos_obj[0] <= 0.3 and 0.65 <= pos_obj[1] <= 0.75):
return pos_curr + np.array([-1, 0, 0])
elif ((-0.15 < pos_obj[0] < 0.05 or 0.15 < pos_obj[0] < 0.35)
and 0.695 <= pos_obj[1] <= 0.755):
return pos_curr + np.array([0, 1, 0])
return o_d['goal_pos']
@staticmethod
def grab_effort(o_d):
pos_curr = o_d['hand_pos']
pos_obj = o_d['obj_pos']
if np.linalg.norm(pos_curr[:2] - pos_obj[:2]) > 0.02 or \
abs(pos_curr[2] - pos_obj[2]) > 0.1:
return 0.0
# While end effector is moving down toward the obj, begin closing the grabber
else:
return 0.6
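# A minimal usage sketch (illustrative; `env` is assumed to be the matching Meta-World
# push-wall-v2 environment whose observation layout matches _parse_obs above):
#   policy = SawyerPushWallV2Policy()
#   obs = env.reset()
#   action = policy.get_action(obs)   # np.ndarray of length 4: [dx, dy, dz, grab_effort]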
|
sdk/notificationhubs/azure-mgmt-notificationhubs/azure/mgmt/notificationhubs/models/__init__.py | rsdoherty/azure-sdk-for-python | 2,728 | 27240 | <reponame>rsdoherty/azure-sdk-for-python
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
try:
from ._models_py3 import AdmCredential
from ._models_py3 import ApnsCredential
from ._models_py3 import BaiduCredential
from ._models_py3 import CheckAvailabilityParameters
from ._models_py3 import CheckAvailabilityResult
from ._models_py3 import DebugSendResponse
from ._models_py3 import ErrorResponse
from ._models_py3 import GcmCredential
from ._models_py3 import MpnsCredential
from ._models_py3 import NamespaceCreateOrUpdateParameters
from ._models_py3 import NamespaceListResult
from ._models_py3 import NamespacePatchParameters
from ._models_py3 import NamespaceResource
from ._models_py3 import NotificationHubCreateOrUpdateParameters
from ._models_py3 import NotificationHubListResult
from ._models_py3 import NotificationHubPatchParameters
from ._models_py3 import NotificationHubResource
from ._models_py3 import Operation
from ._models_py3 import OperationDisplay
from ._models_py3 import OperationListResult
from ._models_py3 import PnsCredentialsResource
from ._models_py3 import PolicykeyResource
from ._models_py3 import Resource
from ._models_py3 import ResourceListKeys
from ._models_py3 import SharedAccessAuthorizationRuleCreateOrUpdateParameters
from ._models_py3 import SharedAccessAuthorizationRuleListResult
from ._models_py3 import SharedAccessAuthorizationRuleProperties
from ._models_py3 import SharedAccessAuthorizationRuleResource
from ._models_py3 import Sku
from ._models_py3 import SubResource
from ._models_py3 import WnsCredential
except (SyntaxError, ImportError):
from ._models import AdmCredential # type: ignore
from ._models import ApnsCredential # type: ignore
from ._models import BaiduCredential # type: ignore
from ._models import CheckAvailabilityParameters # type: ignore
from ._models import CheckAvailabilityResult # type: ignore
from ._models import DebugSendResponse # type: ignore
from ._models import ErrorResponse # type: ignore
from ._models import GcmCredential # type: ignore
from ._models import MpnsCredential # type: ignore
from ._models import NamespaceCreateOrUpdateParameters # type: ignore
from ._models import NamespaceListResult # type: ignore
from ._models import NamespacePatchParameters # type: ignore
from ._models import NamespaceResource # type: ignore
from ._models import NotificationHubCreateOrUpdateParameters # type: ignore
from ._models import NotificationHubListResult # type: ignore
from ._models import NotificationHubPatchParameters # type: ignore
from ._models import NotificationHubResource # type: ignore
from ._models import Operation # type: ignore
from ._models import OperationDisplay # type: ignore
from ._models import OperationListResult # type: ignore
from ._models import PnsCredentialsResource # type: ignore
from ._models import PolicykeyResource # type: ignore
from ._models import Resource # type: ignore
from ._models import ResourceListKeys # type: ignore
from ._models import SharedAccessAuthorizationRuleCreateOrUpdateParameters # type: ignore
from ._models import SharedAccessAuthorizationRuleListResult # type: ignore
from ._models import SharedAccessAuthorizationRuleProperties # type: ignore
from ._models import SharedAccessAuthorizationRuleResource # type: ignore
from ._models import Sku # type: ignore
from ._models import SubResource # type: ignore
from ._models import WnsCredential # type: ignore
from ._notification_hubs_management_client_enums import (
AccessRights,
NamespaceType,
SkuName,
)
__all__ = [
'AdmCredential',
'ApnsCredential',
'BaiduCredential',
'CheckAvailabilityParameters',
'CheckAvailabilityResult',
'DebugSendResponse',
'ErrorResponse',
'GcmCredential',
'MpnsCredential',
'NamespaceCreateOrUpdateParameters',
'NamespaceListResult',
'NamespacePatchParameters',
'NamespaceResource',
'NotificationHubCreateOrUpdateParameters',
'NotificationHubListResult',
'NotificationHubPatchParameters',
'NotificationHubResource',
'Operation',
'OperationDisplay',
'OperationListResult',
'PnsCredentialsResource',
'PolicykeyResource',
'Resource',
'ResourceListKeys',
'SharedAccessAuthorizationRuleCreateOrUpdateParameters',
'SharedAccessAuthorizationRuleListResult',
'SharedAccessAuthorizationRuleProperties',
'SharedAccessAuthorizationRuleResource',
'Sku',
'SubResource',
'WnsCredential',
'AccessRights',
'NamespaceType',
'SkuName',
]
|
python/15_bsearch/bsearch_recursion.py | shipan3452/algo | 22,028 | 27269 | <reponame>shipan3452/algo
"""
Author: dreamkong
"""
from typing import List
def bsearch(nums: List[int], target: int) -> int:
return bsearch_internally(nums, 0, len(nums)-1, target)
def bsearch_internally(nums: List[int], low: int, high: int, target: int) -> int:
if low > high:
return -1
    mid = low + ((high - low) >> 1)
if nums[mid] == target:
return mid
elif nums[mid] < target:
return bsearch_internally(nums, mid+1, high, target)
else:
return bsearch_internally(nums, low, mid-1, target)
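if __name__ == "__main__":
    # A minimal self-check with illustrative values.
    a = [1, 3, 5, 7, 9]
    print(bsearch(a, 7))   # expected: 3
    print(bsearch(a, 4))   # expected: -1 (not found)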
|
alphamind/model/data_preparing.py | rongliang-tech/alpha-mind | 186 | 27271 | # -*- coding: utf-8 -*-
"""
Created on 2017-8-24
@author: cheng.li
"""
import bisect
import datetime as dt
from typing import Iterable
from typing import Union
import numpy as np
import pandas as pd
from simpleutils.asserts import require
from PyFin.DateUtilities import Period
from PyFin.api import BizDayConventions
from PyFin.api import DateGeneration
from PyFin.api import advanceDateByCalendar
from PyFin.api import makeSchedule
from alphamind.data.engines.sqlengine import SqlEngine
from alphamind.data.engines.sqlengine import total_risk_factors
from alphamind.data.engines.universe import Universe
from alphamind.data.processing import factor_processing
from alphamind.data.transformer import Transformer
from alphamind.utilities import alpha_logger
from alphamind.utilities import map_freq
def _merge_df(engine, names, factor_df, target_df, universe, dates, risk_model, neutralized_risk):
risk_df = engine.fetch_risk_model_range(universe, dates=dates, risk_model=risk_model)[1]
used_neutralized_risk = list(set(total_risk_factors).difference(names))
risk_df = risk_df[['trade_date', 'code'] + used_neutralized_risk].dropna()
target_df = pd.merge(target_df, risk_df, on=['trade_date', 'code']).dropna()
if neutralized_risk:
train_x = pd.merge(factor_df, risk_df, on=['trade_date', 'code'])
train_y = target_df.copy()
risk_exp = train_x[neutralized_risk].values.astype(float)
x_values = train_x[names].values.astype(float)
y_values = train_y[['dx']].values
else:
risk_exp = None
train_x = factor_df.copy()
train_y = target_df.copy()
x_values = train_x[names].values.astype(float)
y_values = train_y[['dx']].values
codes = train_x['code'].values
date_label = pd.DatetimeIndex(factor_df.trade_date).to_pydatetime()
dates = np.unique(date_label)
return target_df, dates, date_label, risk_exp, x_values, y_values, train_x, train_y, codes
def prepare_data(engine: SqlEngine,
factors: Union[Transformer, Iterable[object]],
start_date: str,
end_date: str,
frequency: str,
universe: Universe,
benchmark: int,
warm_start: int = 0,
fit_target: Union[Transformer, object] = None):
if warm_start > 0:
p = Period(frequency)
p = Period(length=-warm_start * p.length(), units=p.units())
start_date = advanceDateByCalendar('china.sse', start_date, p).strftime('%Y-%m-%d')
dates = makeSchedule(start_date,
end_date,
frequency,
calendar='china.sse',
dateRule=BizDayConventions.Following,
dateGenerationRule=DateGeneration.Forward)
dates = [d.strftime('%Y-%m-%d') for d in dates]
horizon = map_freq(frequency)
if isinstance(factors, Transformer):
transformer = factors
else:
transformer = Transformer(factors)
factor_df = engine.fetch_factor_range(universe,
factors=transformer,
dates=dates).sort_values(['trade_date', 'code'])
alpha_logger.info("factor data loading finished")
if fit_target is None:
target_df = engine.fetch_dx_return_range(universe, dates=dates, horizon=horizon)
else:
one_more_date = advanceDateByCalendar('china.sse', dates[-1], frequency)
target_df = engine.fetch_factor_range_forward(universe, factors=fit_target,
dates=dates + [one_more_date])
target_df = target_df[target_df.trade_date.isin(dates)]
target_df = target_df.groupby('code').apply(lambda x: x.fillna(method='pad'))
alpha_logger.info("fit target data loading finished")
industry_df = engine.fetch_industry_range(universe, dates=dates)
alpha_logger.info("industry data loading finished")
benchmark_df = engine.fetch_benchmark_range(benchmark, dates=dates)
alpha_logger.info("benchmark data loading finished")
df = pd.merge(factor_df, target_df, on=['trade_date', 'code']).dropna()
df = pd.merge(df, benchmark_df, on=['trade_date', 'code'], how='left')
df = pd.merge(df, industry_df, on=['trade_date', 'code'])
df['weight'] = df['weight'].fillna(0.)
df.dropna(inplace=True)
return dates, df[['trade_date', 'code', 'dx']], df[
['trade_date', 'code', 'weight', 'industry_code', 'industry'] + transformer.names]
def batch_processing(names,
x_values,
y_values,
groups,
group_label,
batch,
risk_exp,
pre_process,
post_process,
codes):
train_x_buckets = {}
train_y_buckets = {}
train_risk_buckets = {}
predict_x_buckets = {}
predict_y_buckets = {}
predict_risk_buckets = {}
predict_codes_bucket = {}
for i, start in enumerate(groups[:-batch]):
end = groups[i + batch]
left_index = bisect.bisect_left(group_label, start)
right_index = bisect.bisect_left(group_label, end)
this_raw_x = x_values[left_index:right_index]
this_raw_y = y_values[left_index:right_index]
if risk_exp is not None:
this_risk_exp = risk_exp[left_index:right_index]
else:
this_risk_exp = None
train_x_buckets[end] = pd.DataFrame(factor_processing(this_raw_x,
pre_process=pre_process,
risk_factors=this_risk_exp,
post_process=post_process),
columns=names)
train_y_buckets[end] = factor_processing(this_raw_y,
pre_process=pre_process,
risk_factors=this_risk_exp,
post_process=post_process)
train_risk_buckets[end] = this_risk_exp
left_index = bisect.bisect_right(group_label, start)
right_index = bisect.bisect_right(group_label, end)
sub_dates = group_label[left_index:right_index]
this_raw_x = x_values[left_index:right_index]
this_codes = codes[left_index:right_index]
if risk_exp is not None:
this_risk_exp = risk_exp[left_index:right_index]
else:
this_risk_exp = None
ne_x = factor_processing(this_raw_x,
pre_process=pre_process,
risk_factors=this_risk_exp,
post_process=post_process)
inner_left_index = bisect.bisect_left(sub_dates, end)
inner_right_index = bisect.bisect_right(sub_dates, end)
predict_x_buckets[end] = pd.DataFrame(ne_x[inner_left_index:inner_right_index],
columns=names)
if risk_exp is not None:
predict_risk_buckets[end] = this_risk_exp[inner_left_index:inner_right_index]
else:
predict_risk_buckets = None
predict_codes_bucket[end] = this_codes[inner_left_index:inner_right_index]
this_raw_y = y_values[left_index:right_index]
if len(this_raw_y) > 0:
ne_y = factor_processing(this_raw_y,
pre_process=pre_process,
risk_factors=this_risk_exp,
post_process=post_process)
predict_y_buckets[end] = ne_y[inner_left_index:inner_right_index]
return train_x_buckets, \
train_y_buckets, \
train_risk_buckets, \
predict_x_buckets, \
predict_y_buckets, \
predict_risk_buckets, \
predict_codes_bucket
def fetch_data_package(engine: SqlEngine,
alpha_factors: Iterable[object],
start_date: str,
end_date: str,
frequency: str,
universe: Universe,
benchmark: int,
warm_start: int = 0,
batch: int = 1,
neutralized_risk: Iterable[str] = None,
risk_model: str = 'short',
pre_process: Iterable[object] = None,
post_process: Iterable[object] = None,
fit_target: Union[Transformer, object] = None) -> dict:
alpha_logger.info("Starting data package fetching ...")
transformer = Transformer(alpha_factors)
names = transformer.names
dates, target_df, factor_df = prepare_data(engine,
transformer,
start_date,
end_date,
frequency,
universe,
benchmark,
warm_start + batch,
fit_target=fit_target)
target_df, dates, date_label, risk_exp, x_values, y_values, train_x, train_y, codes = \
_merge_df(engine, names, factor_df, target_df, universe, dates, risk_model,
neutralized_risk)
alpha_logger.info("data merging finished")
target_df['weight'] = train_x['weight']
target_df['industry'] = train_x['industry']
target_df['industry_code'] = train_x['industry_code']
if neutralized_risk:
for i, name in enumerate(neutralized_risk):
target_df.loc[:, name] = risk_exp[:, i]
alpha_logger.info("Loading data is finished")
train_x_buckets, train_y_buckets, train_risk_buckets, predict_x_buckets, predict_y_buckets, predict_risk_buckets, predict_codes_bucket \
= batch_processing(names,
x_values,
y_values,
dates,
date_label,
batch,
risk_exp,
pre_process,
post_process,
codes)
alpha_logger.info("Data processing is finished")
ret = dict()
ret['x_names'] = names
ret['settlement'] = target_df[target_df.trade_date >= start_date]
train_x_buckets = {k: train_x_buckets[k] for k in train_x_buckets if
k.strftime('%Y-%m-%d') >= start_date}
train_y_buckets = {k: train_y_buckets[k] for k in train_y_buckets if
k.strftime('%Y-%m-%d') >= start_date}
train_risk_buckets = {k: train_risk_buckets[k] for k in train_risk_buckets if
k.strftime('%Y-%m-%d') >= start_date}
predict_x_buckets = {k: predict_x_buckets[k] for k in predict_x_buckets if
k.strftime('%Y-%m-%d') >= start_date}
predict_y_buckets = {k: predict_y_buckets[k] for k in predict_y_buckets if
k.strftime('%Y-%m-%d') >= start_date}
if neutralized_risk:
predict_risk_buckets = {k: predict_risk_buckets[k] for k in predict_risk_buckets if
k.strftime('%Y-%m-%d') >= start_date}
else:
predict_risk_buckets = None
predict_codes_bucket = {k: predict_codes_bucket[k] for k in predict_codes_bucket if
k.strftime('%Y-%m-%d') >= start_date}
ret['train'] = {'x': train_x_buckets, 'y': train_y_buckets, 'risk': train_risk_buckets}
ret['predict'] = {'x': predict_x_buckets, 'y': predict_y_buckets, 'risk': predict_risk_buckets,
'code': predict_codes_bucket}
return ret
def fetch_train_phase(engine,
alpha_factors: Union[Transformer, Iterable[object]],
ref_date,
frequency,
universe,
batch=1,
neutralized_risk: Iterable[str] = None,
risk_model: str = 'short',
pre_process: Iterable[object] = None,
post_process: Iterable[object] = None,
warm_start: int = 0,
fit_target: Union[Transformer, object] = None) -> dict:
if isinstance(alpha_factors, Transformer):
transformer = alpha_factors
else:
transformer = Transformer(alpha_factors)
p = Period(frequency)
p = Period(length=-(warm_start + batch) * p.length(), units=p.units())
start_date = advanceDateByCalendar('china.sse', ref_date, p, BizDayConventions.Following)
dates = makeSchedule(start_date,
ref_date,
frequency,
calendar='china.sse',
dateRule=BizDayConventions.Following,
dateGenerationRule=DateGeneration.Backward)
horizon = map_freq(frequency)
factor_df = engine.fetch_factor_range(universe, factors=transformer, dates=dates)
if fit_target is None:
target_df = engine.fetch_dx_return_range(universe, dates=dates, horizon=horizon)
else:
one_more_date = advanceDateByCalendar('china.sse', dates[-1], frequency)
target_df = engine.fetch_factor_range_forward(universe, factors=fit_target,
dates=dates + [one_more_date])
target_df = target_df[target_df.trade_date.isin(dates)]
target_df = target_df.groupby('code').apply(lambda x: x.fillna(method='pad'))
df = pd.merge(factor_df, target_df, on=['trade_date', 'code']).dropna()
target_df, factor_df = df[['trade_date', 'code', 'dx']], df[
['trade_date', 'code'] + transformer.names]
target_df, dates, date_label, risk_exp, x_values, y_values, _, _, codes = \
_merge_df(engine, transformer.names, factor_df, target_df, universe, dates, risk_model,
neutralized_risk)
if dates[-1] == dt.datetime.strptime(ref_date, '%Y-%m-%d'):
require(len(dates) >= 2, ValueError,
"No previous data for training for the date {0}".format(ref_date))
end = dates[-2]
start = dates[-batch - 1] if batch <= len(dates) - 1 else dates[0]
else:
end = dates[-1]
start = dates[-batch] if batch <= len(dates) else dates[0]
index = (date_label >= start) & (date_label <= end)
this_raw_x = x_values[index]
this_raw_y = y_values[index]
this_code = codes[index]
if risk_exp is not None:
this_risk_exp = risk_exp[index]
else:
this_risk_exp = None
ne_x = factor_processing(this_raw_x,
pre_process=pre_process,
risk_factors=this_risk_exp,
post_process=post_process)
ne_y = factor_processing(this_raw_y,
pre_process=pre_process,
risk_factors=this_risk_exp,
post_process=post_process)
ret = dict()
ret['x_names'] = transformer.names
ret['train'] = {'x': pd.DataFrame(ne_x, columns=transformer.names), 'y': ne_y,
'code': this_code}
return ret
def fetch_predict_phase(engine,
alpha_factors: Union[Transformer, Iterable[object]],
ref_date,
frequency,
universe,
batch=1,
neutralized_risk: Iterable[str] = None,
risk_model: str = 'short',
pre_process: Iterable[object] = None,
post_process: Iterable[object] = None,
warm_start: int = 0,
fillna: str = None,
fit_target: Union[Transformer, object] = None):
if isinstance(alpha_factors, Transformer):
transformer = alpha_factors
else:
transformer = Transformer(alpha_factors)
p = Period(frequency)
p = Period(length=-(warm_start + batch - 1) * p.length(), units=p.units())
start_date = advanceDateByCalendar('china.sse', ref_date, p, BizDayConventions.Following)
dates = makeSchedule(start_date,
ref_date,
frequency,
calendar='china.sse',
dateRule=BizDayConventions.Following,
dateGenerationRule=DateGeneration.Backward)
horizon = map_freq(frequency)
factor_df = engine.fetch_factor_range(universe, factors=transformer, dates=dates)
if fillna:
factor_df = factor_df.groupby('trade_date').apply(
lambda x: x.fillna(x.median())).reset_index(
drop=True).dropna()
else:
factor_df = factor_df.dropna()
if fit_target is None:
target_df = engine.fetch_dx_return_range(universe, dates=dates, horizon=horizon)
else:
one_more_date = advanceDateByCalendar('china.sse', dates[-1], frequency)
target_df = engine.fetch_factor_range_forward(universe, factors=fit_target,
dates=dates + [one_more_date])
target_df = target_df[target_df.trade_date.isin(dates)]
target_df = target_df.groupby('code').apply(lambda x: x.fillna(method='pad'))
names = transformer.names
if neutralized_risk:
risk_df = engine.fetch_risk_model_range(universe, dates=dates, risk_model=risk_model)[1]
used_neutralized_risk = list(set(neutralized_risk).difference(names))
risk_df = risk_df[['trade_date', 'code'] + used_neutralized_risk].dropna()
train_x = pd.merge(factor_df, risk_df, on=['trade_date', 'code'])
train_x = pd.merge(train_x, target_df, on=['trade_date', 'code'], how='left')
risk_exp = train_x[neutralized_risk].values.astype(float)
else:
train_x = pd.merge(factor_df, target_df, on=['trade_date', 'code'], how='left')
risk_exp = None
train_x.dropna(inplace=True, subset=train_x.columns[:-1])
x_values = train_x[names].values.astype(float)
y_values = train_x[['dx']].values.astype(float)
date_label = pd.DatetimeIndex(train_x.trade_date).to_pydatetime()
dates = np.unique(date_label)
if dates[-1] == dt.datetime.strptime(ref_date, '%Y-%m-%d'):
end = dates[-1]
start = dates[-batch] if batch <= len(dates) else dates[0]
left_index = bisect.bisect_left(date_label, start)
right_index = bisect.bisect_right(date_label, end)
this_raw_x = x_values[left_index:right_index]
this_raw_y = y_values[left_index:right_index]
sub_dates = date_label[left_index:right_index]
if risk_exp is not None:
this_risk_exp = risk_exp[left_index:right_index]
else:
this_risk_exp = None
ne_x = factor_processing(this_raw_x,
pre_process=pre_process,
risk_factors=this_risk_exp,
post_process=post_process)
ne_y = factor_processing(this_raw_y,
pre_process=pre_process,
risk_factors=this_risk_exp,
post_process=post_process)
inner_left_index = bisect.bisect_left(sub_dates, end)
inner_right_index = bisect.bisect_right(sub_dates, end)
ne_x = ne_x[inner_left_index:inner_right_index]
ne_y = ne_y[inner_left_index:inner_right_index]
left_index = bisect.bisect_left(date_label, end)
right_index = bisect.bisect_right(date_label, end)
codes = train_x.code.values[left_index:right_index]
else:
ne_x = None
ne_y = None
codes = None
ret = dict()
ret['x_names'] = transformer.names
ret['predict'] = {'x': pd.DataFrame(ne_x, columns=transformer.names, index=codes), 'code': codes,
'y': ne_y.flatten()}
return ret
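# A minimal usage sketch (illustrative; the factor name, benchmark code and risk factor are
# placeholders, and `engine`/`universe` are assumed to be configured elsewhere):
#   package = fetch_data_package(engine,
#                                alpha_factors=['some_factor'],
#                                start_date='2018-01-01',
#                                end_date='2018-06-30',
#                                frequency='1w',
#                                universe=universe,
#                                benchmark=905,
#                                batch=4,
#                                neutralized_risk=['SIZE'])
#   x_names = package['x_names']
#   train_x_buckets = package['train']['x']   # dict keyed by period end date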
|
Echoes/Filezilla.py | xeddmc/BrainDamage | 1,520 | 27277 | # Based on the work of https://github.com/AlessandroZ/LaZagne/blob/master/Windows/lazagne/
import xml.etree.cElementTree as ET
import os, base64
class Filezilla():
def __init__(self):
options = {'command': '-f', 'action': 'store_true', 'dest': 'filezilla', 'help': 'filezilla'}
def run(self):
if 'APPDATA' in os.environ:
directory = os.environ['APPDATA'] + '\FileZilla'
else:
return
interesting_xml_file = []
info_xml_file = []
if os.path.exists(os.path.join(directory, 'sitemanager.xml')):
interesting_xml_file.append('sitemanager.xml')
info_xml_file.append('Stores all saved sites server info including password in plaintext')
if os.path.exists(os.path.join(directory, 'recentservers.xml')):
interesting_xml_file.append('recentservers.xml')
info_xml_file.append('Stores all recent server info including password in plaintext')
if os.path.exists(os.path.join(directory, 'filezilla.xml')):
interesting_xml_file.append('filezilla.xml')
info_xml_file.append('Stores most recent server info including password in plaintext')
if interesting_xml_file != []:
pwdFound = []
for i in range(len(interesting_xml_file)):
xml_file = os.path.expanduser(directory + os.sep + interesting_xml_file[i])
tree = ET.ElementTree(file=xml_file)
root = tree.getroot()
servers = root.getchildren()
for ss in servers:
server = ss.getchildren()
jump_line = 0
for s in server:
s1 = s.getchildren()
values = {}
for s11 in s1:
if s11.tag == 'Host':
values[s11.tag] = s11.text
if s11.tag == 'Port':
values[s11.tag] = s11.text
if s11.tag == 'User':
values['Login'] = s11.text
if s11.tag == 'Pass':
try:
# if base64 encoding
if 'encoding' in s11.attrib:
if s11.attrib['encoding'] == 'base64':
values['Password'] = base64.b64decode(s11.text)
else:
                                            values['Password'] = s11.text
except:
                                    values['Password'] = s11.text
# password found
if len(values) != 0:
pwdFound.append(values)
# print the results
return pwdFound
else:
pass
#tem = Filezilla()
#a = tem.run()
#print a
|
libcity/model/trajectory_loc_prediction/SERM.py | moghadas76/test_bigcity | 221 | 27280 | <filename>libcity/model/trajectory_loc_prediction/SERM.py<gh_stars>100-1000
import torch
import torch.nn as nn
import numpy as np
from libcity.model.abstract_model import AbstractModel
from torch.nn.utils.rnn import pack_padded_sequence
from torch.nn.utils.rnn import pad_packed_sequence
class EmbeddingMatrix(nn.Module): # text_embdeding
def __init__(self, input_size, output_size, word_vec):
super(EmbeddingMatrix, self).__init__()
self.input_size = input_size
self.output_size = output_size
self.layer = nn.Linear(in_features=self.input_size, out_features=self.output_size, bias=False)
self.init_weight(word_vec)
def init_weight(self, word_vec):
        # word_vec is the initial weight matrix of the text embedding, passed in via data_feature.
        # self.weight is output_size*input_size, namely length_of_wordvect_glove_pretrained(50)
        # *text_size(the size of dictionary)
        # Following the paper's source code, word_vec = text_size(the size of dictionary)*length_of_wordvect_glove_pretrained
        word_vec = torch.Tensor(word_vec).t()  # transpose
self.layer.weight = nn.Parameter(word_vec)
def forward(self, x): # x:batch*seq*input_size
# return torch.matmul(x, self.weights) #batch*seq*text_size * text_size*output_size = batch*seq*output_size
return self.layer(x) # batch*seq*output_size
class SERM(AbstractModel):
def __init__(self, config, data_feature):
super(SERM, self).__init__(config, data_feature)
# initialize parameters
# print(config['dataset_class'])
self.loc_size = data_feature['loc_size']
self.loc_emb_size = config['loc_emb_size']
self.tim_size = data_feature['tim_size']
self.tim_emb_size = config['tim_emb_size']
self.user_size = data_feature['uid_size']
        self.user_emb_size = data_feature['loc_size']  # following the paper
self.text_size = data_feature['text_size']
        self.text_emb_size = len(data_feature['word_vec'][0])  # constrained by the length of the pretrained word vectors
self.hidden_size = config['hidden_size']
self.word_one_hot_matrix = np.eye(self.text_size)
self.device = config['device']
# Embedding layer
self.emb_loc = nn.Embedding(num_embeddings=self.loc_size, embedding_dim=self.loc_emb_size,
padding_idx=data_feature['loc_pad'])
self.emb_tim = nn.Embedding(num_embeddings=self.tim_size, embedding_dim=self.tim_emb_size,
padding_idx=data_feature['tim_pad'])
self.emb_user = nn.Embedding(num_embeddings=self.user_size, embedding_dim=self.user_emb_size)
self.emb_text = EmbeddingMatrix(self.text_size, self.text_emb_size, data_feature['word_vec'])
# lstm layer
self.lstm = nn.LSTM(input_size=self.loc_emb_size + self.tim_emb_size + self.text_emb_size,
hidden_size=self.hidden_size)
# self.lstm = nn.LSTM(input_size=self.loc_emb_size + self.tim_emb_size, hidden_size=self.hidden_size)
# dense layer
self.dense = nn.Linear(in_features=self.hidden_size, out_features=self.loc_size)
# init weight
self.apply(self._init_weight)
def _init_weight(self, module):
if isinstance(module, nn.Embedding):
nn.init.xavier_normal_(module.weight)
elif isinstance(module, nn.Linear):
nn.init.xavier_uniform_(module.weight)
elif isinstance(module, nn.LSTM):
for name, param in module.named_parameters():
if 'weight_ih' in name:
nn.init.xavier_uniform_(param.data)
elif 'weight_hh' in name:
nn.init.orthogonal_(param.data)
elif 'bias' in name:
nn.init.constant_(param.data, 0)
def forward(self, batch):
loc = batch['current_loc']
tim = batch['current_tim']
user = batch['uid']
text = batch['text']
max_len = batch['current_loc'].shape[1]
text_pad = np.zeros((self.text_size))
        # text is currently a list of word indices; it still needs one-hot encoding
one_hot_text = []
for word_index in text:
one_hot_text_a_slice = []
for words in word_index:
if len(words) == 0:
one_hot_text_a_slice.append(np.zeros((self.text_size)))
else:
one_hot_text_a_slice.append(np.sum(self.word_one_hot_matrix[words], axis=0) /
len(words))
# pad
one_hot_text_a_slice += [text_pad] * (max_len - len(one_hot_text_a_slice))
one_hot_text.append(np.array(one_hot_text_a_slice)) # batch_size * seq_len * text_size
one_hot_text = torch.FloatTensor(one_hot_text).to(self.device)
loc_emb = self.emb_loc(loc)
tim_emb = self.emb_tim(tim)
user_emb = self.emb_user(user)
text_emb = self.emb_text(one_hot_text)
# change batch*seq*emb_size to seq*batch*emb_size
x = torch.cat([loc_emb, tim_emb, text_emb], dim=2).permute(1, 0, 2)
# attrs_latent = torch.cat([loc_emb, tim_emb], dim=2).permute(1, 0, 2)
# print(attrs_latent.size())
# pack attrs_latent
seq_len = batch.get_origin_len('current_loc')
pack_x = pack_padded_sequence(x, lengths=seq_len, enforce_sorted=False)
lstm_out, (h_n, c_n) = self.lstm(pack_x) # seq*batch*hidden_size
# print(lstm_out.size())
# unpack
lstm_out, out_len = pad_packed_sequence(lstm_out, batch_first=True)
# user_emb is batch*loc_size, so we need get the final lstm_out
for i in range(lstm_out.shape[0]):
if i == 0:
                out = lstm_out[0][seq_len[i] - 1].reshape(1, -1)  # .reshape(1, -1) turns it into a single row
else:
out = torch.cat((out, lstm_out[i][seq_len[i] - 1].reshape(1, -1)), 0)
dense = self.dense(out) # batch * loc_size
out_vec = torch.add(dense, user_emb) # batch * loc_size
pred = nn.LogSoftmax(dim=1)(out_vec) # result
# print(pred.size())
return pred # batch*loc_size
def predict(self, batch):
return self.forward(batch)
def calculate_loss(self, batch):
criterion = nn.NLLLoss()
scores = self.forward(batch) # batch*loc_size
return criterion(scores, batch['target'])
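# Tensor-shape summary for SERM.forward (descriptive only):
#   loc/tim embeddings          : batch * seq * emb_size
#   averaged one-hot text       : batch * seq * text_size -> text embedding batch * seq * text_emb_size
#   LSTM input (after permute)  : seq * batch * (loc_emb_size + tim_emb_size + text_emb_size)
#   final prediction            : batch * loc_size log-probabilities (LogSoftmax)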
|
src/api/test/test_datahub_serializer.py | RogerTangos/datahub-stub | 192 | 27300 | <filename>src/api/test/test_datahub_serializer.py<gh_stars>100-1000
from mock import patch
from django.test import TestCase
from ..serializer import DataHubSerializer
class DataHubSerializerTests(TestCase):
"""Test DataHubSerializer methods"""
def setUp(self):
self.username = "delete_me_username"
self.repo_base = "delete_me_repo_base"
self.password = "<PASSWORD>"
self.mock_manager = self.create_patch(
'api.serializer.DataHubManager')
self.serializer = DataHubSerializer(
username=self.username, repo_base=self.repo_base)
def create_patch(self, name):
# helper method for creating patches
patcher = patch(name)
thing = patcher.start()
self.addCleanup(patcher.stop)
return thing
def test_initialization(self):
dataHubSerializer = DataHubSerializer(
username=self.username, repo_base=self.repo_base)
self.assertEqual(dataHubSerializer.username, self.username)
self.assertEqual(dataHubSerializer.repo_base, self.repo_base)
self.assertEqual(
self.mock_manager.call_args[1]['repo_base'], self.repo_base)
self.assertEqual(
self.mock_manager.call_args[1]['user'], self.username)
|
cflearn/api/ml/interface.py | carefree0910/carefree-learn | 400 | 27370 | import os
import json
import shutil
import numpy as np
from typing import Any
from typing import Dict
from typing import List
from typing import Type
from typing import Tuple
from typing import Union
from typing import Callable
from typing import Optional
from typing import NamedTuple
from tqdm.autonotebook import tqdm
from cfdata.tabular import TabularData
from cftool.ml import ModelPattern
from cftool.ml import EnsemblePattern
from cftool.dist import Parallel
from cftool.misc import update_dict
from cftool.misc import shallow_copy_dict
from cftool.ml.utils import patterns_type
from cftool.ml.utils import Comparer
from cftool.ml.utils import Estimator
from .pipeline import SimplePipeline
from .pipeline import CarefreePipeline
from ...data import MLData
from ...data import MLInferenceData
from ...trainer import get_sorted_checkpoints
from ...constants import SCORES_FILE
from ...constants import WARNING_PREFIX
from ...constants import CHECKPOINTS_FOLDER
from ...constants import ML_PIPELINE_SAVE_NAME
from ...dist.ml import Experiment
from ...dist.ml import ExperimentResults
from ...misc.toolkit import to_2d
from ...misc.toolkit import get_latest_workplace
from ...models.ml.protocol import MLCoreProtocol
def register_core(name: str) -> Callable[[Type], Type]:
return MLCoreProtocol.register(name)
pipelines_type = Dict[str, List[SimplePipeline]]
various_pipelines_type = Union[
SimplePipeline,
List[SimplePipeline],
Dict[str, SimplePipeline],
pipelines_type,
]
def _to_pipelines(pipelines: various_pipelines_type) -> pipelines_type:
if isinstance(pipelines, dict):
pipeline_dict = {}
for key, value in pipelines.items():
if isinstance(value, list):
pipeline_dict[key] = value
else:
pipeline_dict[key] = [value]
else:
if not isinstance(pipelines, list):
pipelines = [pipelines]
pipeline_dict = {}
for pipeline in pipelines:
assert pipeline.model is not None
key = pipeline.model.__identifier__
pipeline_dict.setdefault(key, []).append(pipeline)
return pipeline_dict
def evaluate(
data: Union[MLData, MLInferenceData],
*,
metrics: Union[str, List[str]],
metric_configs: Optional[Union[Dict[str, Any], List[Dict[str, Any]]]] = None,
contains_labels: bool = True,
pipelines: Optional[various_pipelines_type] = None,
predict_config: Optional[Dict[str, Any]] = None,
other_patterns: Optional[Dict[str, patterns_type]] = None,
comparer_verbose_level: Optional[int] = 1,
) -> Comparer:
if not contains_labels:
err_msg = "`cflearn.evaluate` must be called with `contains_labels = True`"
raise ValueError(err_msg)
if metric_configs is None:
metric_configs = [{} for _ in range(len(metrics))]
patterns = {}
x, y = data.x_train, data.y_train
if pipelines is None:
msg = None
if y is None:
msg = "either `pipelines` or `y` should be provided"
if other_patterns is None:
msg = "either `pipelines` or `other_patterns` should be provided"
if msg is not None:
raise ValueError(msg)
else:
pipelines = _to_pipelines(pipelines)
# get data
# TODO : different pipelines may have different labels
if y is not None:
y = to_2d(y)
else:
if not isinstance(x, str):
raise ValueError("`x` should be str when `y` is not provided")
data_pipeline = list(pipelines.values())[0][0]
if not isinstance(data_pipeline, CarefreePipeline):
raise ValueError("only `CarefreePipeline` can handle file inputs")
cf_data = data_pipeline.cf_data
assert cf_data is not None
x, y = cf_data.read_file(x, contains_labels=contains_labels)
y = cf_data.transform(x, y).y
# get metrics
if predict_config is None:
predict_config = {}
predict_config.setdefault("contains_labels", contains_labels)
for name, pipeline_list in pipelines.items():
patterns[name] = [
pipeline.to_pattern(**predict_config) for pipeline in pipeline_list
]
if other_patterns is not None:
for other_name in other_patterns.keys():
if other_name in patterns:
print(
f"{WARNING_PREFIX}'{other_name}' is found in "
"`other_patterns`, it will be overwritten"
)
update_dict(other_patterns, patterns)
if isinstance(metrics, list):
metrics_list = metrics
else:
assert isinstance(metrics, str)
metrics_list = [metrics]
if isinstance(metric_configs, list):
metric_configs_list = metric_configs
else:
assert isinstance(metric_configs, dict)
metric_configs_list = [metric_configs]
estimators = [
Estimator(metric, metric_config=metric_config)
for metric, metric_config in zip(metrics_list, metric_configs_list)
]
comparer = Comparer(patterns, estimators)
comparer.compare(data, y, verbose_level=comparer_verbose_level)
return comparer
def task_loader(
workplace: str,
pipeline_base: Type[SimplePipeline] = CarefreePipeline,
compress: bool = True,
) -> SimplePipeline:
export_folder = os.path.join(workplace, ML_PIPELINE_SAVE_NAME)
m = pipeline_base.load(export_folder=export_folder, compress=compress)
assert isinstance(m, SimplePipeline)
return m
def load_experiment_results(
results: ExperimentResults,
pipeline_base: Type[SimplePipeline],
) -> pipelines_type:
pipelines_dict: Dict[str, Dict[int, SimplePipeline]] = {}
iterator = list(zip(results.workplaces, results.workplace_keys))
for workplace, workplace_key in tqdm(iterator, desc="load"):
pipeline = task_loader(workplace, pipeline_base)
model, str_i = workplace_key
pipelines_dict.setdefault(model, {})[int(str_i)] = pipeline
return {k: [v[i] for i in sorted(v)] for k, v in pipelines_dict.items()}
class RepeatResult(NamedTuple):
data: Optional[TabularData]
experiment: Optional[Experiment]
pipelines: Optional[Dict[str, List[SimplePipeline]]]
patterns: Optional[Dict[str, List[ModelPattern]]]
def repeat_with(
data: MLData,
*,
pipeline_base: Type[SimplePipeline] = CarefreePipeline,
workplace: str = "_repeat",
models: Union[str, List[str]] = "fcnn",
model_configs: Optional[Dict[str, Dict[str, Any]]] = None,
predict_config: Optional[Dict[str, Any]] = None,
sequential: Optional[bool] = None,
num_jobs: int = 1,
num_repeat: int = 5,
return_patterns: bool = True,
compress: bool = True,
use_tqdm: bool = True,
available_cuda_list: Optional[List[int]] = None,
resource_config: Optional[Dict[str, Any]] = None,
task_meta_kwargs: Optional[Dict[str, Any]] = None,
is_fix: bool = False,
**kwargs: Any,
) -> RepeatResult:
if os.path.isdir(workplace) and not is_fix:
print(f"{WARNING_PREFIX}'{workplace}' already exists, it will be erased")
shutil.rmtree(workplace)
kwargs = shallow_copy_dict(kwargs)
if isinstance(models, str):
models = [models]
if sequential is None:
sequential = num_jobs <= 1
if model_configs is None:
model_configs = {}
def is_buggy(i_: int, model_: str) -> bool:
i_workplace = os.path.join(workplace, model_, str(i_))
i_latest_workplace = get_latest_workplace(i_workplace)
if i_latest_workplace is None:
return True
checkpoint_folder = os.path.join(i_latest_workplace, CHECKPOINTS_FOLDER)
if not os.path.isfile(os.path.join(checkpoint_folder, SCORES_FILE)):
return True
if not get_sorted_checkpoints(checkpoint_folder):
return True
return False
def fetch_config(core_name: str) -> Dict[str, Any]:
local_kwargs = shallow_copy_dict(kwargs)
assert model_configs is not None
local_core_config = model_configs.setdefault(core_name, {})
local_kwargs["core_name"] = core_name
local_kwargs["core_config"] = shallow_copy_dict(local_core_config)
return shallow_copy_dict(local_kwargs)
pipelines_dict: Optional[Dict[str, List[SimplePipeline]]] = None
if sequential:
cuda = kwargs.pop("cuda", None)
experiment = None
tqdm_settings = kwargs.setdefault("tqdm_settings", {})
tqdm_settings["tqdm_position"] = 2
if not return_patterns:
print(
f"{WARNING_PREFIX}`return_patterns` should be "
"True when `sequential` is True, because patterns "
"will always be generated"
)
return_patterns = True
pipelines_dict = {}
if not use_tqdm:
iterator = models
else:
iterator = tqdm(models, total=len(models), position=0)
for model in iterator:
local_pipelines = []
sub_iterator = range(num_repeat)
if use_tqdm:
sub_iterator = tqdm(
sub_iterator,
total=num_repeat,
position=1,
leave=False,
)
for i in sub_iterator:
if is_fix and not is_buggy(i, model):
continue
local_config = fetch_config(model)
local_workplace = os.path.join(workplace, model, str(i))
local_config.setdefault("workplace", local_workplace)
m = pipeline_base(**local_config)
m.fit(data, cuda=cuda)
local_pipelines.append(m)
pipelines_dict[model] = local_pipelines
else:
if num_jobs <= 1:
print(
f"{WARNING_PREFIX}we suggest setting `sequential` "
f"to True when `num_jobs` is {num_jobs}"
)
# data
data_folder = Experiment.dump_data_bundle(
data.x_train,
data.y_train,
data.x_valid,
data.y_valid,
workplace=workplace,
)
# experiment
experiment = Experiment(
num_jobs=num_jobs,
available_cuda_list=available_cuda_list,
resource_config=resource_config,
)
for model in models:
for i in range(num_repeat):
if is_fix and not is_buggy(i, model):
continue
local_config = fetch_config(model)
experiment.add_task(
model=model,
compress=compress,
root_workplace=workplace,
workplace_key=(model, str(i)),
config=local_config,
data_folder=data_folder,
**(task_meta_kwargs or {}),
)
# finalize
results = experiment.run_tasks(use_tqdm=use_tqdm)
if return_patterns:
pipelines_dict = load_experiment_results(results, pipeline_base)
patterns = None
if return_patterns:
assert pipelines_dict is not None
if predict_config is None:
predict_config = {}
patterns = {
model: [m.to_pattern(**predict_config) for m in pipelines]
for model, pipelines in pipelines_dict.items()
}
cf_data = None
if patterns is not None:
m = patterns[models[0]][0].model
if isinstance(m, CarefreePipeline):
cf_data = m.cf_data
return RepeatResult(cf_data, experiment, pipelines_dict, patterns)
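# A minimal usage sketch (illustrative; model names and the metric are placeholders, and
# `data` is assumed to be an MLData instance with labels):
#   result = repeat_with(data, models=["fcnn", "linear"], num_repeat=3, num_jobs=1)
#   comparer = evaluate(data, metrics=["mae"], other_patterns=result.patterns)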
def pack_repeat(
workplace: str,
pipeline_base: Type[SimplePipeline],
*,
num_jobs: int = 1,
) -> List[str]:
sub_workplaces = []
for stuff in sorted(os.listdir(workplace)):
stuff_path = os.path.join(workplace, stuff)
if not os.path.isdir(stuff_path):
continue
sub_workplaces.append(get_latest_workplace(stuff_path))
rs = Parallel(num_jobs).grouped(pipeline_base.pack, sub_workplaces).ordered_results
return sum(rs, [])
def pick_from_repeat_and_pack(
workplace: str,
pipeline_base: Type[SimplePipeline],
*,
num_pick: int,
num_jobs: int = 1,
) -> List[str]:
score_workplace_pairs = []
for stuff in sorted(os.listdir(workplace)):
stuff_path = os.path.join(workplace, stuff)
if not os.path.isdir(stuff_path):
continue
sub_workplace = get_latest_workplace(stuff_path)
assert sub_workplace is not None, "internal error occurred"
score_path = os.path.join(sub_workplace, CHECKPOINTS_FOLDER, SCORES_FILE)
with open(score_path, "r") as f:
score = float(max(json.load(f).values()))
score_workplace_pairs.append((score, sub_workplace))
score_workplace_pairs = sorted(score_workplace_pairs)[::-1]
sub_workplaces = [pair[1] for pair in score_workplace_pairs[:num_pick]]
rs = Parallel(num_jobs).grouped(pipeline_base.pack, sub_workplaces).ordered_results
return sum(rs, [])
def make_toy_model(
model: str = "fcnn",
config: Optional[Dict[str, Any]] = None,
*,
pipeline_type: str = "ml.carefree",
is_classification: bool = False,
cf_data_config: Optional[Dict[str, Any]] = None,
data_tuple: Optional[Tuple[np.ndarray, np.ndarray]] = None,
cuda: Optional[str] = None,
) -> SimplePipeline:
if config is None:
config = {}
if data_tuple is not None:
x_np, y_np = data_tuple
else:
if not is_classification:
x, y = [[0]], [[1.0]]
else:
x, y = [[0], [1]], [[1], [0]]
x_np, y_np = map(np.array, [x, y])
model_config = {}
if model in ("fcnn", "tree_dnn"):
model_config = {
"hidden_units": [100],
"batch_norm": False,
"dropout": 0.0,
}
base_config = {
"core_name": model,
"core_config": model_config,
"output_dim": 1 + int(is_classification),
"num_epoch": 2,
"max_epoch": 4,
}
updated = update_dict(config, base_config)
m = SimplePipeline.make(pipeline_type, updated)
assert isinstance(m, SimplePipeline)
if cf_data_config is None:
cf_data_config = {}
cf_data_config = update_dict(
cf_data_config,
dict(
valid_columns=list(range(x_np.shape[1])),
label_process_method="identical",
),
)
data = MLData.with_cf_data(
x_np,
y_np,
is_classification=is_classification,
cf_data_config=cf_data_config,
valid_split=0.0,
)
m.fit(data, cuda=cuda)
return m
__all__ = [
"register_core",
"evaluate",
"task_loader",
"load_experiment_results",
"repeat_with",
"pack_repeat",
"pick_from_repeat_and_pack",
"make_toy_model",
"ModelPattern",
"EnsemblePattern",
]
|
test/testing/test_pandas_assert.py | S-aiueo32/gokart | 255 | 27390 | <reponame>S-aiueo32/gokart
import unittest
import pandas as pd
import gokart
class TestPandasAssert(unittest.TestCase):
def test_assert_frame_contents_equal(self):
expected = pd.DataFrame(data=dict(f1=[1, 2, 3], f3=[111, 222, 333], f2=[4, 5, 6]), index=[0, 1, 2])
resulted = pd.DataFrame(data=dict(f2=[5, 4, 6], f1=[2, 1, 3], f3=[222, 111, 333]), index=[1, 0, 2])
gokart.testing.assert_frame_contents_equal(resulted, expected)
def test_assert_frame_contents_equal_with_small_error(self):
expected = pd.DataFrame(data=dict(f1=[1.0001, 2.0001, 3.0001], f3=[111, 222, 333], f2=[4, 5, 6]), index=[0, 1, 2])
resulted = pd.DataFrame(data=dict(f2=[5, 4, 6], f1=[2.0002, 1.0002, 3.0002], f3=[222, 111, 333]), index=[1, 0, 2])
gokart.testing.assert_frame_contents_equal(resulted, expected, atol=1e-1)
def test_assert_frame_contents_equal_with_duplicated_columns(self):
expected = pd.DataFrame(data=dict(f1=[1, 2, 3], f3=[111, 222, 333], f2=[4, 5, 6]), index=[0, 1, 2])
expected.columns = ['f1', 'f1', 'f2']
resulted = pd.DataFrame(data=dict(f2=[5, 4, 6], f1=[2, 1, 3], f3=[222, 111, 333]), index=[1, 0, 2])
resulted.columns = ['f2', 'f1', 'f1']
with self.assertRaises(AssertionError):
gokart.testing.assert_frame_contents_equal(resulted, expected)
def test_assert_frame_contents_equal_with_duplicated_indexes(self):
expected = pd.DataFrame(data=dict(f1=[1, 2, 3], f3=[111, 222, 333], f2=[4, 5, 6]), index=[0, 1, 2])
expected.index = [0, 1, 1]
resulted = pd.DataFrame(data=dict(f2=[5, 4, 6], f1=[2, 1, 3], f3=[222, 111, 333]), index=[1, 0, 2])
        resulted.index = [1, 0, 1]
with self.assertRaises(AssertionError):
gokart.testing.assert_frame_contents_equal(resulted, expected)
|
WebMirror/util/StatusUpdater/Updater.py | awesome-archive/ReadableWebProxy | 193 | 27430 |
if __name__ == "__main__":
import logSetup
logSetup.initLogging()
import pickle
from common import database
import config
import common.LogBase
import WebMirror.rules
from WebMirror.OutputFilters.util.MessageConstructors import pack_message
import WebMirror.TimedTriggers.TriggerBase
import common.get_rpyc
# import WebMirror.OutputFilters.AmqpInterface
class MetaUpdater(WebMirror.TimedTriggers.TriggerBase.TriggerBaseClass):
pluginName = "Meta Updater"
loggerPath = 'MetaUpdater'
def __init__(self):
super().__init__()
# print()
self.rpc_interface = common.get_rpyc.RemoteJobInterface("FeedUpdater")
# if config.C_DO_RABBIT:
# print("No message queue! Doing independent RabbitMQ connection!")
# # traceback.print_stack()
# # print("Wat?")
# # print()
# self.msg_q = False
# amqp_settings = {
# "RABBIT_LOGIN" : config.C_RABBIT_LOGIN,
# "RABBIT_PASWD" : config.C_RABBIT_PASWD,
# "RABBIT_SRVER" : config.C_RABBIT_SRVER,
# "RABBIT_VHOST" : config.C_RABBIT_VHOST,
# 'taskq_task' : 'task.master.q',
# 'taskq_response' : 'response.master.q',
# }
# self._amqpint = WebMirror.OutputFilters.AmqpInterface.RabbitQueueHandler(amqp_settings)
def get_feed_count_message(self):
feeds = set()
for ruleset in WebMirror.rules.load_rules():
feeds |= set(ruleset['feedurls'])
data = {
"feed-count" : len(feeds)
}
return pack_message("system-feed-counts", data)
def get_times(self):
with common.database.session_context() as conn:
aps = conn.execute("SELECT job_state FROM apscheduler_jobs;")
update_times = []
for blob, in aps:
job_dict = pickle.loads(blob)
update_times.append((
job_dict['id'],
job_dict['next_run_time'].isoformat()
))
data = {
"update-times" : update_times,
}
database.delete_db_session()
return pack_message("system-update-times", data)
def go(self):
feeds = self.get_feed_count_message()
times = self.get_times()
self.rpc_interface.put_feed_job(feeds)
self.rpc_interface.put_feed_job(times)
# self._amqpint.put_item(feeds)
# self._amqpint.put_item(times)
def do_meta_update():
updator = MetaUpdater()
updator._go()
updator = MetaUpdater()
updator._go()
updator = MetaUpdater()
updator._go()
if __name__ == '__main__':
do_meta_update()
|
data_structures/heap/heap_using_heapq.py | ruler30cm/python-ds | 1,723 | 27469 | <reponame>ruler30cm/python-ds
"""
Heap in python using heapq library function
Note: by default, heapq implements a min-heap. To simulate a
max-heap, push items multiplied by -1 and negate them again when popping.
"""
from heapq import heappop, heappush, heapify
heap = []
heapify(heap)
heappush(heap, 10)
heappush(heap, 11)
heappush(heap, 2)
heappush(heap, 4)
heappush(heap, 14)
heappush(heap, 1)
print('first element - ', heap[0])
print('popping min element - ', heappop(heap))
print('first element - ', heap[0])
# Heap prints as an array and can be access using indexes
print(heap)
print(heap[2])
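# A minimal sketch of the max-heap trick described in the docstring above:
# push negated values and negate them again when reading/popping.
max_heap = []
for value in [10, 11, 2, 4, 14, 1]:
    heappush(max_heap, -value)
print('max element - ', -max_heap[0])
print('popping max element - ', -heappop(max_heap))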
|
S2.Surface_Normal/regNormalNet/regNormalNet.py | leoshine/Spherical_Regression | 133 | 27496 | <filename>S2.Surface_Normal/regNormalNet/regNormalNet.py<gh_stars>100-1000
# coding: utf8
"""
@Author : <NAME>
"""
import os
import torch.nn as nn
import torch.utils.model_zoo as model_zoo
from torch.autograd import Variable
import torch
from basic.common import rdict
import numpy as np
from easydict import EasyDict as edict
from collections import OrderedDict as odict
from itertools import product
from basic.common import add_path, env
this_dir = os.path.dirname(os.path.abspath(__file__))
add_path(this_dir+'/../lib/')
from helper import *
from model import VGG16_Trunk
from modelSE import VGG16_Trunk as VGG16SE_Trunk
# net_arch2Trunk = dict(
# vgg16 = VGG16_Trunk,
# vgg16se = VGG16SE_Trunk,
# )
net_arch2Trunk = dict(
vgg16=dict(
Sflat = VGG16_Trunk,
Sexp = VGG16SE_Trunk,
),
)
from pytorch_util.libtrain import copy_weights, init_weights_by_filling
from pytorch_util.torch_v4_feature import LocalResponseNorm # *
from pytorch_util.torch_3rd_layers import Maskout
from pytorch_util.torch_3rd_funcs import norm2unit, exp_Normalization
def cls_pred(output, topk=(1,), dim=1):
maxk = max(topk)
batch_size = output.size(0)
_, pred = output.topk(maxk, dim=dim, largest=True, sorted=True)
return pred
class _regNormalNet(nn.Module):
def __init__(self, method, net_arch='vgg16', init_weights=True):
super(_regNormalNet, self).__init__()
_Trunk = net_arch2Trunk[net_arch][method]
self.trunk = _Trunk(init_weights=init_weights)
    def forward(self, x, label):
raise NotImplementedError
#---------------------------------------------------------------------[regQuat]
class reg_Sflat_Net(_regNormalNet):
def __init__(self, net_arch='vgg16', init_weights=True):
_regNormalNet.__init__(self, 'Sflat', net_arch=net_arch, init_weights=init_weights)
# loss module
self.loss_handler = Cos_Proximity_Loss_Handler()
self.targets = ['norm']
def forward(self, x):
"""label shape (batchsize, ) """
x = self.trunk(x) # Forward Conv and Fc6,Fc7
#
batchsize = x.size(0) # x of shape (40, 3, 240, 320)
        #-- Normalize coordinates to a unit vector
x_norm = norm2unit(x, dim=1)
Prob = edict(norm=x_norm.permute(0,2,3,1).double()) # transpose prediction from BxCxHxW to BxHxWxC order.
return Prob
def compute_loss(self, Prob, GT):
Loss, Errs = self.loss_handler.compute_loss(self.targets, Prob, GT)
_metric_ = edict(norm=Errs['norm'])
return Loss, _metric_
def compute_pred(self, Prob, encode_bit=8):
x_norm = Prob['norm']
# Get cpu data.
norm = x_norm.data.cpu().numpy().copy() # B,H,W,C
assert encode_bit in [8,16]
if encode_bit==8:
normImgs = ((norm+1)*(2**7)).astype(np.uint8) # map [-1,1] to [0,256)
else:
normImgs = ((norm+1)*(2**15)).astype(np.uint16) # map [-1,1] to [0,65535)
Pred = edict(norm=normImgs)
return Pred
#---------------------------------------------------------------------[regQuat]
class reg_Sexp_Net(_regNormalNet): # Spherical exponential Problem + sign classification
def __init__(self, net_arch='vgg16', init_weights=True):
_regNormalNet.__init__(self, 'Sexp', net_arch=net_arch, init_weights=init_weights)
self.reg_n_D = 3
        # Note: for a surface normal (x,z,y) (watch out for the order),
        # z should always satisfy z<=0 (the normal should come from a visible surface),
        # thus only x,y need a sign prediction.
dim_need_sign = 2
_signs = list( product(*( [(-1,1)]*dim_need_sign )) ) # [(-1, -1), (-1, 1), (1, -1), (1, 1)], with len=4
self.signs = [(x[0],-1,x[1]) for x in _signs] # y-z-x order: [(-1, -1, -1), (-1, -1, 1), (1, -1, -1), (1, -1, 1)], with len=4; z always -1
self.signs2label = odict(zip(self.signs, range(len(self.signs))))
self.label2signs = Variable( torch.DoubleTensor(self.signs) ).cuda() # make it as a Variable
self.softmax = nn.Softmax(dim=1).cuda()
# loss module
self.loss_handler_abs_norm = Cos_Proximity_Loss_Handler()
self.loss_handler_sgc_norm = Cross_Entropy_Loss_Handler()
self.targets = ['sgc_norm','abs_norm']
self.gt_targets = ['norm']
self.cost, self.sint = torch.tensor(np.cos(np.pi/4)).double().cuda(), torch.tensor(np.sin(np.pi/4)).double().cuda()
def forward(self, x):
"""label shape (batchsize, ) """
x_abs, x_sgc = self.trunk(x) # Forward Conv and Fc6,Fc7
#
batchsize = x_abs.size(0)
        #-- Exponentiate and normalize (softmax), so the squared coordinates sum to one
x_sqr_norm = self.softmax(x_abs) #, nr_cate=self.nr_cate)
# sign category head (totally 4 category)
x_sgc_norm = x_sgc
Prob = edict(abs_norm=torch.sqrt(x_sqr_norm).permute(0,2,3,1).double(), # B,H,W,3
sgc_norm=x_sgc_norm.permute(0,2,3,1) ) # B,H,W,4
return Prob
def compute_loss(self, Prob, GT):
B,H,W,_3_ = GT.norm.size()
assert _3_==3, "Wrong dim: %s,%s,%s,%s" % (B,H,W,_3_)
# First get sign label from GT
#== Formulate squared value of quaternion
GT_abs_norm = torch.abs(GT.norm) # B,H,W,3
#== Formulate signs label of quaternion
GT_sign_norm = torch.sign(GT.norm) # B,H,W,3
#-------------------------------------
# hard coded: sign to label
#-------------------------------------
# y x label
# [-1 -1] --> 0
# [-1 1] --> 1
# [ 1 -1] --> 2
# [ 1 1] --> 3
# GT_sign_norm (B,H,W,3) in y-z-x order
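        # equivalently: label = (y_sign + 1) + (x_sign + 1) / 2, which the code below
        # computes by shifting y_sign to {0, 2} and mapping x_sign to {0, 1} before summing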
GT_sign_norm[GT_sign_norm==0] = -1 # make sign of '0' as -1 (use -1 instead of 1 just because z<=0)
y_sign, x_sign = GT_sign_norm[:,:,:,0], GT_sign_norm[:,:,:,2]
y_sign += 1 # [y_sign==-1]
x_sign[x_sign==-1] = 0
GT_sgc_norm = (y_sign+x_sign).long() # data with shape with (B,H,W) index of [0,1,2,3]
# here just because compute_loss need a same key from Prob and GT,
# so we just give a fake name to GT.sqr_quat as '_GT.logsqr_norm'.
_GT = edict(abs_norm=GT_abs_norm, sgc_norm=GT_sgc_norm, mask=GT.mask) # abs_norm: (B,H,W,3) sgc_norm: (B,H,W)
Loss_abs_norm, abs_Errs = self.loss_handler_abs_norm.compute_loss(['abs_norm'], Prob, _GT)
Loss_sgc_norm = self.loss_handler_sgc_norm.compute_loss(['sgc_norm'], Prob, _GT)
# ----------------------------------------
# Compute the metric.
sign_ind = cls_pred(Prob['sgc_norm'], topk=(1,), dim=3).data.squeeze(dim=3) # B,H,W
pr_sign_norm = self.label2signs[sign_ind] # magic here: Indexing label2signs (4x3) by sign_ind (B,H,W) becomes (B,H,W,3) (10, 240, 320, 3)
pr_abs_norm = Prob['abs_norm']
_Prob = edict(norm=pr_abs_norm * pr_sign_norm) # current predicted final norm (applied sign prediction)
_Loss_norm, out_Errs = self.loss_handler_abs_norm.compute_loss(['norm'], _Prob, GT) # just borrow loss_handler_abs_norm, nothing more.
# Compute acc of classification: sign_ind vs GT_sgc_norm
mask = GT['mask']
acc = eval_cls(sign_ind[mask], GT_sgc_norm[mask])
_metric_ = edict(abs_norm = abs_Errs['abs_norm'],
norm = out_Errs['norm'] ,
sgc_norm_acc = acc ,)
# To add loss weights here.
Loss = edict( abs_norm=Loss_abs_norm['abs_norm']*10, # / 5.
sgc_norm=Loss_sgc_norm['sgc_norm'], )
return Loss, _metric_ # .update(abs_Errs)
def compute_pred(self, Prob, encode_bit=8):
x_abs_norm = Prob['abs_norm'] # B,H,W,3
x_sgc_norm = Prob['sgc_norm'] # B,H,W,4
batchsize = x_abs_norm.size(0)
#
sign_ind = cls_pred(x_sgc_norm, topk=(1,), dim=3).data.squeeze(dim=3) # .view(-1,) # B,H,W
x_sign_norm = self.label2signs[sign_ind] # magic here: Indexing label2signs (4x3) by sign_ind (B,H,W) becomes (B,H,W,3)
#
x_norm = x_abs_norm * x_sign_norm # B,H,W,3
# --------------Recover rot45 trick --------------
# Note: since we applied rot45 trick, here we recover it back
_x_norm = x_norm.detach().clone() # return a copy of x_norm without grad
_y,_z,_x = _x_norm[:,:,:,0],_x_norm[:,:,:,1],_x_norm[:,:,:,2]
y, z, x = x_norm[:,:,:,0],x_norm[:,:,:,1],x_norm[:,:,:,2]
x[:] = self.cost*_x - self.sint*_y
y[:] = self.sint*_x + self.cost*_y
# ------------------------------------------------
# Get cpu data.
norm = x_norm.data.cpu().numpy().copy() # B,H,W,C
assert encode_bit in [8,16]
if encode_bit==8:
normImgs = ((norm+1)*(2**7)).astype(np.uint8) # map [-1,1] to [0,256)
else:
normImgs = ((norm+1)*(2**15)).astype(np.uint16) # map [-1,1] to [0,65535)
Pred = edict(norm=normImgs)
return Pred
|
timemachines/skaters/orbt/orbitlgtskaterfactory.py | iklasky/timemachines | 253 | 27524 |
from timemachines.skaters.orbt.orbitinclusion import using_orbit
if using_orbit:
from timemachines.skaters.orbt.orbitwrappers import orbit_lgt_iskater
from timemachines.skatertools.utilities.conventions import Y_TYPE, A_TYPE, R_TYPE, E_TYPE, T_TYPE
from timemachines.skatertools.batch.batchskater import batch_skater_factory
def orbit_lgt_skater_factory(y: Y_TYPE, s, k: int, a: A_TYPE = None, t: T_TYPE = None, e: E_TYPE = None, r: R_TYPE = None,
emp_mass=0.0,
seasonality=None):
return batch_skater_factory(y=y, s=s, k=k, a=a, t=t, e=e, r=r, emp_mass=emp_mass,
iskater=orbit_lgt_iskater,
iskater_kwargs={'seasonality': seasonality},
min_e=0, n_warm=20)
def orbit_lgt_12(y,s,k,a=None, t=None,e=None):
return orbit_lgt_skater_factory(y=y, s=s, k=k, a=a,t=t,e=e, seasonality=12)
def orbit_lgt_24(y,s,k,a=None, t=None,e=None):
return orbit_lgt_skater_factory(y, s, k, a=a,t=t,e=e, seasonality=24)
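# A minimal usage sketch (illustration only, not part of the library): it assumes the
# orbit package is installed (`using_orbit` is True) and that the skater follows the
# usual timemachines convention of returning (prediction, std, state).
if __name__ == '__main__':
    if using_orbit:
        import numpy as np
        y_series = 5 + np.sin(np.arange(60) / 6.0)  # hypothetical toy series
        s = {}
        for yi in y_series:
            x, x_std, s = orbit_lgt_12(y=float(yi), s=s, k=3)
        print(x)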
|
sponsors/migrations/0038_auto_20210827_1223.py | Manny27nyc/pythondotorg | 911 | 27531 | # Generated by Django 2.0.13 on 2021-08-27 12:23
from django.db import migrations
def populate_sponsorship_package_fk(apps, schema_editor):
Sponsorship = apps.get_model('sponsors.Sponsorship')
SponsorshipPackage = apps.get_model('sponsors.SponsorshipPackage')
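    # backfill: point each existing Sponsorship at the SponsorshipPackage whose name
    # matches its legacy level_name; sponsorships without a matching package are skipped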
for sponsorship in Sponsorship.objects.all().iterator():
try:
package = SponsorshipPackage.objects.get(name=sponsorship.level_name)
sponsorship.package = package
sponsorship.save()
except SponsorshipPackage.DoesNotExist:
continue
class Migration(migrations.Migration):
dependencies = [
('sponsors', '0037_sponsorship_package'),
]
operations = [
migrations.RunPython(populate_sponsorship_package_fk, migrations.RunPython.noop)
]
|
backpack/extensions/secondorder/diag_ggn/permute.py | jabader97/backpack | 395 | 27565 | """Module defining DiagGGNPermute."""
from backpack.core.derivatives.permute import PermuteDerivatives
from backpack.extensions.secondorder.diag_ggn.diag_ggn_base import DiagGGNBaseModule
class DiagGGNPermute(DiagGGNBaseModule):
"""DiagGGN extension of Permute."""
def __init__(self):
"""Initialize."""
super().__init__(derivatives=PermuteDerivatives())
|
analysis_engine/prepare_history_dataset.py | virdesai/stock-analysis-engine | 819 | 27577 | """
Helper for loading a ``Trading History`` dataset
"""
import json
import zlib
import pandas as pd
import analysis_engine.consts as ae_consts
import spylunking.log.setup_logging as log_utils
log = log_utils.build_colorized_logger(name=__name__)
def prepare_history_dataset(
data,
compress=False,
encoding='utf-8',
convert_to_dict=False,
include_keys=None,
ignore_keys=None,
convert_to_dates=None,
verbose=False):
"""prepare_history_dataset
Load a ``Trading History`` dataset into a dictionary
with a ``pd.DataFrame`` for the trading history record
list
:param data: string holding contents of a ``Trading History``
from a file, s3 key or redis-key
:param compress: optional - boolean flag for decompressing
the contents of the ``data`` if necessary
(default is ``False`` and algorithms
use ``zlib`` for compression)
    :param convert_to_dict: optional - bool; use ``False`` for s3 keys
        and ``True`` for files
    :param encoding: optional - string for data encoding
    :param include_keys: optional - list of string keys
        to include from the dataset
.. note:: tickers are automatically included in the ``pd.DataFrame``
:param ignore_keys: optional - list of string keys
to remove before building the ``pd.DataFrame``
:param convert_to_dates: optional - list of string keys
to convert to datetime before building the ``pd.DataFrame``
:param verbose: optional - bool show the logs
(default is ``False``)
"""
if verbose:
log.debug('start')
use_data = None
parsed_data = None
data_as_dict = None
if compress:
if verbose:
log.debug('decompressing')
parsed_data = zlib.decompress(
data).decode(
encoding)
else:
parsed_data = data
if not parsed_data:
log.error('failed parsing')
return None
if verbose:
log.debug('loading as dict')
use_data = {}
if convert_to_dict:
try:
data_as_dict = json.loads(parsed_data)
except Exception as e:
if (
'the JSON object must be str, bytes or '
'bytearray, not') in str(e):
log.critical(
f'failed decoding json for string - double '
f'compression for history dataset found ex={e}')
data_as_dict = parsed_data
else:
data_as_dict = parsed_data
if len(data_as_dict) == 0:
log.error(
'empty trading history dictionary')
return use_data
convert_these_date_keys = [
'date',
'minute',
'exp_date'
]
use_include_keys = [
'tickers',
'version',
'last_trade_data',
'algo_config_dict',
'algo_name',
'created'
]
if include_keys:
use_include_keys = include_keys
use_ignore_keys = []
if ignore_keys:
use_ignore_keys = ignore_keys
for k in data_as_dict:
if k in use_include_keys:
use_data[k] = data_as_dict[k]
all_records = []
num_records = 0
for ticker in data_as_dict['tickers']:
if ticker not in use_data:
use_data[ticker] = []
for node in data_as_dict[ticker]:
for ignore in use_ignore_keys:
node.pop(ignore, None)
all_records.append(node)
# end for all datasets on this date to load
num_records = len(all_records)
if num_records:
if verbose:
log.info(f'found records={num_records}')
history_df = pd.DataFrame(all_records)
for dc in convert_these_date_keys:
if dc in history_df:
history_df[dc] = pd.to_datetime(
history_df[dc],
format=ae_consts.COMMON_TICK_DATE_FORMAT)
# end of converting all date columns
use_data[ticker] = history_df
else:
log.error(
f'did not find any records={num_records} in history dataset')
# end for all tickers in the dataset
return use_data
# end of prepare_history_dataset
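# A minimal usage sketch (hypothetical payload, for illustration only): build a tiny
# trading-history dict, compress it the way algorithms do (json + zlib), then load it
# back. The date strings below assume ae_consts.COMMON_TICK_DATE_FORMAT is
# '%Y-%m-%d %H:%M:%S'.
if __name__ == '__main__':
    example_payload = {
        'tickers': ['SPY'],
        'version': 1,
        'SPY': [
            {'date': '2019-01-02 09:30:00', 'close': 250.0},
            {'date': '2019-01-02 09:31:00', 'close': 250.5},
        ],
    }
    compressed = zlib.compress(
        json.dumps(example_payload).encode('utf-8'))
    loaded = prepare_history_dataset(
        data=compressed,
        compress=True,
        convert_to_dict=True,
        verbose=True)
    print(loaded['SPY'])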
|
scripts/count_git_changes.py | ShankarNara/shogun | 2,753 | 27578 | <reponame>ShankarNara/shogun
import os
import sys
insertions=0
deletions=0
files=0
FROMVER=""
if len(sys.argv)>1:
FROMVER=sys.argv[1]
TOVER=""
if len(sys.argv)>2:
TOVER=sys.argv[2]
TMPNAME=os.tmpnam()
VER=""
if len(FROMVER)>0:
VER=FROMVER+'..'
if len(TOVER)>0:
if len(VER)==0:
VER='..'
VER=VER+TOVER
os.system('git log --oneline --shortstat %s >%s' % (VER,TMPNAME))
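# each --shortstat summary line parsed below looks like, e.g.:
#  3 files changed, 14 insertions(+), 5 deletions(-)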
for line in file(TMPNAME).readlines():
if line.find('file') == -1:
continue
if line.find('changed') == -1:
continue
if line.find('insertion') == -1 and line.find('deletion') == -1:
continue
entries=line.split(',')
for e in entries:
if e.find('file') != -1:
files+=int(e.strip().split(' ')[0])
elif e.find('insertion') != -1:
insertions+=int(e.strip().split(' ')[0])
elif e.find('deletion') != -1:
deletions+=int(e.strip().split(' ')[0])
print "Files changed: %d" % files
print "Insertions: %d" % insertions
print "Deletions: %d" % deletions
os.unlink(TMPNAME)
|
tools/telemetry/telemetry/core/platform/power_monitor/android_ds2784_power_monitor_unittest.py | kjthegod/chromium | 231 | 27591 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import unittest
from telemetry.core.platform.power_monitor import android_ds2784_power_monitor
class DS2784PowerMonitorMonitorTest(unittest.TestCase):
def testEnergyComsumption(self):
data = ('0000 1000 -10 12\n'
'1800 1000 -10 11\n'
'3600 1000 -10 09\n'
'5400 0000 -20 08\n'
'7200 0000 -20 11\n'
'9000 0000 -20 11\n')
results = (
android_ds2784_power_monitor.DS2784PowerMonitor.ParseSamplingOutput(
data))
self.assertEqual(results['power_samples_mw'], [1.2e-07, 1.1e-07, 9e-08,
1.6e-07, 2.2e-07, 2.2e-07])
self.assertEqual(results['energy_consumption_mwh'], 2.1e-07)
|
nlt/debug/dataset.py | isabella232/neural-light-transport | 176 | 27607 | <filename>nlt/debug/dataset.py
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
from os.path import join, dirname
from absl import app
import tensorflow as tf
tf.compat.v1.enable_eager_execution()
sys.path.append('../')
import datasets
from util import io as ioutil
def main(_):
config_ini = join(dirname(__file__), '..', 'config', 'dragon_specular.ini')
config = ioutil.read_config(config_ini)
# Make training dataset
dataset_name = config.get('DEFAULT', 'dataset')
Dataset = datasets.get_dataset_class(dataset_name)
dataset = Dataset(config, 'train')
path = dataset.files[1]
ret = dataset._load_data(path)
# Iterate
no_batch = config.getboolean('DEFAULT', 'no_batch')
datapipe = dataset.build_pipeline(no_batch=no_batch)
for batch_i, batch in enumerate(datapipe):
from IPython import embed; embed()
if __name__ == '__main__':
app.run(main)
|
events/migrations/0043_remove_premium_restrictions.py | alysivji/GetTogether | 446 | 27633 | # Generated by Django 2.0 on 2018-08-25 14:19
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [("events", "0042_allow_team_without_country")]
operations = [
migrations.RemoveField(model_name="team", name="is_premium"),
migrations.RemoveField(model_name="team", name="premium_by"),
migrations.RemoveField(model_name="team", name="premium_expires"),
migrations.RemoveField(model_name="team", name="premium_started"),
]
|
test/com/facebook/buck/parser/testdata/disable_implicit_native_rules/skylark/implicit_in_extension_bzl/extension.bzl | Unknoob/buck | 8,027 | 27635 | <filename>test/com/facebook/buck/parser/testdata/disable_implicit_native_rules/skylark/implicit_in_extension_bzl/extension.bzl
""" Example module """
def java_maker(*args, **kwargs):
""" Make you a java """
java_library(*args, **kwargs)
|
thriftpy2/contrib/aio/transport/framed.py | JonnoFTW/thriftpy2 | 5,079 | 27644 | <filename>thriftpy2/contrib/aio/transport/framed.py
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import struct
import asyncio
from io import BytesIO
from .base import TAsyncTransportBase, readall
from .buffered import TAsyncBufferedTransport
class TAsyncFramedTransport(TAsyncTransportBase):
"""Class that wraps another transport and frames its I/O when writing."""
def __init__(self, trans):
self._trans = trans
self._rbuf = BytesIO()
self._wbuf = BytesIO()
def is_open(self):
return self._trans.is_open()
@asyncio.coroutine
def open(self):
return (yield from self._trans.open())
def close(self):
return self._trans.close()
@asyncio.coroutine
def read(self, sz):
# Important: don't attempt to read the next frame if the caller
# doesn't actually need any data.
if sz == 0:
return b''
ret = self._rbuf.read(sz)
if len(ret) != 0:
return ret
yield from self.read_frame()
return self._rbuf.read(sz)
@asyncio.coroutine
def read_frame(self):
buff = yield from readall(self._trans.read, 4)
sz, = struct.unpack('!i', buff)
frame = yield from readall(self._trans.read, sz)
self._rbuf = BytesIO(frame)
def write(self, buf):
self._wbuf.write(buf)
@asyncio.coroutine
def flush(self):
# reset wbuf before write/flush to preserve state on underlying failure
out = self._wbuf.getvalue()
self._wbuf = BytesIO()
# N.B.: Doing this string concatenation is WAY cheaper than making
# two separate calls to the underlying socket object. Socket writes in
# Python turn out to be REALLY expensive, but it seems to do a pretty
# good job of managing string buffer operations without excessive
# copies
self._trans.write(struct.pack("!i", len(out)) + out)
yield from self._trans.flush()
def getvalue(self):
return self._trans.getvalue()
class TAsyncFramedTransportFactory(object):
def get_transport(self, trans):
return TAsyncBufferedTransport(TAsyncFramedTransport(trans))
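# A minimal sketch of the wire format used above (illustrative helper, not used by the
# classes in this module): flush() writes a 4-byte big-endian length prefix followed by
# the payload, and read_frame() reverses that.
def _demo_frame_roundtrip(payload: bytes = b'hello') -> bytes:
    framed = struct.pack('!i', len(payload)) + payload  # what flush() emits
    size, = struct.unpack('!i', framed[:4])             # what read_frame() parses
    assert framed[4:4 + size] == payload
    return framed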
|
upper_print.py | DazEB2/SimplePyScripts | 117 | 27657 | <reponame>DazEB2/SimplePyScripts<gh_stars>100-1000
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = 'ipetrash'
def upper_print(f):
def wrapper(*args, **kwargs):
f(*[i.upper() if hasattr(i, 'upper') else i for i in args], **kwargs)
return wrapper
if __name__ == '__main__':
text = 'hello world!'
print(text) # hello world!
old_print = print
print = upper_print(print)
print(text) # HELLO WORLD!
print = old_print
print(text) # hello world!
|
AppPkg/Applications/Python/Python-2.7.2/Lib/test/test_ttk_textonly.py | CEOALT1/RefindPlusUDK | 2,757 | 27659 | import os
from test import test_support
# Skip this test if _tkinter does not exist.
test_support.import_module('_tkinter')
this_dir = os.path.dirname(os.path.abspath(__file__))
lib_tk_test = os.path.abspath(os.path.join(this_dir, '..', 'lib-tk', 'test'))
with test_support.DirsOnSysPath(lib_tk_test):
import runtktests
def test_main():
with test_support.DirsOnSysPath(lib_tk_test):
test_support.run_unittest(
*runtktests.get_tests(gui=False, packages=['test_ttk']))
if __name__ == '__main__':
test_main()
|
lbworkflow/tests/purchase/models.py | wearypossum4770/django-lb-workflow | 194 | 27666 | from django.db import models
from lbworkflow.models import BaseWFObj
class Purchase(BaseWFObj):
title = models.CharField("Title", max_length=255)
reason = models.CharField("Reason", max_length=255)
def __str__(self):
return self.reason
class Item(models.Model):
purchase = models.ForeignKey(
Purchase,
on_delete=models.CASCADE,
)
name = models.CharField("Name", max_length=255)
qty = models.IntegerField("Qty")
note = models.CharField("Note", max_length=255)
class Meta:
verbose_name = "Purchase Item"
def __str__(self):
return self.name
|
cube2/server.py | bobssup/kripken | 892 | 27669 | <gh_stars>100-1000
#!/usr/bin/env python
'''
Sets up websocket server support to run the server in one HTML page and the client in another HTML page. Each connects to a websocket server, which we relay together, so the two pages think they are connected to each other (see websocket_bi tests in emscripten).
Instructions for websocket networking:
Mode 1: Two clients (one with embedded server)
1. Run this script
2. Run a webserver (e.g. python -m SimpleHTTPServer 8888)
3. Run http://localhost:8888/game.html?low,low,windowed,serve in one browser
4. Run http://localhost:8888/game.html?low,low,windowed in another browser
5. In the second browser, do /connect
'windowed' runs in non-fullscreen mode, useful to run two browsers at once - scroll
all the way down to see the canvas. 'serve' runs the embedded server in that
client.
Mode 2: Server and client
1. Run this script
2. Run a webserver (e.g. python -m SimpleHTTPServer 8888)
3. Run http://localhost:8888/server.html
4. Run http://localhost:8888/game.html?low,low
5. In the client, do /connect
Note that you likely need to run the server and client in different browsers or at least browser windows, since browsers throttle background tabs.
'''
import os, sys, multiprocessing, time
from subprocess import Popen, PIPE, STDOUT
__rootpath__ = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
def path_from_root(*pathelems):
return os.path.join(__rootpath__, *pathelems)
sys.path += [path_from_root('tools/'), path_from_root('tools/websockify')]
import websockify
def websockify_func(wsp):
wsp.start_server()
client = websockify.WebSocketProxy(verbose=True, listen_port=28785, target_host="127.0.0.1", target_port=28786, run_once=True)
client_process = multiprocessing.Process(target=websockify_func, args=(client,))
client_process.start()
print 'client on process', client_process.pid
server = websockify.WebSocketProxy(verbose=True, listen_port=28780, target_host="127.0.0.1", target_port=28781, run_once=True)
server_process = multiprocessing.Process(target=websockify_func, args=(server,))
server_process.start()
print 'server on process', server_process.pid
def relay_server(child):
child.communicate()
relay_child = Popen(['python', path_from_root('tools', 'socket_relay.py'), '28781', '28786'])
relay_process = multiprocessing.Process(target=relay_server, args=(relay_child,))
relay_process.start()
print 'relay on process', relay_process.pid
while 1:
time.sleep(1)
|
src/spaczz/regex/__init__.py | brunobg/spaczz | 153 | 27682 | <reponame>brunobg/spaczz
"""Module for regex components."""
from .regexconfig import RegexConfig
__all__ = ["RegexConfig"]
|
imagetagger/imagetagger/annotations/migrations/0006_auto_20170826_1431.py | jbargu/imagetagger | 212 | 27683 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2017-08-26 12:31
from __future__ import unicode_literals
import json
import django.contrib.postgres.fields.jsonb
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('annotations', '0005_auto_20170826_1424'),
]
def forward_func(apps, schema_editor):
Annotation = apps.get_model("annotations", "Annotation")
db_alias = schema_editor.connection.alias
# Copy all valid annotations from raw_vector to vector
for annotation in Annotation.objects.using(db_alias).all():
try:
vector = json.loads(annotation.raw_vector)
for key, value in vector.items():
try:
# try to convert all numeric vector values to integer
vector[key] = int(value)
except ValueError:
continue
annotation.vector = vector
annotation.save()
except ValueError:
# Annotation is invalid, delete it
annotation.delete()
def backward_func(apps, schema_editor):
Annotation = apps.get_model("annotations", "Annotation")
db_alias = schema_editor.connection.alias
# Copy all annotations from vector to raw_vector
for annotation in Annotation.objects.using(db_alias).all():
annotation.raw_vector = json.dumps(annotation.vector)
annotation.save()
operations = [
migrations.RenameField(
model_name='annotation',
old_name='vector',
new_name='raw_vector',
),
migrations.AddField(
model_name='annotation',
name='vector',
field=django.contrib.postgres.fields.jsonb.JSONField(null=True),
),
migrations.RunPython(forward_func, backward_func, atomic=True),
]
|
asn1tools/version.py | eerimoq/asn1tools | 198 | 27689 | __version__ = '0.159.0'
|
tests/integration/boxscore/test_ncaab_boxscore.py | MArtinherz/sportsipy | 221 | 27694 | <reponame>MArtinherz/sportsipy<filename>tests/integration/boxscore/test_ncaab_boxscore.py<gh_stars>100-1000
import mock
import os
import pandas as pd
from datetime import datetime
from flexmock import flexmock
from sportsipy import utils
from sportsipy.constants import HOME
from sportsipy.ncaab.constants import BOXSCORES_URL, SCHEDULE_URL
from sportsipy.ncaab.boxscore import Boxscore, Boxscores
MONTH = 1
YEAR = 2020
BOXSCORE = '2020-01-22-19-louisville'
def read_file(filename):
filepath = os.path.join(os.path.dirname(__file__), 'ncaab', filename)
return open('%s' % filepath, 'r', encoding='utf8').read()
def mock_pyquery(url):
class MockPQ:
def __init__(self, html_contents):
self.status_code = 200
self.html_contents = html_contents
self.text = html_contents
def __call__(self, div):
return read_file('table.html')
if url == BOXSCORES_URL % (MONTH, 5, YEAR):
return MockPQ(read_file('boxscores-1-5-2020.html'))
if url == BOXSCORES_URL % (MONTH, 6, YEAR):
return MockPQ(read_file('boxscores-1-6-2020.html'))
boxscore = read_file('%s.html' % BOXSCORE)
return MockPQ(boxscore)
class MockDateTime:
def __init__(self, year, month):
self.year = year
self.month = month
class TestNCAABBoxscore:
@mock.patch('requests.get', side_effect=mock_pyquery)
def setup_method(self, *args, **kwargs):
self.results = {
'date': 'January 22, 2020',
'location': 'KFC Yum! Center, Louisville, Kentucky',
'winner': HOME,
'winning_name': 'Louisville',
'winning_abbr': 'LOUISVILLE',
'losing_name': 'Georgia Tech',
'losing_abbr': 'GEORGIA-TECH',
'pace': 66.2,
'away_ranking': None,
'away_win_percentage': .421,
'away_wins': 8,
'away_losses': 11,
'away_minutes_played': 200,
'away_field_goals': 22,
'away_field_goal_attempts': 48,
'away_field_goal_percentage': .458,
'away_two_point_field_goals': 17,
'away_two_point_field_goal_attempts': 31,
'away_two_point_field_goal_percentage': .548,
'away_three_point_field_goals': 5,
'away_three_point_field_goal_attempts': 17,
'away_three_point_field_goal_percentage': .294,
'away_free_throws': 15,
'away_free_throw_attempts': 20,
'away_free_throw_percentage': .750,
'away_offensive_rebounds': 7,
'away_defensive_rebounds': 23,
'away_total_rebounds': 30,
'away_assists': 11,
'away_steals': 4,
'away_blocks': 4,
'away_turnovers': 16,
'away_personal_fouls': 18,
'away_points': 64,
'away_true_shooting_percentage': .557,
'away_effective_field_goal_percentage': .510,
'away_three_point_attempt_rate': .354,
'away_free_throw_attempt_rate': .417,
'away_offensive_rebound_percentage': 28.0,
'away_defensive_rebound_percentage': 63.9,
'away_total_rebound_percentage': 49.2,
'away_assist_percentage': 50.0,
'away_steal_percentage': 6.1,
'away_block_percentage': 10.5,
'away_turnover_percentage': 22.0,
'away_offensive_rating': 97.0,
'away_defensive_rating': 103.0,
'home_ranking': 6,
'home_win_percentage': .842,
'home_wins': 16,
'home_losses': 3,
'home_minutes_played': 200,
'home_field_goals': 24,
'home_field_goal_attempts': 58,
'home_field_goal_percentage': .414,
'home_two_point_field_goals': 18,
'home_two_point_field_goal_attempts': 38,
'home_two_point_field_goal_percentage': .474,
'home_three_point_field_goals': 6,
'home_three_point_field_goal_attempts': 20,
'home_three_point_field_goal_percentage': .300,
'home_free_throws': 14,
'home_free_throw_attempts': 23,
'home_free_throw_percentage': .609,
'home_offensive_rebounds': 13,
'home_defensive_rebounds': 18,
'home_total_rebounds': 31,
'home_assists': 12,
'home_steals': 9,
'home_blocks': 3,
'home_turnovers': 10,
'home_personal_fouls': 17,
'home_points': 68,
'home_true_shooting_percentage': .493,
'home_effective_field_goal_percentage': .466,
'home_three_point_attempt_rate': .345,
'home_free_throw_attempt_rate': .397,
'home_offensive_rebound_percentage': 36.1,
'home_defensive_rebound_percentage': 72.0,
'home_total_rebound_percentage': 50.8,
'home_assist_percentage': 50.0,
'home_steal_percentage': 13.6,
'home_block_percentage': 9.7,
'home_turnover_percentage': 12.8,
'home_offensive_rating': 103.0,
'home_defensive_rating': 97.0
}
flexmock(utils) \
.should_receive('_todays_date') \
.and_return(MockDateTime(YEAR, MONTH))
self.boxscore = Boxscore('2020-01-22-19-louisville')
def test_ncaab_boxscore_returns_requested_boxscore(self):
for attribute, value in self.results.items():
assert getattr(self.boxscore, attribute) == value
assert getattr(self.boxscore, 'summary') == {
# Box score is not parsed correctly
'away': [],
'home': []
}
def test_invalid_url_yields_empty_class(self):
flexmock(Boxscore) \
.should_receive('_retrieve_html_page') \
.and_return(None)
boxscore = Boxscore(BOXSCORE)
for key, value in boxscore.__dict__.items():
if key == '_uri':
continue
assert value is None
def test_ncaab_boxscore_dataframe_returns_dataframe_of_all_values(self):
df = pd.DataFrame([self.results], index=[BOXSCORE])
# Pandas doesn't natively allow comparisons of DataFrames.
# Concatenating the two DataFrames (the one generated during the test
# and the expected one above) and dropping duplicate rows leaves only
# the rows that are unique between the two frames. This allows a quick
# check of the DataFrame to see if it is empty - if so, all rows are
# duplicates, and they are equal.
frames = [df, self.boxscore.dataframe]
df1 = pd.concat(frames).drop_duplicates(keep=False)
assert df1.empty
def test_ncaab_boxscore_players(self):
boxscore = Boxscore(BOXSCORE)
assert len(boxscore.home_players) == 10
assert len(boxscore.away_players) == 7
for player in boxscore.home_players:
assert not player.dataframe.empty
for player in boxscore.away_players:
assert not player.dataframe.empty
def test_ncaab_boxscore_string_representation(self):
expected = ('Boxscore for Georgia Tech '
'at Louisville (January 22, 2020)')
boxscore = Boxscore(BOXSCORE)
assert boxscore.__repr__() == expected
class TestNCAABBoxscores:
def setup_method(self):
self.expected = {
'1-5-2020': [
{'boxscore': '2020-01-05-13-michigan-state',
'away_name': 'Michigan',
'away_abbr': 'michigan',
'away_score': 69,
'away_rank': 12,
'home_name': 'Michigan State',
'home_abbr': 'michigan-state',
'home_score': 87,
'home_rank': 14,
'non_di': False,
'top_25': True,
'winning_name': 'Michigan State',
'winning_abbr': 'michigan-state',
'losing_name': 'Michigan',
'losing_abbr': 'michigan'},
{'boxscore': '2020-01-05-13-saint-josephs',
'away_name': 'Dayton',
'away_abbr': 'dayton',
'away_score': 80,
'away_rank': 20,
'home_name': "<NAME>",
'home_abbr': 'saint-josephs',
'home_score': 67,
'home_rank': None,
'non_di': False,
'top_25': True,
'winning_name': 'Dayton',
'winning_abbr': 'dayton',
'losing_name': "<NAME>",
'losing_abbr': 'saint-josephs'},
{'boxscore': '2020-01-05-15-american',
'away_name': 'Boston University',
'away_abbr': 'boston-university',
'away_score': 63,
'away_rank': None,
'home_name': 'American',
'home_abbr': 'american',
'home_score': 67,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': 'American',
'winning_abbr': 'american',
'losing_name': 'Boston University',
'losing_abbr': 'boston-university'},
{'boxscore': '2020-01-05-14-lafayette',
'away_name': 'Bucknell',
'away_abbr': 'bucknell',
'away_score': 78,
'away_rank': None,
'home_name': 'Lafayette',
'home_abbr': 'lafayette',
'home_score': 66,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': 'Bucknell',
'winning_abbr': 'bucknell',
'losing_name': 'Lafayette',
'losing_abbr': 'lafayette'},
{'boxscore': '2020-01-05-14-duquesne',
'away_name': 'Davidson',
'away_abbr': 'davidson',
'away_score': 64,
'away_rank': None,
'home_name': 'Duquesne',
'home_abbr': 'duquesne',
'home_score': 71,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': 'Duquesne',
'winning_abbr': 'duquesne',
'losing_name': 'Davidson',
'losing_abbr': 'davidson'},
{'boxscore': '2020-01-05-16-south-dakota',
'away_name': 'Denver',
'away_abbr': 'denver',
'away_score': 78,
'away_rank': None,
'home_name': 'South Dakota',
'home_abbr': 'south-dakota',
'home_score': 80,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': 'South Dakota',
'winning_abbr': 'south-dakota',
'losing_name': 'Denver',
'losing_abbr': 'denver'},
{'boxscore': '2020-01-05-14-canisius',
'away_name': 'Fairfield',
'away_abbr': 'fairfield',
'away_score': 46,
'away_rank': None,
'home_name': 'Canisius',
'home_abbr': 'canisius',
'home_score': 42,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': 'Fairfield',
'winning_abbr': 'fairfield',
'losing_name': 'Canisius',
'losing_abbr': 'canisius'},
{'boxscore': '2020-01-05-17-northwestern-state',
'away_name': '<NAME>',
'away_abbr': 'houston-baptist',
'away_score': 79,
'away_rank': None,
'home_name': 'Northwestern State',
'home_abbr': 'northwestern-state',
'home_score': 106,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': 'Northwestern State',
'winning_abbr': 'northwestern-state',
'losing_name': '<NAME>',
'losing_abbr': 'houston-baptist'},
{'boxscore': '2020-01-05-14-milwaukee',
'away_name': 'UIC',
'away_abbr': 'illinois-chicago',
'away_score': 62,
'away_rank': None,
'home_name': 'Milwaukee',
'home_abbr': 'milwaukee',
'home_score': 64,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': 'Milwaukee',
'winning_abbr': 'milwaukee',
'losing_name': 'UIC',
'losing_abbr': 'illinois-chicago'},
{'boxscore': '2020-01-05-14-monmouth',
'away_name': 'Iona',
'away_abbr': 'iona',
'away_score': 61,
'away_rank': None,
'home_name': 'Monmouth',
'home_abbr': 'monmouth',
'home_score': 73,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': 'Monmouth',
'winning_abbr': 'monmouth',
'losing_name': 'Iona',
'losing_abbr': 'iona'},
{'boxscore': '2020-01-05-17-north-dakota',
'away_name': "<NAME>",
'away_abbr': 'ipfw',
'away_score': 69,
'away_rank': None,
'home_name': 'North Dakota',
'home_abbr': 'north-dakota',
'home_score': 83,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': 'North Dakota',
'winning_abbr': 'north-dakota',
'losing_name': "<NAME>",
'losing_abbr': 'ipfw'},
{'boxscore': '2020-01-05-14-green-bay',
'away_name': 'IUPUI',
'away_abbr': 'iupui',
'away_score': 93,
'away_rank': None,
'home_name': '<NAME>',
'home_abbr': 'green-bay',
'home_score': 78,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': 'IUPUI',
'winning_abbr': 'iupui',
'losing_name': '<NAME>',
'losing_abbr': 'green-bay'},
{'boxscore': '2020-01-05-14-fordham',
'away_name': '<NAME>',
'away_abbr': 'la-salle',
'away_score': 66,
'away_rank': None,
'home_name': 'Fordham',
'home_abbr': 'fordham',
'home_score': 60,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': '<NAME>',
'winning_abbr': 'la-salle',
'losing_name': 'Fordham',
'losing_abbr': 'fordham'},
{'boxscore': '2020-01-05-14-lehigh',
'away_name': 'Loyola (MD)',
'away_abbr': 'loyola-md',
'away_score': 71,
'away_rank': None,
'home_name': 'Lehigh',
'home_abbr': 'lehigh',
'home_score': 78,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': 'Lehigh',
'winning_abbr': 'lehigh',
'losing_name': 'Loyola (MD)',
'losing_abbr': 'loyola-md'},
{'boxscore': '2020-01-05-13-niagara',
'away_name': 'Manhattan',
'away_abbr': 'manhattan',
'away_score': 67,
'away_rank': None,
'home_name': 'Niagara',
'home_abbr': 'niagara',
'home_score': 62,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': 'Manhattan',
'winning_abbr': 'manhattan',
'losing_name': 'Niagara',
'losing_abbr': 'niagara'},
{'boxscore': '2020-01-05-14-saint-peters',
'away_name': 'Marist',
'away_abbr': 'marist',
'away_score': 40,
'away_rank': None,
'home_name': "<NAME>",
'home_abbr': 'saint-peters',
'home_score': 66,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': "<NAME>",
'winning_abbr': 'saint-peters',
'losing_name': 'Marist',
'losing_abbr': 'marist'},
{'boxscore': '2020-01-05-16-saint-louis',
'away_name': 'UMass',
'away_abbr': 'massachusetts',
'away_score': 80,
'away_rank': None,
'home_name': 'Saint Louis',
'home_abbr': 'saint-louis',
'home_score': 83,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': '<NAME>',
'winning_abbr': 'saint-louis',
'losing_name': 'UMass',
'losing_abbr': 'massachusetts'},
{'boxscore': '2020-01-05-12-holy-cross',
'away_name': 'Navy',
'away_abbr': 'navy',
'away_score': 61,
'away_rank': None,
'home_name': '<NAME>',
'home_abbr': 'holy-cross',
'home_score': 63,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': '<NAME>',
'winning_abbr': 'holy-cross',
'losing_name': 'Navy',
'losing_abbr': 'navy'},
{'boxscore': '2020-01-05-15-oakland',
'away_name': 'Northern Kentucky',
'away_abbr': 'northern-kentucky',
'away_score': 75,
'away_rank': None,
'home_name': 'Oakland',
'home_abbr': 'oakland',
'home_score': 64,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': 'Northern Kentucky',
'winning_abbr': 'northern-kentucky',
'losing_name': 'Oakland',
'losing_abbr': 'oakland'},
{'boxscore': '2020-01-05-15-north-dakota-state',
'away_name': 'Northland',
'away_abbr': 'Northland',
'away_score': 43,
'away_rank': None,
'home_name': 'North Dakota State',
'home_abbr': 'north-dakota-state',
'home_score': 97,
'home_rank': None,
'non_di': True,
'top_25': False,
'winning_name': 'North Dakota State',
'winning_abbr': 'north-dakota-state',
'losing_name': 'Northland',
'losing_abbr': 'Northland'},
{'boxscore': '2020-01-05-19-minnesota',
'away_name': 'Northwestern',
'away_abbr': 'northwestern',
'away_score': 68,
'away_rank': None,
'home_name': 'Minnesota',
'home_abbr': 'minnesota',
'home_score': 77,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': 'Minnesota',
'winning_abbr': 'minnesota',
'losing_name': 'Northwestern',
'losing_abbr': 'northwestern'},
{'boxscore': '2020-01-05-18-colorado',
'away_name': 'Oregon State',
'away_abbr': 'oregon-state',
'away_score': 76,
'away_rank': None,
'home_name': 'Colorado',
'home_abbr': 'colorado',
'home_score': 68,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': 'Oregon State',
'winning_abbr': 'oregon-state',
'losing_name': 'Colorado',
'losing_abbr': 'colorado'},
{'boxscore': '2020-01-05-20-illinois',
'away_name': 'Purdue',
'away_abbr': 'purdue',
'away_score': 37,
'away_rank': None,
'home_name': 'Illinois',
'home_abbr': 'illinois',
'home_score': 63,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': 'Illinois',
'winning_abbr': 'illinois',
'losing_name': 'Purdue',
'losing_abbr': 'purdue'},
{'boxscore': '2020-01-05-12-rhode-island',
'away_name': 'Richmond',
'away_abbr': 'richmond',
'away_score': 69,
'away_rank': None,
'home_name': '<NAME>',
'home_abbr': 'rhode-island',
'home_score': 61,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': 'Richmond',
'winning_abbr': 'richmond',
'losing_name': '<NAME>',
'losing_abbr': 'rhode-island'},
{'boxscore': '2020-01-05-14-rider',
'away_name': 'Siena',
'away_abbr': 'siena',
'away_score': 77,
'away_rank': None,
'home_name': 'Rider',
'home_abbr': 'rider',
'home_score': 85,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': 'Rider',
'winning_abbr': 'rider',
'losing_name': 'Siena',
'losing_abbr': 'siena'},
{'boxscore': '2020-01-05-22-washington',
'away_name': 'USC',
'away_abbr': 'southern-california',
'away_score': 40,
'away_rank': None,
'home_name': 'Washington',
'home_abbr': 'washington',
'home_score': 72,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': 'Washington',
'winning_abbr': 'washington',
'losing_name': 'USC',
'losing_abbr': 'southern-california'},
{'boxscore': '2020-01-05-16-george-washington',
'away_name': 'St. Bonaventure',
'away_abbr': 'st-bonaventure',
'away_score': 71,
'away_rank': None,
'home_name': '<NAME>',
'home_abbr': 'george-washington',
'home_score': 66,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': 'St. Bonaventure',
'winning_abbr': 'st-bonaventure',
'losing_name': '<NAME>',
'losing_abbr': 'george-washington'},
{'boxscore': '2020-01-05-16-xavier',
'away_name': "<NAME> (NY)",
'away_abbr': 'st-johns-ny',
'away_score': 67,
'away_rank': None,
'home_name': 'Xavier',
'home_abbr': 'xavier',
'home_score': 75,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': 'Xavier',
'winning_abbr': 'xavier',
'losing_name': "<NAME> (NY)",
'losing_abbr': 'st-johns-ny'},
{'boxscore': '2020-01-05-13-maine',
'away_name': '<NAME>',
'away_abbr': 'stony-brook',
'away_score': 73,
'away_rank': None,
'home_name': 'Maine',
'home_abbr': 'maine',
'home_score': 52,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': '<NAME>',
'winning_abbr': 'stony-brook',
'losing_name': 'Maine',
'losing_abbr': 'maine'},
{'boxscore': '2020-01-05-12-george-mason',
'away_name': 'VCU',
'away_abbr': 'virginia-commonwealth',
'away_score': 72,
'away_rank': None,
'home_name': '<NAME>',
'home_abbr': 'george-mason',
'home_score': 59,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': 'VCU',
'winning_abbr': 'virginia-commonwealth',
'losing_name': '<NAME>',
'losing_abbr': 'george-mason'},
{'boxscore': '2020-01-05-13-detroit-mercy',
'away_name': "Wright State",
'away_abbr': "wright-state",
'away_score': 70,
'away_rank': None,
'home_name': 'Detroit',
'home_abbr': 'detroit-mercy',
'home_score': 69,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': 'Wright State',
'winning_abbr': 'wright-state',
'losing_name': "Detroit",
'losing_abbr': "detroit-mercy"}
]
}
@mock.patch('requests.get', side_effect=mock_pyquery)
def test_boxscores_search(self, *args, **kwargs):
result = Boxscores(datetime(2020, 1, 5)).games
assert result == self.expected
@mock.patch('requests.get', side_effect=mock_pyquery)
def test_boxscores_search_invalid_end(self, *args, **kwargs):
result = Boxscores(datetime(2020, 1, 5),
datetime(2020, 1, 4)).games
assert result == self.expected
@mock.patch('requests.get', side_effect=mock_pyquery)
def test_boxscores_search_multiple_days(self, *args, **kwargs):
expected = {
'1-5-2020': [
{'boxscore': '2020-01-05-13-michigan-state',
'away_name': 'Michigan',
'away_abbr': 'michigan',
'away_score': 69,
'away_rank': 12,
'home_name': 'Michigan State',
'home_abbr': 'michigan-state',
'home_score': 87,
'home_rank': 14,
'non_di': False,
'top_25': True,
'winning_name': 'Michigan State',
'winning_abbr': 'michigan-state',
'losing_name': 'Michigan',
'losing_abbr': 'michigan'},
{'boxscore': '2020-01-05-13-saint-josephs',
'away_name': 'Dayton',
'away_abbr': 'dayton',
'away_score': 80,
'away_rank': 20,
'home_name': "<NAME>",
'home_abbr': 'saint-josephs',
'home_score': 67,
'home_rank': None,
'non_di': False,
'top_25': True,
'winning_name': 'Dayton',
'winning_abbr': 'dayton',
'losing_name': "<NAME>",
'losing_abbr': 'saint-josephs'},
{'boxscore': '2020-01-05-15-american',
'away_name': 'Boston University',
'away_abbr': 'boston-university',
'away_score': 63,
'away_rank': None,
'home_name': 'American',
'home_abbr': 'american',
'home_score': 67,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': 'American',
'winning_abbr': 'american',
'losing_name': 'Boston University',
'losing_abbr': 'boston-university'},
{'boxscore': '2020-01-05-14-lafayette',
'away_name': 'Bucknell',
'away_abbr': 'bucknell',
'away_score': 78,
'away_rank': None,
'home_name': 'Lafayette',
'home_abbr': 'lafayette',
'home_score': 66,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': 'Bucknell',
'winning_abbr': 'bucknell',
'losing_name': 'Lafayette',
'losing_abbr': 'lafayette'},
{'boxscore': '2020-01-05-14-duquesne',
'away_name': 'Davidson',
'away_abbr': 'davidson',
'away_score': 64,
'away_rank': None,
'home_name': 'Duquesne',
'home_abbr': 'duquesne',
'home_score': 71,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': 'Duquesne',
'winning_abbr': 'duquesne',
'losing_name': 'Davidson',
'losing_abbr': 'davidson'},
{'boxscore': '2020-01-05-16-south-dakota',
'away_name': 'Denver',
'away_abbr': 'denver',
'away_score': 78,
'away_rank': None,
'home_name': 'South Dakota',
'home_abbr': 'south-dakota',
'home_score': 80,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': 'South Dakota',
'winning_abbr': 'south-dakota',
'losing_name': 'Denver',
'losing_abbr': 'denver'},
{'boxscore': '2020-01-05-14-canisius',
'away_name': 'Fairfield',
'away_abbr': 'fairfield',
'away_score': 46,
'away_rank': None,
'home_name': 'Canisius',
'home_abbr': 'canisius',
'home_score': 42,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': 'Fairfield',
'winning_abbr': 'fairfield',
'losing_name': 'Canisius',
'losing_abbr': 'canisius'},
{'boxscore': '2020-01-05-17-northwestern-state',
'away_name': '<NAME>',
'away_abbr': 'houston-baptist',
'away_score': 79,
'away_rank': None,
'home_name': 'Northwestern State',
'home_abbr': 'northwestern-state',
'home_score': 106,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': 'Northwestern State',
'winning_abbr': 'northwestern-state',
'losing_name': 'Houston Baptist',
'losing_abbr': 'houston-baptist'},
{'boxscore': '2020-01-05-14-milwaukee',
'away_name': 'UIC',
'away_abbr': 'illinois-chicago',
'away_score': 62,
'away_rank': None,
'home_name': 'Milwaukee',
'home_abbr': 'milwaukee',
'home_score': 64,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': 'Milwaukee',
'winning_abbr': 'milwaukee',
'losing_name': 'UIC',
'losing_abbr': 'illinois-chicago'},
{'boxscore': '2020-01-05-14-monmouth',
'away_name': 'Iona',
'away_abbr': 'iona',
'away_score': 61,
'away_rank': None,
'home_name': 'Monmouth',
'home_abbr': 'monmouth',
'home_score': 73,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': 'Monmouth',
'winning_abbr': 'monmouth',
'losing_name': 'Iona',
'losing_abbr': 'iona'},
{'boxscore': '2020-01-05-17-north-dakota',
'away_name': "<NAME>",
'away_abbr': 'ipfw',
'away_score': 69,
'away_rank': None,
'home_name': 'North Dakota',
'home_abbr': 'north-dakota',
'home_score': 83,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': 'North Dakota',
'winning_abbr': 'north-dakota',
'losing_name': "<NAME>",
'losing_abbr': 'ipfw'},
{'boxscore': '2020-01-05-14-green-bay',
'away_name': 'IUPUI',
'away_abbr': 'iupui',
'away_score': 93,
'away_rank': None,
'home_name': 'Green Bay',
'home_abbr': 'green-bay',
'home_score': 78,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': 'IUPUI',
'winning_abbr': 'iupui',
'losing_name': '<NAME>',
'losing_abbr': 'green-bay'},
{'boxscore': '2020-01-05-14-fordham',
'away_name': '<NAME>',
'away_abbr': 'la-salle',
'away_score': 66,
'away_rank': None,
'home_name': 'Fordham',
'home_abbr': 'fordham',
'home_score': 60,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': '<NAME>',
'winning_abbr': 'la-salle',
'losing_name': 'Fordham',
'losing_abbr': 'fordham'},
{'boxscore': '2020-01-05-14-lehigh',
'away_name': 'Loyola (MD)',
'away_abbr': 'loyola-md',
'away_score': 71,
'away_rank': None,
'home_name': 'Lehigh',
'home_abbr': 'lehigh',
'home_score': 78,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': 'Lehigh',
'winning_abbr': 'lehigh',
'losing_name': 'Loyola (MD)',
'losing_abbr': 'loyola-md'},
{'boxscore': '2020-01-05-13-niagara',
'away_name': 'Manhattan',
'away_abbr': 'manhattan',
'away_score': 67,
'away_rank': None,
'home_name': 'Niagara',
'home_abbr': 'niagara',
'home_score': 62,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': 'Manhattan',
'winning_abbr': 'manhattan',
'losing_name': 'Niagara',
'losing_abbr': 'niagara'},
{'boxscore': '2020-01-05-14-saint-peters',
'away_name': 'Marist',
'away_abbr': 'marist',
'away_score': 40,
'away_rank': None,
'home_name': "<NAME>",
'home_abbr': 'saint-peters',
'home_score': 66,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': "<NAME>",
'winning_abbr': 'saint-peters',
'losing_name': 'Marist',
'losing_abbr': 'marist'},
{'boxscore': '2020-01-05-16-saint-louis',
'away_name': 'UMass',
'away_abbr': 'massachusetts',
'away_score': 80,
'away_rank': None,
'home_name': '<NAME>',
'home_abbr': 'saint-louis',
'home_score': 83,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': 'Saint Louis',
'winning_abbr': 'saint-louis',
'losing_name': 'UMass',
'losing_abbr': 'massachusetts'},
{'boxscore': '2020-01-05-12-holy-cross',
'away_name': 'Navy',
'away_abbr': 'navy',
'away_score': 61,
'away_rank': None,
'home_name': '<NAME>',
'home_abbr': 'holy-cross',
'home_score': 63,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': '<NAME>',
'winning_abbr': 'holy-cross',
'losing_name': 'Navy',
'losing_abbr': 'navy'},
{'boxscore': '2020-01-05-15-oakland',
'away_name': 'Northern Kentucky',
'away_abbr': 'northern-kentucky',
'away_score': 75,
'away_rank': None,
'home_name': 'Oakland',
'home_abbr': 'oakland',
'home_score': 64,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': '<NAME>entucky',
'winning_abbr': 'northern-kentucky',
'losing_name': 'Oakland',
'losing_abbr': 'oakland'},
{'boxscore': '2020-01-05-15-north-dakota-state',
'away_name': 'Northland',
'away_abbr': 'Northland',
'away_score': 43,
'away_rank': None,
'home_name': 'North Dakota State',
'home_abbr': 'north-dakota-state',
'home_score': 97,
'home_rank': None,
'non_di': True,
'top_25': False,
'winning_name': 'North Dakota State',
'winning_abbr': 'north-dakota-state',
'losing_name': 'Northland',
'losing_abbr': 'Northland'},
{'boxscore': '2020-01-05-19-minnesota',
'away_name': 'Northwestern',
'away_abbr': 'northwestern',
'away_score': 68,
'away_rank': None,
'home_name': 'Minnesota',
'home_abbr': 'minnesota',
'home_score': 77,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': 'Minnesota',
'winning_abbr': 'minnesota',
'losing_name': 'Northwestern',
'losing_abbr': 'northwestern'},
{'boxscore': '2020-01-05-18-colorado',
'away_name': 'Oregon State',
'away_abbr': 'oregon-state',
'away_score': 76,
'away_rank': None,
'home_name': 'Colorado',
'home_abbr': 'colorado',
'home_score': 68,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': 'Oregon State',
'winning_abbr': 'oregon-state',
'losing_name': 'Colorado',
'losing_abbr': 'colorado'},
{'boxscore': '2020-01-05-20-illinois',
'away_name': 'Purdue',
'away_abbr': 'purdue',
'away_score': 37,
'away_rank': None,
'home_name': 'Illinois',
'home_abbr': 'illinois',
'home_score': 63,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': 'Illinois',
'winning_abbr': 'illinois',
'losing_name': 'Purdue',
'losing_abbr': 'purdue'},
{'boxscore': '2020-01-05-12-rhode-island',
'away_name': 'Richmond',
'away_abbr': 'richmond',
'away_score': 69,
'away_rank': None,
'home_name': 'Rhode Island',
'home_abbr': 'rhode-island',
'home_score': 61,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': 'Richmond',
'winning_abbr': 'richmond',
             'losing_name': 'Rhode Island',
'losing_abbr': 'rhode-island'},
{'boxscore': '2020-01-05-14-rider',
'away_name': 'Siena',
'away_abbr': 'siena',
'away_score': 77,
'away_rank': None,
'home_name': 'Rider',
'home_abbr': 'rider',
'home_score': 85,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': 'Rider',
'winning_abbr': 'rider',
'losing_name': 'Siena',
'losing_abbr': 'siena'},
{'boxscore': '2020-01-05-22-washington',
'away_name': 'USC',
'away_abbr': 'southern-california',
'away_score': 40,
'away_rank': None,
'home_name': 'Washington',
'home_abbr': 'washington',
'home_score': 72,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': 'Washington',
'winning_abbr': 'washington',
'losing_name': 'USC',
'losing_abbr': 'southern-california'},
{'boxscore': '2020-01-05-16-george-washington',
             'away_name': 'St. Bonaventure',
'away_abbr': 'st-bonaventure',
'away_score': 71,
'away_rank': None,
             'home_name': 'George Washington',
'home_abbr': 'george-washington',
'home_score': 66,
'home_rank': None,
'non_di': False,
'top_25': False,
             'winning_name': 'St. Bonaventure',
             'winning_abbr': 'st-bonaventure',
             'losing_name': 'George Washington',
'losing_abbr': 'george-washington'},
{'boxscore': '2020-01-05-16-xavier',
             'away_name': "St. John's (NY)",
'away_abbr': 'st-johns-ny',
'away_score': 67,
'away_rank': None,
'home_name': 'Xavier',
'home_abbr': 'xavier',
'home_score': 75,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': 'Xavier',
'winning_abbr': 'xavier',
             'losing_name': "St. John's (NY)",
'losing_abbr': 'st-johns-ny'},
{'boxscore': '2020-01-05-13-maine',
             'away_name': 'Stony Brook',
'away_abbr': 'stony-brook',
'away_score': 73,
'away_rank': None,
'home_name': 'Maine',
'home_abbr': 'maine',
'home_score': 52,
'home_rank': None,
'non_di': False,
'top_25': False,
             'winning_name': 'Stony Brook',
'winning_abbr': 'stony-brook',
'losing_name': 'Maine',
'losing_abbr': 'maine'},
{'boxscore': '2020-01-05-12-george-mason',
'away_name': 'VCU',
'away_abbr': 'virginia-commonwealth',
'away_score': 72,
'away_rank': None,
             'home_name': 'George Mason',
'home_abbr': 'george-mason',
'home_score': 59,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': 'VCU',
'winning_abbr': 'virginia-commonwealth',
             'losing_name': 'George Mason',
'losing_abbr': 'george-mason'},
{'boxscore': '2020-01-05-13-detroit-mercy',
             'away_name': "Wright State",
'away_abbr': "wright-state",
'away_score': 70,
'away_rank': None,
'home_name': 'Detroit',
'home_abbr': 'detroit-mercy',
'home_score': 69,
'home_rank': None,
'non_di': False,
'top_25': False,
             'winning_name': 'Wright State',
'winning_abbr': 'wright-state',
'losing_name': "Detroit",
'losing_abbr': "detroit-mercy"}
],
'1-6-2020': [
{'boxscore': '2020-01-06-21-oklahoma-state',
'away_name': 'West Virginia',
'away_abbr': 'west-virginia',
'away_score': 55,
'away_rank': 17,
'home_name': 'Oklahoma State',
'home_abbr': 'oklahoma-state',
'home_score': 41,
'home_rank': None,
'non_di': False,
'top_25': True,
'winning_name': 'West Virginia',
'winning_abbr': 'west-virginia',
'losing_name': 'Oklahoma State',
'losing_abbr': 'oklahoma-state'},
{'boxscore': '2020-01-06-20-jackson-state',
'away_name': 'Alabama A&M',
'away_abbr': 'alabama-am',
'away_score': 66,
'away_rank': None,
'home_name': 'Jackson State',
'home_abbr': 'jackson-state',
'home_score': 57,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': 'Alabama A&M',
'winning_abbr': 'alabama-am',
             'losing_name': 'Jackson State',
'losing_abbr': 'jackson-state'},
{'boxscore': '2020-01-06-20-grambling',
'away_name': 'Alabama State',
'away_abbr': 'alabama-state',
'away_score': 63,
'away_rank': None,
'home_name': 'Grambling',
'home_abbr': 'grambling',
'home_score': 68,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': 'Grambling',
'winning_abbr': 'grambling',
'losing_name': 'Alabama State',
'losing_abbr': 'alabama-state'},
{'boxscore': '2020-01-06-20-texas-southern',
'away_name': 'Alcorn State',
'away_abbr': 'alcorn-state',
'away_score': 95,
'away_rank': None,
'home_name': 'Texas Southern',
'home_abbr': 'texas-southern',
'home_score': 80,
'home_rank': None,
'non_di': False,
'top_25': False,
             'winning_name': 'Alcorn State',
'winning_abbr': 'alcorn-state',
'losing_name': 'Texas Southern',
'losing_abbr': 'texas-southern'},
{'boxscore': '2020-01-06-19-howard',
'away_name': 'Bethune-Cookman',
'away_abbr': 'bethune-cookman',
'away_score': 102,
'away_rank': None,
'home_name': 'Howard',
'home_abbr': 'howard',
'home_score': 73,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': 'Bethune-Cookman',
'winning_abbr': 'bethune-cookman',
'losing_name': 'Howard',
'losing_abbr': 'howard'},
{'boxscore': '2020-01-06-19-army',
'away_name': 'Colgate',
'away_abbr': 'colgate',
'away_score': 70,
'away_rank': None,
'home_name': 'Army',
'home_abbr': 'army',
'home_score': 65,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': 'Colgate',
'winning_abbr': 'colgate',
'losing_name': 'Army',
'losing_abbr': 'army'},
{'boxscore': '2020-01-06-19-north-carolina-at',
'away_name': 'Florida A&M',
'away_abbr': 'florida-am',
'away_score': 90,
'away_rank': None,
'home_name': 'North Carolina A&T',
'home_abbr': 'north-carolina-at',
'home_score': 97,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': 'North Carolina A&T',
'winning_abbr': 'north-carolina-at',
'losing_name': 'Florida A&M',
'losing_abbr': 'florida-am'},
{'boxscore': '2020-01-06-19-arkansas-little-rock',
'away_name': 'Georgia Southern',
'away_abbr': 'georgia-southern',
'away_score': 73,
'away_rank': None,
             'home_name': 'Little Rock',
'home_abbr': 'little-rock',
'home_score': 79,
'home_rank': None,
'non_di': False,
'top_25': False,
             'winning_name': 'Little Rock',
'winning_abbr': 'little-rock',
'losing_name': 'Georgia Southern',
'losing_abbr': 'georgia-southern'},
{'boxscore': '2020-01-06-20-arkansas-state',
'away_name': 'Georgia State',
'away_abbr': 'georgia-state',
'away_score': 87,
'away_rank': None,
'home_name': 'Arkansas State',
'home_abbr': 'arkansas-state',
'home_score': 90,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': 'Arkansas State',
'winning_abbr': 'arkansas-state',
'losing_name': 'Georgia State',
'losing_abbr': 'georgia-state'},
{'boxscore': '2020-01-06-19-appalachian-state',
'away_name': 'Louisiana',
'away_abbr': 'louisiana',
'away_score': 81,
'away_rank': None,
'home_name': 'Appalachian State',
'home_abbr': 'appalachian-state',
'home_score': 73,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': 'Louisiana',
'winning_abbr': 'louisiana',
'losing_name': 'Appalachian State',
'losing_abbr': 'appalachian-state'},
{'boxscore': '2020-01-06-19-coastal-carolina',
'away_name': 'Louisiana-Monroe',
'away_abbr': 'louisiana-monroe',
'away_score': 64,
'away_rank': None,
'home_name': 'Coastal Carolina',
             'home_abbr': 'coastal-carolina',
'home_score': 93,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': 'Coastal Carolina',
'winning_abbr': 'coastal-carolina',
'losing_name': 'Louisiana-Monroe',
'losing_abbr': 'louisiana-monroe'},
{'boxscore': '2020-01-06-19-coppin-state',
'away_name': 'Norfolk State',
'away_abbr': 'norfolk-state',
'away_score': 82,
'away_rank': None,
'home_name': 'Coppin State',
'home_abbr': 'coppin-state',
'home_score': 59,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': 'Norfolk State',
'winning_abbr': 'norfolk-state',
'losing_name': 'Coppin State',
'losing_abbr': 'coppin-state'},
{'boxscore': '2020-01-06-20-texas-arlington',
'away_name': 'South Alabama',
'away_abbr': 'south-alabama',
'away_score': 66,
'away_rank': None,
'home_name': 'Texas-Arlington',
'home_abbr': 'texas-arlington',
'home_score': 54,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': 'South Alabama',
'winning_abbr': 'south-alabama',
'losing_name': 'Texas-Arlington',
'losing_abbr': 'texas-arlington'},
{'boxscore': '2020-01-06-19-morgan-state',
'away_name': 'South Carolina State',
'away_abbr': 'south-carolina-state',
'away_score': 63,
'away_rank': None,
'home_name': 'Morgan State',
'home_abbr': 'morgan-state',
'home_score': 77,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': 'Morgan State',
'winning_abbr': 'morgan-state',
'losing_name': 'South Carolina State',
'losing_abbr': 'south-carolina-state'},
{'boxscore': '2020-01-06-21-prairie-view',
'away_name': 'Southern',
'away_abbr': 'southern',
'away_score': 54,
'away_rank': None,
'home_name': 'Prairie View',
'home_abbr': 'prairie-view',
'home_score': 64,
'home_rank': None,
'non_di': False,
'top_25': False,
             'winning_name': 'Prairie View',
'winning_abbr': 'prairie-view',
'losing_name': 'Southern',
'losing_abbr': 'southern'},
{'boxscore': '2020-01-06-20-texas-state',
'away_name': 'Troy',
'away_abbr': 'troy',
'away_score': 71,
'away_rank': None,
'home_name': 'Texas State',
'home_abbr': 'texas-state',
'home_score': 63,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': 'Troy',
'winning_abbr': 'troy',
'losing_name': 'Texas State',
'losing_abbr': 'texas-state'}
]
}
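    # Smoke test: the string representation of a Boxscores query should name the
    # first date it covers.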
@mock.patch('requests.get', side_effect=mock_pyquery)
def test_boxscores_search_string_representation(self, *args, **kwargs):
result = Boxscores(datetime(2020, 1, 5))
assert result.__repr__() == 'NCAAB games for 1-5-2020'
|
image-generation/variational-auto-encoder/vq-vae/models/vq_vae.py | AaratiAkkapeddi/nnabla-examples | 228 | 27721 | # Copyright 2019,2020,2021 Sony Corporation.
# Copyright 2021 Sony Group Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import nnabla as nn
import nnabla.functions as F
import nnabla.parametric_functions as PF
import nnabla.initializer as I
import numpy as np
np.random.seed(1)
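# ResidualStack: a stack of pre-activation residual blocks (ReLU -> 3x3 conv -> BN
# -> ReLU -> 1x1 conv -> BN, plus a skip connection), shared by encoder and decoder.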
class ResidualStack(object):
def __init__(self, in_channels, num_hidden, num_res_layers, rng=313):
self.in_channels = in_channels
self.num_hidden = num_hidden
self.num_res_layers = num_res_layers
self.rng = rng
def __call__(self, x, test):
out = x
for i in range(self.num_res_layers):
            out = self.res_block(out, scope_name='res_block_' + str(i), test=test)
return F.relu(out)
def res_block(self, x, scope_name='res_block', test=False):
with nn.parameter_scope(scope_name):
out = F.relu(x)
out = PF.convolution(out, self.num_hidden, (3, 3),
stride=(1, 1), pad=(1, 1), with_bias=False, name='conv_1', rng=self.rng)
out = PF.batch_normalization(out, name='bn_1', batch_stat=not test)
out = F.relu(out)
out = PF.convolution(out, self.num_hidden, (1, 1),
stride=(1, 1), with_bias=False, name='conv_2', rng=self.rng)
out = PF.batch_normalization(out, name='bn_2', batch_stat=not test)
return x + out
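# VectorQuantizer: maps each spatial encoder output vector to its nearest codebook
# entry and returns the VQ loss (codebook + commitment terms), the quantized tensor
# with straight-through gradients, the codebook perplexity, and one-hot encodings.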
class VectorQuantizer(object):
def __init__(self, embedding_dim, num_embedding, commitment_cost, rng,
scope_name='vector_quantizer'):
self.embedding_dim = embedding_dim
self.num_embedding = num_embedding
self.commitment_cost = commitment_cost
self.rng = rng
self.scope_name = scope_name
with nn.parameter_scope(scope_name):
self.embedding_weight = nn.parameter.get_parameter_or_create('W', shape=(self.num_embedding, self.embedding_dim),
initializer=I.UniformInitializer((-1./self.num_embedding, 1./self.num_embedding), rng=self.rng), need_grad=True)
def __call__(self, x, return_encoding_indices=False):
x = F.transpose(x, (0, 2, 3, 1))
x_flat = x.reshape((-1, self.embedding_dim))
x_flat_squared = F.broadcast(
F.sum(x_flat**2, axis=1, keepdims=True), (x_flat.shape[0], self.num_embedding))
emb_wt_squared = F.transpose(
F.sum(self.embedding_weight**2, axis=1, keepdims=True), (1, 0))
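        # Squared L2 distance between every flattened encoder vector and every
        # codebook entry, expanded as ||x||^2 + ||e||^2 - 2 * x.e^T.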
distances = x_flat_squared + emb_wt_squared - 2 * \
F.affine(x_flat, F.transpose(self.embedding_weight, (1, 0)))
encoding_indices = F.min(
distances, only_index=True, axis=1, keepdims=True)
encoding_indices.need_grad = False
quantized = F.embed(encoding_indices.reshape(
encoding_indices.shape[:-1]), self.embedding_weight).reshape(x.shape)
if return_encoding_indices:
return encoding_indices, F.transpose(quantized, (0, 3, 1, 2))
encodings = F.one_hot(encoding_indices, (self.num_embedding,))
e_latent_loss = F.mean(F.squared_error(
quantized.get_unlinked_variable(need_grad=False), x))
q_latent_loss = F.mean(F.squared_error(
quantized, x.get_unlinked_variable(need_grad=False)))
loss = q_latent_loss + self.commitment_cost*e_latent_loss
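        # Straight-through estimator: forward the quantized values, but let the
        # gradient flow back to the encoder output x unchanged.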
quantized = x + (quantized - x).get_unlinked_variable(need_grad=False)
avg_probs = F.mean(encodings, axis=0)
perplexity = F.exp(-F.sum(avg_probs*F.log(avg_probs+1.0e-10)))
return loss, F.transpose(quantized, (0, 3, 1, 2)), perplexity, encodings
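# VQVAE: convolutional encoder (4x spatial downsampling) -> 1x1 projection to the
# embedding dimension -> VectorQuantizer -> transposed-convolution decoder back to
# the input resolution, with a tanh output.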
class VQVAE(object):
def __init__(self, config, training=True):
self.in_channels = config['model']['in_channels']
self.num_hidden = config['model']['num_hidden']
self.num_res_layers = config['model']['num_res_layers']
self.rng = np.random.RandomState(config['model']['rng'])
self.encoder_res_stack = ResidualStack(in_channels=self.num_hidden,
num_hidden=self.num_hidden, num_res_layers=self.num_res_layers,
rng=self.rng)
self.decoder_res_stack = ResidualStack(in_channels=self.num_hidden,
num_hidden=self.num_hidden, num_res_layers=self.num_res_layers,
rng=self.rng)
self.num_embedding = config['model']['num_embeddings']
self.embedding_dim = config['model']['embedding_dim']
self.commitment_cost = config['model']['commitment_cost']
self.decay = config['model']['decay']
self.training = training
self.vq = VectorQuantizer(
self.embedding_dim, self.num_embedding, self.commitment_cost, self.rng)
def encoder(self, x, test):
with nn.parameter_scope('encoder'):
out = PF.convolution(x, self.num_hidden, (4, 4), stride=(2, 2),
pad=(1, 1), name='conv_1', rng=self.rng)
out = PF.batch_normalization(out, batch_stat=not test)
out = F.relu(out)
out = PF.convolution(out, self.num_hidden, (4, 4), stride=(2, 2),
pad=(1, 1), name='conv_2', rng=self.rng)
out = self.encoder_res_stack(out, test=test)
return out
def decoder(self, x, test):
with nn.parameter_scope('decoder'):
out = self.decoder_res_stack(x, test=test)
out = F.relu(out)
out = PF.deconvolution(out, self.num_hidden, (4, 4), stride=(2, 2),
pad=(1, 1), name='deconv_1', rng=self.rng)
out = PF.batch_normalization(out, batch_stat=not test)
out = F.relu(out)
out = PF.deconvolution(out, self.in_channels, (4, 4), stride=(2, 2),
pad=(1, 1), name='deconv_2', rng=self.rng)
out = F.tanh(out)
return out
def __call__(self, img, return_encoding_indices=False, quantized_as_input=False, test=False):
with nn.parameter_scope('vq_vae'):
# import pdb; pdb.set_trace()
if quantized_as_input:
return self.decoder(img, test)
z = self.encoder(img, test)
z = PF.convolution(z, self.embedding_dim, (1, 1), stride=(1, 1))
if return_encoding_indices:
return self.vq(z, return_encoding_indices=True)
loss, quantized, perplexity, encodings = self.vq(z)
img_recon = self.decoder(quantized, test)
return loss, img_recon, perplexity
|
magma/backend/coreir/coreir_transformer.py | leonardt/magma | 167 | 27723 | <gh_stars>100-1000
from abc import ABC, abstractmethod
from copy import copy
import json
import logging
import os
import coreir as pycoreir
from magma.digital import Digital
from magma.array import Array
from magma.bits import Bits
from magma.backend.check_wiring_context import check_wiring_context
from magma.backend.coreir.coreir_utils import (
attach_debug_info, check_magma_interface, constant_to_value, get_inst_args,
get_module_of_inst, magma_interface_to_coreir_module_type,
magma_port_to_coreir_port, make_cparams, map_genarg,
magma_name_to_coreir_select, Slice)
from magma.compile_exception import UnconnectedPortException
from magma.interface import InterfaceKind
from magma.is_definition import isdefinition
from magma.linking import (
get_linked_modules, has_default_linked_module, get_default_linked_module)
from magma.logging import root_logger
from magma.passes import dependencies
from magma.tuple import Tuple
from magma.backend.util import get_codegen_debug_info
from magma.clock import is_clock_or_nested_clock
from magma.passes.clock import (
drive_all_undriven_clocks_in_value, get_all_output_clocks_in_defn)
from magma.config import get_debug_mode
from magma.protocol_type import MagmaProtocol, MagmaProtocolMeta
from magma.ref import PortViewRef, ArrayRef
from magma.symbol_table import SYMBOL_TABLE_EMPTY
# NOTE(rsetaluri): We do not need to set the level of this logger since it has
# already been done in backend/coreir/coreir_backend.py.
_logger = root_logger().getChild("coreir_backend")
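# Maps magma circuits compiled as CoreIR generators to one-shot callbacks that
# record their CoreIR port names in the symbol table once an instance is created.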
_generator_callbacks = {}
def _is_generator(ckt_or_inst):
return ckt_or_inst.coreir_genargs is not None
def _coreir_longname(magma_defn_or_decl, coreir_module_or_generator):
# NOTE(rsetaluri): This is a proxy to exposing a pycoreir/coreir-c API to
# get a module's longname. This logic should be identical right now. Another
# caveat is that we don't elaborate the CoreIR generator at the magma level,
# so it's longname needs to be dynamically reconstructed anyway.
namespace = coreir_module_or_generator.namespace.name
prefix = "" if namespace == "global" else f"{namespace}_"
longname = prefix + coreir_module_or_generator.name
if isinstance(coreir_module_or_generator, pycoreir.Module):
return longname
assert isinstance(coreir_module_or_generator, pycoreir.Generator)
param_keys = coreir_module_or_generator.params.keys()
for k in param_keys:
v = magma_defn_or_decl.coreir_genargs[k]
longname += f"__{k}{v}"
return longname
def _collect_drivers(value):
"""
Iterate over value to collect the child drivers, packing slices together
"""
drivers = []
start_idx = 0
for i in range(1, len(value)):
# If the next value item is not a reference to an array of bits where
# the array matches the previous item and the index is incremented by
# one, append the current slice to drivers (may introduce slices of
# length 1)
if not (
isinstance(value[i].name, ArrayRef) and
issubclass(value[i].name.array.T, Digital) and
isinstance(value[i - 1].name, ArrayRef) and
value[i].name.array is value[i - 1].name.array and
value[i].name.index == value[i - 1].name.index + 1
):
drivers.append(value[start_idx:i])
start_idx = i
drivers.append(value[start_idx:])
return drivers
def _unwrap(x):
if isinstance(x, MagmaProtocol):
return x._get_magma_value_()
if isinstance(x, MagmaProtocolMeta):
return x._to_magma_()
return x
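# Transformer passes form a tree: run() first builds and runs every child
# transformer, then calls run_self(), and refuses to run a second time.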
class TransformerBase(ABC):
__MISSING = object()
def __init__(self, backend, opts):
self.backend = backend
self.opts = opts
self.ran = False
self._children = None
def run(self):
if self.ran:
raise RuntimeError("Can only run transformer once")
self._children = self.children()
for child in self._children:
child.run()
self.run_self()
self.ran = True
@abstractmethod
def children(self):
raise NotImplementedError()
def run_self(self):
pass
def get_opt(self, key, default=__MISSING):
if default is TransformerBase.__MISSING:
return self.opts[key]
return self.opts.get(key, default)
class LeafTransformer(TransformerBase):
def children(self):
return []
class DefnOrDeclTransformer(TransformerBase):
def __init__(self, backend, opts, defn_or_decl):
super().__init__(backend, opts)
self.defn_or_decl = defn_or_decl
self.coreir_module = None
def children(self):
if _is_generator(self.defn_or_decl):
return [GeneratorTransformer(
self.backend, self.opts, self.defn_or_decl)]
try:
coreir_module = self.backend.get_module(self.defn_or_decl)
_logger.debug(f"{self.defn_or_decl} already compiled, skipping")
self.coreir_module = coreir_module
return []
except KeyError:
pass
if not isdefinition(self.defn_or_decl):
return [DeclarationTransformer(self.backend,
self.opts,
self.defn_or_decl)]
wrapped = getattr(self.defn_or_decl, "wrappedModule", None)
if wrapped and wrapped.context is self.backend.context:
return [WrappedTransformer(self.backend,
self.opts,
self.defn_or_decl)]
return [DefinitionTransformer(self.backend,
self.opts,
self.defn_or_decl)]
def run_self(self):
self._run_self_impl()
self._generate_symbols()
self._link_default_module()
self._link_modules()
def _link_default_module(self):
if not has_default_linked_module(self.defn_or_decl):
return
target = get_default_linked_module(self.defn_or_decl)
target = self.backend.get_module(target)
self.coreir_module.link_default_module(target)
def _link_modules(self):
targets = get_linked_modules(self.defn_or_decl)
for key, target in targets.items():
target = self.backend.get_module(target)
self.coreir_module.link_module(key, target)
def _generate_symbols(self):
if not self.get_opt("generate_symbols", False):
return
out_module_name = _coreir_longname(
self.defn_or_decl, self.coreir_module)
self.opts.get("symbol_table").set_module_name(
self.defn_or_decl.name, out_module_name)
def _run_self_impl(self):
if self.coreir_module:
return
self.coreir_module = self._children[0].coreir_module
self.backend.add_module(self.defn_or_decl, self.coreir_module)
if isdefinition(self.defn_or_decl):
self.defn_or_decl.wrappedModule = self.coreir_module
libs = self.backend.included_libs()
self.defn_or_decl.coreir_wrapped_modules_libs_used = libs
class GeneratorTransformer(TransformerBase):
def __init__(self, backend, opts, defn_or_decl):
super().__init__(backend, opts)
self.defn_or_decl = defn_or_decl
self.coreir_module = None
def children(self):
try:
coreir_module = self.backend.get_module(self.defn_or_decl)
_logger.debug(f"{self.defn_or_decl} already compiled, skipping")
self.coreir_module = coreir_module
return []
except KeyError:
pass
assert not isdefinition(self.defn_or_decl)
return [DeclarationTransformer(self.backend,
self.opts,
self.defn_or_decl)]
def run_self(self):
self._generate_symbols()
if self.coreir_module is not None:
return
self.coreir_module = self._children[0].coreir_module
def _generate_symbols(self):
if not self.get_opt("generate_symbols", False):
return
global _generator_callbacks
def _callback(coreir_inst):
magma_names = list(self.defn_or_decl.interface.ports.keys())
coreir_names = list(k for k, _ in coreir_inst.module.type.items())
assert len(magma_names) == len(coreir_names)
for magma_name, coreir_name in zip(magma_names, coreir_names):
self.opts.get("symbol_table").set_port_name(
self.defn_or_decl.name, magma_name, coreir_name)
assert self.defn_or_decl not in _generator_callbacks
_generator_callbacks[self.defn_or_decl] = _callback
class InstanceTransformer(LeafTransformer):
def __init__(self, backend, opts, inst, defn):
super().__init__(backend, opts)
self.inst = inst
self.defn = defn
self.coreir_inst_gen = None
def run_self(self):
self.coreir_inst_gen = self.run_self_impl()
def run_self_impl(self):
_logger.debug(
f"Compiling instance {(self.inst.name, type(self.inst).name)}"
)
defn = type(self.inst)
if hasattr(self.inst, "namespace"):
lib = self.backend.get_lib(self.inst.namespace)
else:
lib = self.backend.get_lib(self.inst.coreir_lib)
if self.inst.coreir_lib == "global":
lib = self.get_opt("user_namespace", lib)
if not _is_generator(self.inst):
module = get_module_of_inst(self.backend.context, self.inst, lib)
args = get_inst_args(self.inst)
args = self.backend.context.new_values(args)
return lambda m: m.add_module_instance(self.inst.name, module, args)
generator = lib.generators[defn.coreir_name]
config_args = {k: v for k, v in self.inst.coreir_configargs.items()}
config_args = self.backend.context.new_values(config_args)
gen_args = {k: map_genarg(self.backend.context, v)
for k, v in defn.coreir_genargs.items()}
gen_args = self.backend.context.new_values(gen_args)
return lambda m: m.add_generator_instance(
self.inst.name, generator, gen_args, config_args)
class WrappedTransformer(LeafTransformer):
def __init__(self, backend, opts, defn):
super().__init__(backend, opts)
self.defn = defn
self.coreir_module = self.defn.wrappedModule
self.backend.include_lib_or_libs(
self.defn.coreir_wrapped_modules_libs_used)
class DefinitionTransformer(TransformerBase):
def __init__(self, backend, opts, defn):
super().__init__(backend, opts)
self.defn = defn
self.coreir_module = None
self.decl_tx = DeclarationTransformer(self.backend,
self.opts,
self.defn)
self.inst_txs = {
inst: InstanceTransformer(self.backend, self.opts, inst, self.defn)
for inst in self.defn.instances
}
self.clocks = get_all_output_clocks_in_defn(defn)
self._constant_cache = {}
def children(self):
children = []
if not self.get_opt("skip_instance_graph", False):
deps = dependencies(self.defn, include_self=False)
opts = self.opts.copy()
opts.update({"skip_instance_graph": True})
children += [DefnOrDeclTransformer(self.backend, opts, dep)
for dep in deps]
children += [self.decl_tx]
children += self.inst_txs.values()
return children
def run_self(self):
_logger.debug(f"Compiling definition {self.defn}")
self.coreir_module = self.decl_tx.coreir_module
if self.defn.inline_verilog_strs:
inline_verilog = "\n\n".join(x[0] for x in
self.defn.inline_verilog_strs)
connect_references = {}
for _, inline_value_map in self.defn.inline_verilog_strs:
for key, value in inline_value_map.items():
connect_references[key] = magma_port_to_coreir_port(value)
self.coreir_module.add_metadata("inline_verilog", json.dumps(
{"str": inline_verilog,
"connect_references": connect_references}
))
for name, module in self.defn.compiled_bind_modules.items():
self.backend.bind_module(name, module)
self.coreir_module.definition = self.get_coreir_defn()
def _generate_symbols(self, coreir_insts):
if not self.get_opt("generate_symbols", False):
return
for inst, coreir_inst in coreir_insts.items():
self.get_opt("symbol_table").set_instance_name(
self.defn.name, inst.name,
(SYMBOL_TABLE_EMPTY, coreir_inst.name))
self.get_opt("symbol_table").set_instance_type(
self.defn.name, inst.name, type(inst).name)
def get_coreir_defn(self):
coreir_defn = self.coreir_module.new_definition()
coreir_insts = {inst: self.inst_txs[inst].coreir_inst_gen(coreir_defn)
for inst in self.defn.instances}
# Call generator callback if necessary.
global _generator_callbacks
for inst, coreir_inst in coreir_insts.items():
try:
callback = _generator_callbacks.pop(type(inst))
except KeyError:
continue
callback(coreir_inst)
self._generate_symbols(coreir_insts)
# If this module was imported from verilog, do not go through the
# general module construction flow. Instead just attach the verilog
# source as metadata and return the module.
if hasattr(self.defn, "verilogFile") and self.defn.verilogFile:
metadata = json.dumps({"verilog_string": self.defn.verilogFile})
self.coreir_module.add_metadata("verilog", metadata)
return coreir_defn
if hasattr(self.defn, "verilog") and self.defn.verilog:
metadata = json.dumps({"verilog_body": self.defn.verilog})
self.coreir_module.add_metadata("verilog", metadata)
return coreir_defn
if self.defn.coreir_lib is not None:
self.backend.include_lib_or_libs(self.defn.coreir_lib)
for name, port in self.defn.interface.ports.items():
_logger.debug(f"{name}, {port}, {port.is_output()}")
for inst, coreir_inst in coreir_insts.items():
if get_codegen_debug_info() and getattr(inst, "debug_info", False):
attach_debug_info(coreir_inst, inst.debug_info)
if getattr(inst, "coreir_metadata"):
for k, v in inst.coreir_metadata.items():
coreir_inst.add_metadata(k, json.dumps(v))
for inst in coreir_insts:
for name, port in inst.interface.ports.items():
self.connect_non_outputs(coreir_defn, port)
for port in self.defn.interface.ports.values():
self.connect_non_outputs(coreir_defn, port)
return coreir_defn
def connect_non_outputs(self, module_defn, port):
# Recurse into non input types that may contain inout children.
if isinstance(port, Tuple) and not port.is_input() or \
isinstance(port, Array) and not port.T.is_input():
for elem in port:
self.connect_non_outputs(module_defn, elem)
elif not port.is_output():
self.connect(module_defn, port, port.trace())
def get_source(self, port, value, module_defn):
port = _unwrap(port)
value = _unwrap(value)
if isinstance(value, pycoreir.Wireable):
return value
if isinstance(value, Slice):
return module_defn.select(value.get_coreir_select())
if isinstance(value, Bits) and value.const():
return self._const_instance(value, len(value), module_defn)
if value.anon() and isinstance(value, Array):
drivers = _collect_drivers(value)
offset = 0
for d in drivers:
d = _unwrap(d)
if len(d) == 1:
# _collect_drivers will introduce a slice of length 1 for
# non-slices, so we index them here with 0 to unpack the
# extra array dimension
self.connect(module_defn, port[offset], d[0])
else:
self.connect(module_defn,
Slice(port, offset, offset + len(d)),
Slice(d[0].name.array, d[0].name.index,
d[-1].name.index + 1))
offset += len(d)
return None
if isinstance(value, Tuple) and value.anon():
for p, v in zip(port, value):
self.connect(module_defn, p, v)
return None
if value.const():
return self._const_instance(value, None, module_defn)
if isinstance(value.name, PortViewRef):
return module_defn.select(
magma_name_to_coreir_select(value.name))
return module_defn.select(magma_port_to_coreir_port(value))
def connect(self, module_defn, port, value):
if value is None and is_clock_or_nested_clock(type(port)):
with self.defn.open():
if not drive_all_undriven_clocks_in_value(port, self.clocks):
# No default clock
raise UnconnectedPortException(port)
value = port.trace()
if value is None:
if port.is_inout():
return # skip inouts because they might be conn. as an input.
if getattr(self.defn, "_ignore_undriven_", False):
return
raise UnconnectedPortException(port)
check_wiring_context(port, value)
source = self.get_source(port, value, module_defn)
if not source:
return
sink = module_defn.select(magma_port_to_coreir_port(port))
module_defn.connect(source, sink)
if get_codegen_debug_info() and getattr(port, "debug_info", False):
attach_debug_info(module_defn, port.debug_info, source, sink)
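    # Constants are materialized once per (value, width) pair: a corebit.const
    # module for single bits, a coreir.const generator instance for multi-bit values.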
def _const_instance(self, constant, num_bits, module_defn):
value = constant_to_value(constant)
key = (value, num_bits)
try:
return self._constant_cache[key]
except KeyError:
pass
if num_bits is None:
config = self.backend.context.new_values({"value": bool(value)})
name = f"bit_const_{value}_{num_bits}"
mod = self.backend.get_lib("corebit").modules["const"]
module_defn.add_module_instance(name, mod, config)
else:
config = self.backend.context.new_values({"value": value})
name = f"const_{value}_{num_bits}"
gen = self.backend.get_lib("coreir").generators["const"]
gen_args = self.backend.context.new_values({"width": num_bits})
module_defn.add_generator_instance(name, gen, gen_args, config)
out = module_defn.select(f"{name}.out")
return self._constant_cache.setdefault(key, out)
class DeclarationTransformer(LeafTransformer):
def __init__(self, backend, opts, decl):
super().__init__(backend, opts)
self.decl = decl
self.coreir_module = None
def run_self(self):
self.coreir_module = self._run_self_impl()
self._generate_symbols()
def _generate_symbols(self):
if not self.get_opt("generate_symbols", False):
return
if _is_generator(self.decl):
return
magma_names = list(self.decl.interface.ports.keys())
coreir_names = list(k for k, _ in self.coreir_module.type.items())
assert len(magma_names) == len(coreir_names)
for magma_name, coreir_name in zip(magma_names, coreir_names):
self.opts.get("symbol_table").set_port_name(
self.decl.name, magma_name, coreir_name)
def _run_self_impl(self):
_logger.debug(f"Compiling declaration {self.decl}")
if self.decl.coreir_lib is not None:
self.backend.include_lib_or_libs(self.decl.coreir_lib)
# These libraries are already available by default in coreir, so we
# don't need declarations.
if self.decl.coreir_lib in ["coreir", "corebit", "commonlib",
"memory"]:
lib = self.backend.get_lib(self.decl.coreir_lib)
if not _is_generator(self.decl):
return lib.modules[self.decl.coreir_name]
return lib.generators[self.decl.coreir_name]
try:
coreir_module = self.backend.get_module(self.decl)
_logger.debug(f"{self.decl} already compiled, skipping")
return coreir_module
except KeyError:
pass
if get_debug_mode():
check_magma_interface(self.decl.interface)
module_type = magma_interface_to_coreir_module_type(
self.backend.context, self.decl.interface)
if isinstance(self.decl.interface, InterfaceKind):
module_type = self.backend.context.Flip(module_type)
kwargs = {}
if hasattr(self.decl, "coreir_config_param_types"):
param_types = self.decl.coreir_config_param_types
kwargs["cparams"] = make_cparams(self.backend.context, param_types)
if hasattr(self.decl, "namespace"):
# Allows users to choose namespace explicitly with
# class MyCircuit(m.Circuit):
# namespace = "foo"
# overrides user_namespace setting
namespace = self.backend.get_lib(self.decl.namespace)
else:
namespace = self.get_opt("user_namespace",
self.backend.context.global_namespace)
coreir_module = namespace.new_module(
self.decl.coreir_name, module_type, **kwargs)
if get_codegen_debug_info() and self.decl.debug_info:
attach_debug_info(coreir_module, self.decl.debug_info)
for key, value in self.decl.coreir_metadata.items():
coreir_module.add_metadata(key, json.dumps(value))
return coreir_module
|
test/connector/exchange/altmarkets/test_altmarkets_user_stream_tracker.py | BGTCapital/hummingbot | 542 | 27726 | <gh_stars>100-1000
#!/usr/bin/env python
import sys
import asyncio
import logging
import unittest
import conf
from os.path import join, realpath
from hummingbot.connector.exchange.altmarkets.altmarkets_user_stream_tracker import AltmarketsUserStreamTracker
from hummingbot.connector.exchange.altmarkets.altmarkets_auth import AltmarketsAuth
from hummingbot.core.utils.async_utils import safe_ensure_future
from hummingbot.logger.struct_logger import METRICS_LOG_LEVEL
sys.path.insert(0, realpath(join(__file__, "../../../../../")))
logging.basicConfig(level=METRICS_LOG_LEVEL)
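# Live integration test: requires valid AltMarkets API credentials in conf and a
# network connection; it listens to the authenticated user stream for 30 seconds.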
class AltmarketsUserStreamTrackerUnitTest(unittest.TestCase):
api_key = conf.altmarkets_api_key
api_secret = conf.altmarkets_secret_key
@classmethod
def setUpClass(cls):
cls.ev_loop: asyncio.BaseEventLoop = asyncio.get_event_loop()
cls.trading_pairs = ["BTC-USD"]
cls.user_stream_tracker: AltmarketsUserStreamTracker = AltmarketsUserStreamTracker(
altmarkets_auth=AltmarketsAuth(cls.api_key, cls.api_secret),
trading_pairs=cls.trading_pairs)
cls.user_stream_tracker_task: asyncio.Task = safe_ensure_future(cls.user_stream_tracker.start())
def test_user_stream(self):
# Wait process some msgs.
print("\nSleeping for 30s to gather some user stream messages.")
self.ev_loop.run_until_complete(asyncio.sleep(30.0))
print(self.user_stream_tracker.user_stream)
|
catalyst/core/misc.py | tadejsv/catalyst | 206 | 27738 | from typing import Dict, List, Tuple, Union
from collections import OrderedDict
from functools import lru_cache
import warnings
from torch.utils.data import BatchSampler, DataLoader
from catalyst.core.callback import (
Callback,
CallbackWrapper,
IBackwardCallback,
ICriterionCallback,
IOptimizerCallback,
ISchedulerCallback,
)
from catalyst.typing import RunnerCriterion, RunnerOptimizer, RunnerScheduler
def get_original_callback(callback: Callback) -> Callback:
"""Docs."""
while isinstance(callback, CallbackWrapper):
callback = callback.callback
return callback
def callback_isinstance(callback: Callback, class_or_tuple) -> bool:
"""Check if callback is the same type as required ``class_or_tuple``
Args:
callback: callback to check
class_or_tuple: class_or_tuple to compare with
Returns:
bool: true if first object has the required type
"""
callback = get_original_callback(callback)
return isinstance(callback, class_or_tuple)
def sort_callbacks_by_order(
callbacks: Union[List, Dict, OrderedDict]
) -> "OrderedDict[str, Callback]":
"""Creates an sequence of callbacks and sort them.
Args:
callbacks: either list of callbacks or ordered dict
Returns:
sequence of callbacks sorted by ``callback order``
Raises:
TypeError: if `callbacks` is out of `None`, `dict`, `OrderedDict`, `list`
"""
if callbacks is None:
output = OrderedDict()
elif isinstance(callbacks, (dict, OrderedDict)):
output = [(k, v) for k, v in callbacks.items()]
output = sorted(output, key=lambda x: x[1].order)
output = OrderedDict(output)
elif isinstance(callbacks, list):
output = sorted(callbacks, key=lambda x: x.order)
output = OrderedDict([(i, value) for i, value in enumerate(output)])
else:
raise TypeError(
f"Callbacks must be either Dict/OrderedDict or list, "
f"got {type(callbacks)}"
)
return output
@lru_cache(maxsize=42)
def is_str_intersections(origin_string: str, strings: Tuple):
"""Docs."""
return any(x in origin_string for x in strings)
def get_loader_batch_size(loader: DataLoader):
"""Docs."""
batch_size = loader.batch_size
if batch_size is not None:
return batch_size
batch_size = loader.batch_sampler.batch_size
if batch_size is not None:
return batch_size
raise NotImplementedError(
"No `batch_size` found,"
"please specify it with `loader.batch_size`,"
"or `loader.batch_sampler.batch_size`"
)
def get_loader_num_samples(loader: DataLoader):
"""Docs."""
batch_size = get_loader_batch_size(loader)
if isinstance(loader.batch_sampler, BatchSampler):
# pytorch default item-based samplers
if loader.drop_last:
return (len(loader.dataset) // batch_size) * batch_size
else:
return len(loader.dataset)
else:
# pytorch batch-based samplers
return len(loader) * batch_size
def check_callbacks(
callbacks: OrderedDict,
criterion: RunnerCriterion = None,
optimizer: RunnerOptimizer = None,
scheduler: RunnerScheduler = None,
):
"""Docs."""
callback_exists = lambda callback_fn: any(
callback_isinstance(x, callback_fn) for x in callbacks.values()
)
if criterion is not None and not callback_exists(ICriterionCallback):
warnings.warn(
"No ``ICriterionCallback/CriterionCallback`` were found "
"while runner.criterion is not None."
"Do you compute the loss during ``runner.handle_batch``?"
)
if (criterion is not None or optimizer is not None) and not callback_exists(
IBackwardCallback
):
warnings.warn(
"No ``IBackwardCallback/BackwardCallback`` were found "
"while runner.criterion/optimizer is not None."
"Do you backward the loss during ``runner.handle_batch``?"
)
if optimizer is not None and not callback_exists(IOptimizerCallback):
warnings.warn(
"No ``IOptimizerCallback/OptimizerCallback`` were found "
"while runner.optimizer is not None."
"Do run optimisation step pass during ``runner.handle_batch``?"
)
if scheduler is not None and not callback_exists(ISchedulerCallback):
warnings.warn(
"No ``ISchedulerCallback/SchedulerCallback`` were found "
"while runner.scheduler is not None."
"Do you make scheduler step during ``runner.handle_batch``?"
)
__all__ = [
"get_original_callback",
"callback_isinstance",
"check_callbacks",
"is_str_intersections",
"get_loader_batch_size",
"get_loader_num_samples",
"sort_callbacks_by_order",
]
|
practice/bitwise_operations/ex2.py | recursivelycurious/wordnik-repl | 346 | 27764 | <reponame>recursivelycurious/wordnik-repl
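# Binary integer literals (Python 2): the 0b prefix writes a number in base 2,
# so 0b1 through 0b111 print as the decimal values 1 through 7.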
print 0b1, #1
print 0b10, #2
print 0b11, #3
print 0b100, #4
print 0b101, #5
print 0b110, #6
print 0b111 #7
print "******"
print 0b1 + 0b11
print 0b11 * 0b11
|
learning_algorithm/neural_network.py | Bermuhz/DataMiningCompetitionFirstPrize | 128 | 27773 | <reponame>Bermuhz/DataMiningCompetitionFirstPrize
from sklearn.neural_network import MLPClassifier
from commons import variables
from commons import tools
from scipy.stats import mode
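# learn(): presumably subsamples the training set via tools.simple_negative_sample
# (rate taken from variables.select_rate_nn), fits a single-hidden-layer MLP, and
# returns hard predictions plus class probabilities for test_x.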
def learn(x, y, test_x):
(temp_x, temp_y) = tools.simple_negative_sample(x, y, variables.select_rate_nn)
clf = MLPClassifier(hidden_layer_sizes=(variables.unit_num_nn,), random_state=2017, max_iter=2000,
alpha=variables.alpha_nn,
learning_rate_init=variables.learning_rate_init_nn,solver="adam",activation="relu").fit(temp_x, temp_y)
prediction_list = clf.predict(test_x)
prediction_list_prob = clf.predict_proba(test_x)
return prediction_list,prediction_list_prob
|
top_players.py | ergest/Fantasy-Premier-League | 1,011 | 27794 | <gh_stars>1000+
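# Presumably fetches the latest FPL API snapshot via get_data() and writes the
# parsed top-player summary into the data/2020-21 directory via parse_top_players().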
from getters import *
from parsers import *
def main():
data = get_data()
parse_top_players(data, 'data/2020-21')
if __name__ == '__main__':
main()
|
ch10-unsupervised/clustering/spectral_clustering/tests/test_spectral_embedding_.py | skforest/intro_ds | 314 | 27796 | # -*- coding: UTF-8 -*-
import numpy as np
from numpy.testing import assert_array_almost_equal
from spectral_clustering.spectral_embedding_ import spectral_embedding
def assert_first_col_equal(maps):
constant_vec = [1] * maps.shape[0]
assert_array_almost_equal(maps[:, 0] / maps[0, 0], constant_vec)
def test_spectral_embedding():
"""
    According to the definition of the spectral embedding, the data in the first column is constant.
"""
adjacency = np.array([
[0., 0.8, 0.9, 0.],
[0.8, 0., 0., 0.],
[0.9, 0., 0., 1.],
[0., 0., 1., 0.]])
maps = spectral_embedding(
adjacency, n_components=2, drop_first=False, eigen_solver="arpack")
assert_first_col_equal(maps)
maps_1 = spectral_embedding(
adjacency, n_components=2, drop_first=False, eigen_solver="lobpcg")
assert_first_col_equal(maps_1)
|
GPLT_Python/L1-003.py | upupming/algorithm | 107 | 27803 | <gh_stars>100-1000
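# Count how often each digit 0-9 appears in the input and print "digit:count"
# for every digit that occurs at least once, in ascending digit order.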
a = str(input())
b = {'0': 0, '1': 0, '2': 0, '3': 0, '4': 0, '5': 0, '6': 0, '7': 0, '8': 0, '9': 0}
for i in a:
b[i] = b[i] + 1
for i in range(len(b)):
if b[str(i)] == 0:
continue
print(str(i) + ':' + str(b[str(i)]))
|
cubes/query/__init__.py | digitalsatori/cubes | 1,020 | 27807 | from .browser import *
from .cells import *
from .computation import *
from .statutils import *
|
pymoo/operators/mutation/nom.py | jarreguit/pymoo | 762 | 27808 | <filename>pymoo/operators/mutation/nom.py
from pymoo.core.mutation import Mutation
class NoMutation(Mutation):
def _do(self, problem, X, **kwargs):
return X
|