max_stars_repo_path (string, length 4–245) | max_stars_repo_name (string, length 7–115) | max_stars_count (int64, 101–368k) | id (string, length 2–8) | content (string, length 6–1.03M) |
---|---|---|---|---|
tensorflow_gnn/graph/schema_validation.py | mattdangerw/gnn | 611 | 12683139 |
"""Graph schema validation routines.
This module provides routines for validating the correctness of `GraphSchema`
messages, which describe the node sets, edge sets, and graph-global (context)
features of a graph. See go/graph-tensor for background.
"""
from typing import List
from absl import logging # TODO(blais): Remove, see below.
import tensorflow as tf
from tensorflow_gnn.graph import adjacency as adj
from tensorflow_gnn.graph import graph_constants as const
from tensorflow_gnn.graph import graph_tensor as gt
from tensorflow_gnn.graph import schema_utils as su
import tensorflow_gnn.proto.graph_schema_pb2 as schema_pb2
# The supported data types. Note that these are currently limited to the ones
# supported by `tensorflow.Example` but we can eventually extend the list by
# adding casting transformations, and supporting other data formats for
# encoding.
VALID_DTYPES = (tf.string, tf.int64, tf.float32)
class ValidationError(ValueError):
"""A schema validation error.
This exception is raised when errors are found while validating a schema
for correctness.
"""
def validate_schema(schema: schema_pb2.GraphSchema) -> List[Exception]:
"""Validates the correctness of a graph schema instance.
`GraphSchema` configuration messages are created by users to describe the
topology of a graph. This function checks various aspects of the schema for
correctness, e.g. it rejects reserved feature names, ensures given shapes are
fully defined, and verifies that set name references resolve.
Args:
schema: An instance of the graph schema.
Returns:
A list of exceptions describing optional warnings.
Render those to your favorite stream (or ignore).
Raises:
ValidationError: If a validation check fails.
"""
_validate_schema_feature_dtypes(schema)
_validate_schema_shapes(schema)
_validate_schema_descriptions(schema)
_validate_schema_reserved_feature_names(schema)
_validate_schema_context_references(schema)
_validate_schema_node_set_references(schema)
return _warn_schema_scalar_shapes(schema)
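# A minimal usage sketch, assuming only the imports above; the node set and
# feature names below are made up for illustration. Building a tiny schema
# with a [1]-shaped feature exercises both the hard checks and the
# scalar-shape warning returned by `validate_schema`.
def _example_validate_schema():
  schema = schema_pb2.GraphSchema()
  feature = schema.node_sets["users"].features["age"]
  feature.dtype = tf.int64.as_datatype_enum
  feature.shape.dim.add().size = 1  # triggers the "[1] vs scalar" warning
  for warning in validate_schema(schema):
    print(warning)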
def check_required_features(requirements: schema_pb2.GraphSchema,
actual: schema_pb2.GraphSchema):
"""Checks the requirements of a given schema against another.
This function enables the specification of required features for a function.
A function accepting a `GraphTensor` instance can thereby document which
features it expects to find on it. The function accepts two schemas:
a `requirements` schema which describes what the function will attempt to
fetch and use on the `GraphTensor`, and an `actual` schema instance, which is
the schema describing the dataset. You can use this in your model code to
ensure that a dataset contains all the expected node sets, edge sets and
features that the model uses.
Note that a dimension with a size of `0` in a feature from the `requirements`
schema is interpreted specially: it means "accept any value for this
dimension." The special value `-1` is still used to represent a ragged
dimension.
(Finally, note that this function predates the existence of `GraphTensorSpec`,
which is a runtime descriptor for a `GraphTensor`. We may eventually provide
an equivalent construct using the `GraphTensorSpec`.)
Args:
requirements: An instance of a GraphSchema object, with optional shapes.
actual: The actual schema instance to check as a matching superset of
the required schema.
Raises:
ValidationError: If the given schema does not fulfill the requirements.
"""
# Create maps of the required and provided features.
def build_schema_map(schema_):
mapping = {}
for (set_type, set_name, feature_name,
feature) in su.iter_features(schema_):
key = (set_type, set_name, feature_name)
mapping[key] = feature
return mapping
required = build_schema_map(requirements)
given = build_schema_map(actual)
for key, required_feature in required.items():
set_type, set_name, feature_name = key
try:
given_feature = given[key]
except KeyError:
raise ValidationError(
"{} feature '{}' from set '{}' is missing from given schema".format(
set_type.capitalize(), feature_name, set_name))
else:
if required_feature.HasField("dtype") and (
required_feature.dtype != given_feature.dtype):
raise ValidationError(
"{} feature '{}' from set '{}' has invalid type: {}".format(
set_type.capitalize(), feature_name, set_name,
given_feature.dtype))
if required_feature.HasField("shape"):
if len(given_feature.shape.dim) != len(required_feature.shape.dim):
raise ValidationError(
"{} feature '{}' from set '{}' has invalid shape: {}".format(
set_type.capitalize(), feature_name, set_name,
given_feature.shape))
for required_dim, given_dim in zip(required_feature.shape.dim,
given_feature.shape.dim):
if required_dim.size == 0: # Accept any dimension.
continue
elif given_dim.size != required_dim.size:
raise ValidationError(
"{} feature '{}' from set '{}' has invalid shape: {}".format(
set_type.capitalize(), feature_name, set_name,
given_feature.shape))
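# A minimal sketch of declaring model requirements for
# `check_required_features`, assuming only the imports above; the set and
# feature names are hypothetical. A dimension size of 0 in the requirements
# means "accept any size for this dimension".
def _example_check_required_features(actual_schema: schema_pb2.GraphSchema):
  requirements = schema_pb2.GraphSchema()
  feature = requirements.node_sets["papers"].features["embedding"]
  feature.dtype = tf.float32.as_datatype_enum
  feature.shape.dim.add().size = 0  # any embedding width is acceptable
  check_required_features(requirements, actual_schema)  # raises on mismatch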
def _validate_schema_feature_dtypes(schema: schema_pb2.GraphSchema):
"""Verify that dtypes are set and from our list of supported types."""
for set_type, set_name, feature_name, feature in su.iter_features(schema):
if not feature.HasField("dtype"):
raise ValidationError(
"Missing 'dtype' field on {} set '{}' feature '{}'".format(
set_type, set_name, feature_name))
if feature.dtype not in {dtype.as_datatype_enum
for dtype in VALID_DTYPES}:
raise ValidationError(
("Invalid 'dtype' field {} on {} set '{}' feature '{}': {}; "
"valid types include: {}").format(
feature.dtype, set_type, set_name, feature_name, feature.dtype,
", ".join(map(str, VALID_DTYPES))))
def _validate_schema_shapes(schema: schema_pb2.GraphSchema):
"""Check for the validity of shape protos."""
for set_type, set_name, feature_name, feature in su.iter_features(schema):
if feature.shape.unknown_rank:
raise ValidationError(
"Shapes must have a known rank; on {} set '{}' feature '{}'".format(
set_type, set_name, feature_name))
def _warn_schema_scalar_shapes(schema: schema_pb2.GraphSchema):
"""Return warnings on unnecessary shapes of size 1. This is a common error.
Note that, strictly speaking, such shapes parse fine; the problem is that
clients will inevitably configure shapes of [1] where scalar shapes would be
sufficient. This check is there to nudge them in the right direction.
Args:
schema: A GraphSchema instance to validate.
Returns:
A list of ValidationError warnings to issue conditionally.
"""
warnings = []
for set_type, set_name, feature_name, feature in su.iter_features(schema):
if len(feature.shape.dim) == 1 and feature.shape.dim[0].size == 1:
warnings.append(ValidationError(
"Unnecessary shape of [1] in {} set '{}' / '{}'; use scalar feature "
"instead (i.e., specify an empty shape proto).".format(
set_type, set_name, feature_name)))
return warnings
def _validate_schema_descriptions(schema: schema_pb2.GraphSchema):
"""Verify that the descriptions aren't set on the shapes' .name fields."""
# This seems to be a common error.
name_fields = []
for set_type, set_name, feature_name, feature in su.iter_features(schema):
if feature.HasField("description"):
continue
for dim in feature.shape.dim:
if dim.name:
name_fields.append((set_type, set_name, feature_name))
if name_fields:
field_names = ",".join([str(ntuple) for ntuple in name_fields])
raise ValidationError(
"The following features are incorrectly locating the description on "
"the shape dimensions 'name' field: {}; use the 'description' field of "
"the feature instead".format(field_names))
def _validate_schema_reserved_feature_names(schema: schema_pb2.GraphSchema):
"""Check that reserved feature names aren't being used as explicit features."""
node_set_dicts = [("nodes", name, node_set.features)
for name, node_set in schema.node_sets.items()]
edge_set_dicts = [("edges", name, edge_set.features)
for name, edge_set in schema.edge_sets.items()]
for set_type, set_name, feature_dict in node_set_dicts + edge_set_dicts:
if const.SIZE_NAME in feature_dict:
raise ValidationError(
"Feature '{}' from {} set '{}' is reserved".format(
const.SIZE_NAME, set_type, set_name))
for set_type, set_name, feature_dict in edge_set_dicts:
for name in const.SOURCE_NAME, const.TARGET_NAME:
# Invalidate reserved feature names.
if name in feature_dict:
raise ValidationError(
"Feature '{}' from {} set '{}' is reserved".format(
name, set_type, set_name))
# TODO(blais): Make this compulsory after we remove the hardcoded
# feature names from the sampler.
for set_type, set_name, feature_name, feature in su.iter_features(schema):
if const.RESERVED_REGEX.match(feature_name):
logging.error("Invalid %s feature name '%s' on set '%s': reserved names "
"are not allowed", set_type, feature_name, set_name)
def _validate_schema_context_references(schema: schema_pb2.GraphSchema):
"""Verify the cross-references to context features from node and edge sets."""
for set_name, node_set in schema.node_sets.items():
for feature in node_set.context:
if feature not in schema.context.features:
raise ValidationError("Context feature '{}' does not exist "
"(from node set '{}')".format(feature, set_name))
for set_name, edge_set in schema.edge_sets.items():
for feature in edge_set.context:
if feature not in schema.context.features:
raise ValidationError("Context feature '{}' does not exist "
"(from edge set '{}')".format(feature, set_name))
def _validate_schema_node_set_references(schema: schema_pb2.GraphSchema):
"""Verify the source and target set references from the edge sets."""
for set_name, edge_set in schema.edge_sets.items():
for feature_name in edge_set.source, edge_set.target:
if feature_name not in schema.node_sets:
raise ValidationError(
"Edge set '{}' referencing unknown node set '{}'".format(
set_name, feature_name))
# TODO(blais): This code could eventually be folded into the various
# constructors of `GraphTensor` pieces.
def assert_constraints(graph: gt.GraphTensor) -> tf.Operation:
"""Validate the shape constaints of a graph's features at runtime.
This code returns a TensorFlow op with debugging assertions that ensure the
parsed data has valid shape constraints for a graph. This can be instantiated
in your TensorFlow graph while debugging if you believe that your data may be
incorrectly shaped, or simply applied to a manually produced dataset to ensure
that those constraints have been applied correctly.
Args:
graph: An instance of a `GraphTensor`.
Returns:
A grouped op containing the check operations.
"""
return tf.group(
_assert_constraints_feature_shape_prefix(graph),
_assert_constraints_edge_shapes(graph),
_assert_constraints_edge_indices_range(graph),
)
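# A minimal eager-mode sketch, assuming `graph` is a parsed scalar
# `GraphTensor` (hypothetical variable): the returned op groups all
# assertions, and under eager execution they are evaluated as soon as the op
# is created, raising tf.errors.InvalidArgumentError on violation.
#
#   checks = assert_constraints(graph)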
def _assert_constraints_feature_shape_prefix(
graph: gt.GraphTensor) -> tf.Operation:
"""Validates the number of nodes or edges of feature tensors."""
with tf.name_scope("constraints_feature_shape_prefix"):
checks = []
for set_type, set_dict in [("node", graph.node_sets),
("edge", graph.edge_sets)]:
for set_name, feature_set in set_dict.items():
sizes = feature_set.sizes
# Check the rank is at least 1.
checks.append(tf.debugging.assert_rank_at_least(sizes, 1))
rank = tf.rank(sizes)
for feature_name, tensor in feature_set.features.items():
# Check that each tensor has greater or equal rank to the parent
# piece.
checks.append(tf.debugging.assert_greater_equal(
tf.rank(tensor), rank,
"Rank too small for {} feature '{}/{}'".format(
set_type, set_name, feature_name)))
# Check the prefix shape of the tensor matches.
checks.append(tf.debugging.assert_equal(
tensor.shape[:rank], sizes,
"Invalid prefix shape for {} feature: {}/{}".format(
set_type, set_name, feature_name)))
return tf.group(*checks)
def _assert_constraints_edge_indices_range(
graph: gt.GraphTensor) -> tf.Operation:
"""Validates that edge indices are within the bounds of node set sizes."""
with tf.name_scope("constraints_edge_indices_range"):
checks = []
for set_name, edge_set in graph.edge_sets.items():
adjacency = edge_set.adjacency
if not issubclass(type(adjacency), adj.HyperAdjacency):
raise ValueError(f"Adjacency type for constraints assertions must be "
f"HyperAdjacency: {adjacency}")
for tag, (node_set_name, indices) in sorted(adjacency
.get_indices_dict().items()):
# Check that the indices are positive.
flat_indices = (indices.flat_values
if isinstance(indices, tf.RaggedTensor)
else indices)
checks.append(tf.debugging.Assert(
tf.math.reduce_all(
tf.math.greater_equal(indices,
tf.constant(0, dtype=indices.dtype))),
["Index underflow",
"edges/{} {} indices:".format(set_name, tag), flat_indices],
name="check_indices_underflow", summarize=-1))
# Check the indices are smaller than the node tensor sizes.
sizes = graph.node_sets[node_set_name].sizes
checks.append(tf.debugging.Assert(
tf.math.reduce_all(
tf.math.less(indices, tf.expand_dims(sizes, axis=-1))),
["Index overflow",
"edges/{} {} indices:".format(set_name, tag), flat_indices,
"nodes/{} {}:".format(node_set_name, "size"), sizes],
name="check_indices_overflow", summarize=-1))
return tf.group(*checks)
def _assert_constraints_edge_shapes(graph: gt.GraphTensor) -> tf.Operation:
"""Validates edge shapes and that they contain a scalar index per node."""
with tf.name_scope("constraints_edge_indices_range"):
checks = []
for set_name, edge_set in graph.edge_sets.items():
adjacency = edge_set.adjacency
if not issubclass(type(adjacency), adj.HyperAdjacency):
raise ValueError(f"Adjacency type for constraints assertions must be "
f"HyperAdjacency: {adjacency}")
for tag, (_, indices) in sorted(adjacency.get_indices_dict().items()):
# Check the shape of the edge indices matches the size, and that the
# shape is scalar on the indices.
checks.append(tf.debugging.assert_equal(
indices.shape, edge_set.sizes,
"Invalid shape for edge indices: {}/{}".format(set_name, tag)))
return tf.group(*checks)
|
docs/conf.py | suevii/jupyterlab-lsp | 1,117 | 12683157 | """ Documentation configuration and workflow for jupyterlab-lsp
"""
# pylint: disable=invalid-name,redefined-builtin,import-error
import pathlib
import subprocess
import sys
sys.path.insert(
0,
str(
(
pathlib.Path.cwd().parent / "python_packages" / "jupyter_lsp" / "src"
).resolve()
),
)
project = "Jupyter[Lab] Language Server"
copyright = "2021, Jupyter[Lab] Language Server Contributors"
author = "Jupyter[Lab] Language Server Contributors"
version = ""
release = ""
extensions = [
"myst_nb",
"sphinx.ext.autodoc",
"sphinx.ext.napoleon",
"sphinx.ext.coverage",
"sphinx.ext.doctest",
"sphinx.ext.githubpages",
"sphinx.ext.ifconfig",
"sphinx.ext.intersphinx",
"sphinx.ext.mathjax",
"sphinx.ext.todo",
"sphinx.ext.viewcode",
"sphinx_copybutton",
"sphinx_autodoc_typehints",
]
templates_path = ["_templates"]
source_suffix = [".rst", ".md"]
master_doc = "index"
language = None
exclude_patterns = [
".ipynb_checkpoints/**",
"**/.ipynb_checkpoints/**",
"**/~.*",
"~.*",
"_build/**",
]
html_theme = "sphinx_book_theme"
html_static_path = ["_static"]
htmlhelp_basename = "jupyterlab-lsp"
intersphinx_mapping = {
"python": ("https://docs.python.org/3", None),
"jsonschema": ("https://python-jsonschema.readthedocs.io/en/stable/", None),
}
github_url = "https://github.com"
github_repo_org = "jupyter-lsp"
github_repo_name = "jupyterlab-lsp"
github_repo_slug = f"{github_repo_org}/{github_repo_name}"
github_repo_url = f"{github_url}/{github_repo_slug}"
extlinks = {
"issue": (f"{github_repo_url}/issues/%s", "#"),
"pr": (f"{github_repo_url}/pull/%s", "PR #"),
"commit": (f"{github_repo_url}/commit/%s", ""),
"gh": (f"{github_url}/%s", "GitHub: "),
}
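# If `sphinx.ext.extlinks` is enabled, these entries define shorthand roles
# for issue/PR/commit references; the numbers below are illustrative only:
#
#   :issue:`123`  ->  https://github.com/jupyter-lsp/jupyterlab-lsp/issues/123
#   :pr:`456`     ->  https://github.com/jupyter-lsp/jupyterlab-lsp/pull/456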
html_show_sourcelink = True
html_context = {
"display_github": True,
# these automatically-generated pages will create broken links
"hide_github_pagenames": ["search", "genindex"],
"github_user": github_repo_org,
"github_repo": github_repo_name,
"github_version": "master",
"conf_py_path": "/docs/",
}
html_logo = "images/logo.png"
html_title = "Language Server Protocol integration for Jupyter[Lab]"
html_theme_options = {
"repository_url": github_repo_url,
"path_to_docs": "docs",
"use_fullscreen_button": True,
"use_repository_button": True,
"use_issues_button": True,
"use_edit_page_button": True,
"use_download_button": True,
}
# MyST-{NB}
jupyter_execute_notebooks = "force"
nb_output_stderr = "remove-warn"
myst_enable_extensions = [
"amsmath",
"deflist",
"dollarmath",
"html_admonition",
"html_image",
"smartquotes",
]
def setup(app):
"""Runs before the "normal business" of sphinx. Don't go too crazy here."""
app.add_css_file("css/custom.css")
subprocess.check_call(["jlpm", "--ignore-optional"])
|
tests/testmodule/__init__.py | MrCull/MonkeyType | 3,890 | 12683165 | # Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
class Foo:
def __init__(self, arg1: str, arg2: int) -> None:
self.arg1 = arg1
self.arg2 = arg2
|
deepgraph/functions.py | deepgraph/deepgraph | 272 | 12683169 | """Auxiliary **connector** and **selector** functions to create edges.
This module provides auxiliary **connector** and **selector** functions
for the ``dg.DeepGraph.create_edges`` and
``dg.DeepGraph.create_ft_edges`` methods.
They are described in their corresponding docstrings.
"""
from __future__ import print_function, division, absolute_import
# Copyright (C) 2017-2020 by
# <NAME> <<EMAIL>>
# All rights reserved.
# BSD license.
# py2/3 compatibility
try:
range = xrange
except NameError:
pass
import numpy as np
__all__ = ['great_circle_dist',
'cp_node_intersection',
'cp_intersection_strength',
'hypergeometric_p_value',
]
# ============================================================================
# CONNECTORS
# ============================================================================
def great_circle_dist(lat_s, lat_t, lon_s, lon_t):
"""Return the great circle distance between nodes.
The latitude and longitude values in the node table have to be in signed
decimal degrees without compass direction (the sign indicates west/south).
The great circle distance is calculated using the spherical law of cosines.
"""
# dtypes
lat_s = np.array(lat_s, dtype=float)
lat_t = np.array(lat_t, dtype=float)
lon_s = np.array(lon_s, dtype=float)
lon_t = np.array(lon_t, dtype=float)
# select by event_indices
phi_i = np.radians(lat_s)
phi_j = np.radians(lat_t)
delta_alpha = np.radians(lon_t) - np.radians(lon_s)
# earth's radius
R = 6371
# spatial distance of nodes
gcd = np.arccos(np.sin(phi_i) * np.sin(phi_j) +
np.cos(phi_i) * np.cos(phi_j) *
np.cos(delta_alpha)) * R
# for 0 gcd, there might be nans, convert to 0.
gcd = np.nan_to_num(gcd)
return gcd
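# A small usage sketch (coordinates are approximate, for illustration only);
# distances come back in kilometres because R is set to Earth's mean radius:
#
#   great_circle_dist(lat_s=52.52, lat_t=48.86, lon_s=13.41, lon_t=2.35)
#   # -> roughly 880 km (Berlin to Paris)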
def cp_node_intersection(supernode_ids, sources, targets):
"""Work in progress!
"""
nodess = supernode_ids[sources]
nodest = supernode_ids[targets]
identical_nodes = (nodess == nodest)
intsec = np.zeros(len(sources), dtype=object)
intsec_card = np.zeros(len(sources), dtype=int)
for i in range(len(sources)):
intsec[i] = nodess[i].intersection(nodest[i])
intsec_card[i] = len(intsec[i])
return intsec, intsec_card, identical_nodes
def cp_intersection_strength(n_unique_nodes, intsec_card, sources, targets):
"""Work in progress!
"""
us = n_unique_nodes[sources]
ut = n_unique_nodes[targets]
# min cardinality
min_card = np.array(np.vstack((us, ut)).min(axis=0), dtype=np.float64)
# intersection strength
intsec_strength = intsec_card / min_card
return intsec_strength
def hypergeometric_p_value(n_unique_nodes, intsec_card, sources, targets):
"""Work in progress!
"""
from scipy.stats import hypergeom
us = n_unique_nodes[sources]
ut = n_unique_nodes[targets]
# population size
M = 220*220
# number of success states in population
n = np.vstack((us, ut)).max(axis=0)
# total draws
N = np.vstack((us, ut)).min(axis=0)
# successes
x = intsec_card
hg_p = np.zeros(len(sources))
for i in range(len(sources)):
hg_p[i] = hypergeom.sf(x[i], M, n[i], N[i])
return hg_p
# ============================================================================
# Selectors
# ============================================================================
|
setup.py | ralfgerlich/simupy | 436 | 12683180 | from setuptools import setup, find_packages
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))
# get the version
exec(open('simupy/version.py').read())
# Get the long description from the README file
with open(path.join(here, 'README.rst'), encoding='utf-8') as f:
long_description = f.read()
long_description = long_description.replace(
"https://simupy.readthedocs.io/en/latest/",
"https://simupy.readthedocs.io/en/simupy-{}/".format(
'.'.join(__version__.split('.')[:3])
)
)
setup(
name='simupy',
version=__version__,
description='A framework for modeling and simulating dynamical systems.',
long_description=long_description,
packages=find_packages(),
author='<NAME>',
author_email='<EMAIL>',
url='https://github.com/simupy/simupy',
license="BSD 2-clause \"Simplified\" License",
python_requires='>=3',
install_requires=['numpy>=1.11.3', 'scipy>=0.18.1'],
extras_require={
'symbolic': ['sympy>=1.0'],
'doc': ['sphinx>=1.6.3', 'sympy>=1.0'],
'examples': ['matplotlib>=2.0', 'sympy>=1.0'],
},
classifiers=[
'License :: OSI Approved :: BSD License',
'Programming Language :: Python :: 3',
'Intended Audience :: Education',
'Intended Audience :: Science/Research',
'Operating System :: OS Independent',
'Topic :: Scientific/Engineering :: Physics',
'Topic :: Scientific/Engineering :: Mathematics',
],
)
|
build/android/gyp/aar.py | Flameeyes/nojs | 2,151 | 12683183 | #!/usr/bin/env python
#
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Processes an Android AAR file."""
import argparse
import os
import posixpath
import re
import shutil
import sys
from xml.etree import ElementTree
import zipfile
from util import build_utils
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__),
os.pardir, os.pardir)))
import gn_helpers
def _IsManifestEmpty(manifest_str):
"""Returns whether the given manifest has merge-worthy elements.
E.g.: <activity>, <service>, etc.
"""
doc = ElementTree.fromstring(manifest_str)
for node in doc:
if node.tag == 'application':
if len(node):
return False
elif node.tag != 'uses-sdk':
return False
return True
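# Illustrative behaviour on hypothetical manifest snippets:
#
#   _IsManifestEmpty('<manifest><uses-sdk/><application/></manifest>')
#   # -> True (only <uses-sdk> and an empty <application>)
#   _IsManifestEmpty('<manifest><application><activity/></application></manifest>')
#   # -> False (the <application> element has children worth merging)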
def _CreateInfo(aar_file):
data = {}
data['aidl'] = []
data['assets'] = []
data['resources'] = []
data['subjars'] = []
data['subjar_tuples'] = []
data['has_classes_jar'] = False
data['has_proguard_flags'] = False
data['has_native_libraries'] = False
data['has_r_text_file'] = False
with zipfile.ZipFile(aar_file) as z:
data['is_manifest_empty'] = (
_IsManifestEmpty(z.read('AndroidManifest.xml')))
for name in z.namelist():
if name.endswith('/'):
continue
if name.startswith('aidl/'):
data['aidl'].append(name)
elif name.startswith('res/'):
data['resources'].append(name)
elif name.startswith('libs/') and name.endswith('.jar'):
label = posixpath.basename(name)[:-4]
label = re.sub(r'[^a-zA-Z0-9._]', '_', label)
data['subjars'].append(name)
data['subjar_tuples'].append([label, name])
elif name.startswith('assets/'):
data['assets'].append(name)
elif name.startswith('jni/'):
data['has_native_libraries'] = True
elif name == 'classes.jar':
data['has_classes_jar'] = True
elif name == 'proguard.txt':
data['has_proguard_flags'] = True
elif name == 'R.txt':
# Some AARs, e.g. gvr_controller_java, have empty R.txt. Such AARs
# have no resources as well. We treat empty R.txt as having no R.txt.
data['has_r_text_file'] = (z.read('R.txt').strip() != '')
return """\
# Generated by //build/android/gyp/aar.py
# To regenerate, use "update_android_aar_prebuilts = true" and run "gn gen".
""" + gn_helpers.ToGNString(data)
def _AddCommonArgs(parser):
parser.add_argument('aar_file',
help='Path to the AAR file.',
type=os.path.normpath)
def main():
parser = argparse.ArgumentParser(description=__doc__)
command_parsers = parser.add_subparsers(dest='command')
subp = command_parsers.add_parser(
'list', help='Output a GN scope describing the contents of the .aar.')
_AddCommonArgs(subp)
subp.add_argument('--output',
help='Output file.',
type=argparse.FileType('w'),
default='-')
subp = command_parsers.add_parser('extract', help='Extracts the .aar')
_AddCommonArgs(subp)
subp.add_argument('--output-dir',
help='Output directory for the extracted files.',
required=True,
type=os.path.normpath)
subp.add_argument('--assert-info-file',
help='Path to .info file. Asserts that it matches what '
'"list" would output.',
type=argparse.FileType('r'))
args = parser.parse_args()
if args.command == 'extract':
if args.assert_info_file:
expected = _CreateInfo(args.aar_file)
actual = args.assert_info_file.read()
if actual != expected:
raise Exception('android_aar_prebuilt() cached .info file is '
'out-of-date. Run gn gen with '
'update_android_aar_prebuilts=true to update it.')
# Clear previously extracted versions of the AAR.
shutil.rmtree(args.output_dir, True)
build_utils.ExtractAll(args.aar_file, path=args.output_dir)
elif args.command == 'list':
args.output.write(_CreateInfo(args.aar_file))
if __name__ == '__main__':
sys.exit(main())
|
example/image-classification/benchmark_score.py | Vikas-kum/incubator-mxnet | 228 | 12683185 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Benchmark the scoring performance on various CNNs
"""
from common import find_mxnet
from common.util import get_gpus
import mxnet as mx
import mxnet.gluon.model_zoo.vision as models
from importlib import import_module
import logging
import argparse
import time
import numpy as np
logging.basicConfig(level=logging.DEBUG)
parser = argparse.ArgumentParser(description='SymbolAPI-based CNN inference performance benchmark')
parser.add_argument('--network', type=str, default='all',
choices=['all', 'alexnet', 'vgg-16', 'resnetv1-50', 'resnet-50',
'resnet-152', 'inception-bn', 'inception-v3',
'inception-v4', 'inception-resnet-v2', 'mobilenet',
'densenet121', 'squeezenet1.1'])
parser.add_argument('--batch-size', type=int, default=0,
help='Batch size to use for benchmarking. Example: 32, 64, 128. '
'By default, runs benchmark for batch sizes - 1, 32, 64, 128, 256')
opt = parser.parse_args()
def get_symbol(network, batch_size, dtype):
image_shape = (3,299,299) if network in ['inception-v3', 'inception-v4'] else (3,224,224)
num_layers = 0
if network == 'inception-resnet-v2':
network = network
elif 'resnet' in network:
num_layers = int(network.split('-')[1])
network = network.split('-')[0]
if 'vgg' in network:
num_layers = int(network.split('-')[1])
network = 'vgg'
if network in ['densenet121', 'squeezenet1.1']:
sym = models.get_model(network)
sym.hybridize()
data = mx.sym.var('data')
sym = sym(data)
sym = mx.sym.SoftmaxOutput(sym, name='softmax')
else:
net = import_module('symbols.'+network)
sym = net.get_symbol(num_classes=1000,
image_shape=','.join([str(i) for i in image_shape]),
num_layers=num_layers,
dtype=dtype)
return (sym, [('data', (batch_size,)+image_shape)])
def score(network, dev, batch_size, num_batches, dtype):
# get mod
sym, data_shape = get_symbol(network, batch_size, dtype)
mod = mx.mod.Module(symbol=sym, context=dev)
mod.bind(for_training = False,
inputs_need_grad = False,
data_shapes = data_shape)
mod.init_params(initializer=mx.init.Xavier(magnitude=2.))
# get data
data = [mx.random.uniform(-1.0, 1.0, shape=shape, ctx=dev) for _, shape in mod.data_shapes]
batch = mx.io.DataBatch(data, []) # empty label
# run
dry_run = 5 # use 5 iterations to warm up
for i in range(dry_run+num_batches):
if i == dry_run:
tic = time.time()
mod.forward(batch, is_train=False)
for output in mod.get_outputs():
output.wait_to_read()
# return num images per second
return num_batches*batch_size/(time.time() - tic)
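# A quick standalone sketch (parameter values are illustrative): benchmark
# ResNet-50 on CPU with batch size 1 for 10 batches and print the throughput.
#
#   imgs_per_sec = score(network='resnet-50', dev=mx.cpu(), batch_size=1,
#                        num_batches=10, dtype='float32')
#   print('%.1f images/sec' % imgs_per_sec)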
if __name__ == '__main__':
if opt.network == 'all':
networks = ['alexnet', 'vgg-16', 'resnetv1-50', 'resnet-50',
'resnet-152', 'inception-bn', 'inception-v3',
'inception-v4', 'inception-resnet-v2',
'mobilenet', 'densenet121', 'squeezenet1.1']
logging.info('It may take some time to run all models, '
'set --network to run a specific one')
else:
networks = [opt.network]
devs = [mx.gpu(0)] if len(get_gpus()) > 0 else []
# Enable USE_MKLDNN for better CPU performance
devs.append(mx.cpu())
if opt.batch_size == 0:
batch_sizes = [1, 32, 64, 128, 256]
logging.info('run batchsize [1, 32, 64, 128, 256] by default, '
'set --batch-size to run a specific one')
else:
batch_sizes = [opt.batch_size]
for net in networks:
logging.info('network: %s', net)
if net in ['densenet121', 'squeezenet1.1']:
logging.info('network: %s is converted from gluon modelzoo', net)
logging.info('you can run benchmark/python/gluon/benchmark_gluon.py for more models')
for d in devs:
logging.info('device: %s', d)
logged_fp16_warning = False
for b in batch_sizes:
for dtype in ['float32', 'float16']:
if d == mx.cpu() and dtype == 'float16':
#float16 is not supported on CPU
continue
elif net in ['inception-bn', 'alexnet'] and dtype == 'float16':
if not logged_fp16_warning:
logging.info('Model definition for {} does not support float16'.format(net))
logged_fp16_warning = True
else:
speed = score(network=net, dev=d, batch_size=b, num_batches=10, dtype=dtype)
logging.info('batch size %2d, dtype %s, images/sec: %f', b, dtype, speed)
|
taskflow/tests/unit/worker_based/test_types.py | JonasMie/taskflow | 299 | 12683188 | # -*- coding: utf-8 -*-
# Copyright (C) 2014 Yahoo! Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_utils import reflection
from taskflow.engines.worker_based import types as worker_types
from taskflow import test
from taskflow.test import mock
from taskflow.tests import utils
class TestTopicWorker(test.TestCase):
def test_topic_worker(self):
worker = worker_types.TopicWorker("dummy-topic",
[utils.DummyTask], identity="dummy")
self.assertTrue(worker.performs(utils.DummyTask))
self.assertFalse(worker.performs(utils.NastyTask))
self.assertEqual('dummy', worker.identity)
self.assertEqual('dummy-topic', worker.topic)
class TestProxyFinder(test.TestCase):
@mock.patch("oslo_utils.timeutils.now")
def test_expiry(self, mock_now):
finder = worker_types.ProxyWorkerFinder('me', mock.MagicMock(), [],
worker_expiry=60)
w, emit = finder._add('dummy-topic', [utils.DummyTask])
w.last_seen = 0
mock_now.side_effect = [120]
gone = finder.clean()
self.assertEqual(0, finder.total_workers)
self.assertEqual(1, gone)
def test_single_topic_worker(self):
finder = worker_types.ProxyWorkerFinder('me', mock.MagicMock(), [])
w, emit = finder._add('dummy-topic', [utils.DummyTask])
self.assertIsNotNone(w)
self.assertTrue(emit)
self.assertEqual(1, finder.total_workers)
w2 = finder.get_worker_for_task(utils.DummyTask)
self.assertEqual(w.identity, w2.identity)
def test_multi_same_topic_workers(self):
finder = worker_types.ProxyWorkerFinder('me', mock.MagicMock(), [])
w, emit = finder._add('dummy-topic', [utils.DummyTask])
self.assertIsNotNone(w)
self.assertTrue(emit)
w2, emit = finder._add('dummy-topic-2', [utils.DummyTask])
self.assertIsNotNone(w2)
self.assertTrue(emit)
w3 = finder.get_worker_for_task(
reflection.get_class_name(utils.DummyTask))
self.assertIn(w3.identity, [w.identity, w2.identity])
def test_multi_different_topic_workers(self):
finder = worker_types.ProxyWorkerFinder('me', mock.MagicMock(), [])
added = []
added.append(finder._add('dummy-topic', [utils.DummyTask]))
added.append(finder._add('dummy-topic-2', [utils.DummyTask]))
added.append(finder._add('dummy-topic-3', [utils.NastyTask]))
self.assertEqual(3, finder.total_workers)
w = finder.get_worker_for_task(utils.NastyTask)
self.assertEqual(added[-1][0].identity, w.identity)
w = finder.get_worker_for_task(utils.DummyTask)
self.assertIn(w.identity, [w_a[0].identity for w_a in added[0:2]])
|
tests/test_server.py | bdowning/aiotools | 121 | 12683193 |
import pytest
import asyncio
import functools
import glob
import logging.config
import multiprocessing as mp
import os
import signal
import sys
import tempfile
import time
from typing import List, Sequence
import aiotools
if os.environ.get('CI', '') and sys.version_info < (3, 9, 0):
pytest.skip(
'skipped to prevent kill CI agents due to signals on CI environments',
allow_module_level=True,
)
@pytest.fixture
def restore_signal():
os.setpgrp()
old_alrm = signal.getsignal(signal.SIGALRM)
old_intr = signal.getsignal(signal.SIGINT)
old_term = signal.getsignal(signal.SIGTERM)
old_usr1 = signal.getsignal(signal.SIGUSR1)
yield
signal.signal(signal.SIGALRM, old_alrm)
signal.signal(signal.SIGINT, old_intr)
signal.signal(signal.SIGTERM, old_term)
signal.signal(signal.SIGUSR1, old_usr1)
@pytest.fixture
def set_timeout():
def make_timeout(sec, callback):
def _callback(signum, frame):
signal.alarm(0)
callback()
signal.signal(signal.SIGALRM, _callback)
signal.setitimer(signal.ITIMER_REAL, sec)
yield make_timeout
@pytest.fixture
def exec_recorder():
f = tempfile.NamedTemporaryFile(
mode='w', encoding='utf8',
prefix='aiotools.tests.server.',
)
f.close()
def write(msg: str) -> None:
path = f"{f.name}.{os.getpid()}"
with open(path, 'a', encoding='utf8') as writer:
writer.write(msg + '\n')
def read() -> Sequence[str]:
lines: List[str] = []
for path in glob.glob(f"{f.name}.*"):
with open(path, 'r', encoding='utf8') as reader:
lines.extend(line.strip() for line in reader.readlines())
return lines
yield write, read
for path in glob.glob(f"{f.name}.*"):
os.unlink(path)
def interrupt():
os.kill(0, signal.SIGINT)
def interrupt_usr1():
os.kill(os.getpid(), signal.SIGUSR1)
@aiotools.server # type: ignore
async def myserver_simple(loop, proc_idx, args):
write = args[0]
await asyncio.sleep(0)
write(f'started:{proc_idx}')
yield
await asyncio.sleep(0)
write(f'terminated:{proc_idx}')
def test_server_singleproc(set_timeout, restore_signal, exec_recorder):
write, read = exec_recorder
set_timeout(0.2, interrupt)
aiotools.start_server(
myserver_simple,
args=(write,),
)
lines = set(read())
assert 'started:0' in lines
assert 'terminated:0' in lines
def test_server_multiproc(set_timeout, restore_signal, exec_recorder):
write, read = exec_recorder
set_timeout(0.2, interrupt)
aiotools.start_server(
myserver_simple,
num_workers=3,
args=(write,),
)
lines = set(read())
assert lines == {
'started:0', 'started:1', 'started:2',
'terminated:0', 'terminated:1', 'terminated:2',
}
@aiotools.server # type: ignore
async def myserver_signal(loop, proc_idx, args):
write = args[0]
await asyncio.sleep(0)
write(f'started:{proc_idx}')
received_signum = yield
await asyncio.sleep(0)
write(f'terminated:{proc_idx}:{received_signum}')
def test_server_multiproc_custom_stop_signals(
set_timeout,
restore_signal,
exec_recorder,
):
write, read = exec_recorder
set_timeout(0.2, interrupt_usr1)
aiotools.start_server(
myserver_signal,
num_workers=2,
stop_signals={signal.SIGUSR1},
args=(write,),
)
lines = set(read())
assert {'started:0', 'started:1'} < lines
assert {
f'terminated:0:{int(signal.SIGUSR1)}',
f'terminated:1:{int(signal.SIGUSR1)}',
} < lines
@aiotools.server # type: ignore
async def myserver_worker_init_error(loop, proc_idx, args):
write = args[0]
class _LogAdaptor:
def __init__(self, writer):
self.writer = writer
def write(self, msg):
msg = msg.strip().replace('\n', ' ')
self.writer(f'log:{proc_idx}:{msg}')
log_stream = _LogAdaptor(write)
logging.config.dictConfig({
'version': 1,
'handlers': {
'console': {
'class': 'logging.StreamHandler',
'stream': log_stream,
'level': 'DEBUG',
},
},
'loggers': {
'aiotools': {
'handlers': ['console'],
'level': 'DEBUG',
},
},
})
log = logging.getLogger('aiotools')
write(f'started:{proc_idx}')
log.debug('hello')
if proc_idx in (0, 2):
# delay until other workers start normally.
await asyncio.sleep(0.1 * proc_idx)
raise ZeroDivisionError('oops')
yield
# should not be reached if errored.
await asyncio.sleep(0)
write(f'terminated:{proc_idx}')
def test_server_worker_init_error(restore_signal, exec_recorder):
write, read = exec_recorder
aiotools.start_server(
myserver_worker_init_error,
num_workers=4,
args=(write,),
)
lines = set(read())
assert sum(1 if line.startswith('started:') else 0 for line in lines) == 4
# workers who did not raise errors have already started,
# and they should have terminated normally
# when the erroneous worker interrupted the main loop.
assert sum(1 if line.startswith('terminated:') else 0 for line in lines) == 2
assert sum(1 if 'hello' in line else 0 for line in lines) == 4
assert sum(1 if 'ZeroDivisionError: oops' in line else 0 for line in lines) == 2
def test_server_user_main(set_timeout, restore_signal):
main_enter = False
main_exit = False
@aiotools.main
def mymain_user_main():
nonlocal main_enter, main_exit
main_enter = True
yield 987
main_exit = True
@aiotools.server # type: ignore
async def myworker_user_main(loop, proc_idx, args):
assert args[0] == 987 # first arg from user main
assert args[1] == 123 # second arg from start_server args
yield
set_timeout(0.2, interrupt)
aiotools.start_server(
myworker_user_main,
mymain_user_main,
num_workers=3,
args=(123,),
)
assert main_enter
assert main_exit
def test_server_user_main_custom_stop_signals(set_timeout, restore_signal):
main_enter = False
main_exit = False
main_signal = None
worker_signals = mp.Array('i', 3)
@aiotools.main
def mymain():
nonlocal main_enter, main_exit, main_signal
main_enter = True
main_signal = yield
main_exit = True
@aiotools.server
async def myworker(loop, proc_idx, args):
worker_signals = args[0]
worker_signals[proc_idx] = yield
def noop(signum, frame):
pass
set_timeout(0.2, interrupt_usr1)
aiotools.start_server(
myworker,
mymain,
num_workers=3,
stop_signals={signal.SIGUSR1},
args=(worker_signals,),
)
assert main_enter
assert main_exit
assert main_signal == signal.SIGUSR1
assert list(worker_signals) == [signal.SIGUSR1] * 3
def test_server_user_main_tuple(set_timeout, restore_signal):
main_enter = False
main_exit = False
@aiotools.main
def mymain():
nonlocal main_enter, main_exit
main_enter = True
yield 987, 654
main_exit = True
@aiotools.server
async def myworker(loop, proc_idx, args):
assert args[0] == 987 # first arg from user main
assert args[1] == 654 # second arg from user main
assert args[2] == 123 # third arg from start_server args
yield
set_timeout(0.2, interrupt)
aiotools.start_server(
myworker,
mymain,
num_workers=3,
args=(123,),
)
assert main_enter
assert main_exit
def test_server_extra_proc(set_timeout, restore_signal):
extras = mp.Array('i', [0, 0])
def extra_proc(key, _, pidx, args):
assert _ is None
extras[key] = 980 + key
try:
while True:
time.sleep(0.1)
except KeyboardInterrupt:
print(f'extra[{key}] interrupted', file=sys.stderr)
except Exception as e:
print(f'extra[{key}] exception', e, file=sys.stderr)
finally:
print(f'extra[{key}] finish', file=sys.stderr)
extras[key] = 990 + key
@aiotools.server
async def myworker(loop, pidx, args):
yield
set_timeout(0.2, interrupt)
aiotools.start_server(myworker, extra_procs=[
functools.partial(extra_proc, 0),
functools.partial(extra_proc, 1)],
num_workers=3, args=(123, ))
assert extras[0] == 990
assert extras[1] == 991
def test_server_extra_proc_custom_stop_signal(set_timeout, restore_signal):
received_signals = mp.Array('i', [0, 0])
def extra_proc(key, _, pidx, args):
received_signals = args[0]
try:
while True:
time.sleep(0.1)
except aiotools.InterruptedBySignal as e:
received_signals[key] = e.args[0]
@aiotools.server
async def myworker(loop, pidx, args):
yield
set_timeout(0.3, interrupt_usr1)
aiotools.start_server(myworker, extra_procs=[
functools.partial(extra_proc, 0),
functools.partial(extra_proc, 1)],
stop_signals={signal.SIGUSR1},
args=(received_signals, ),
num_workers=3)
assert received_signals[0] == signal.SIGUSR1
assert received_signals[1] == signal.SIGUSR1
|
MLPYthonEnv/ml-agents-release_17/ml-agents/mlagents/trainers/tests/test_buffer.py | cihan-demir/NineMensMorris | 13,653 | 12683206 | import numpy as np
from mlagents.trainers.buffer import (
AgentBuffer,
AgentBufferField,
BufferKey,
ObservationKeyPrefix,
RewardSignalKeyPrefix,
)
from mlagents.trainers.trajectory import ObsUtil
def assert_array(a, b):
assert a.shape == b.shape
la = list(a.flatten())
lb = list(b.flatten())
for i in range(len(la)):
assert la[i] == lb[i]
def construct_fake_buffer(fake_agent_id):
b = AgentBuffer()
for step in range(9):
b[ObsUtil.get_name_at(0)].append(
np.array(
[
100 * fake_agent_id + 10 * step + 1,
100 * fake_agent_id + 10 * step + 2,
100 * fake_agent_id + 10 * step + 3,
],
dtype=np.float32,
)
)
b[BufferKey.CONTINUOUS_ACTION].append(
np.array(
[
100 * fake_agent_id + 10 * step + 4,
100 * fake_agent_id + 10 * step + 5,
],
dtype=np.float32,
)
)
b[BufferKey.GROUP_CONTINUOUS_ACTION].append(
[
np.array(
[
100 * fake_agent_id + 10 * step + 4,
100 * fake_agent_id + 10 * step + 5,
],
dtype=np.float32,
)
]
* 3
)
return b
def test_buffer():
agent_1_buffer = construct_fake_buffer(1)
agent_2_buffer = construct_fake_buffer(2)
agent_3_buffer = construct_fake_buffer(3)
# Test get_batch
a = agent_1_buffer[ObsUtil.get_name_at(0)].get_batch(
batch_size=2, training_length=1, sequential=True
)
assert_array(
np.array(a), np.array([[171, 172, 173], [181, 182, 183]], dtype=np.float32)
)
# Test get_batch
a = agent_2_buffer[ObsUtil.get_name_at(0)].get_batch(
batch_size=2, training_length=3, sequential=True
)
assert_array(
np.array(a),
np.array(
[
[231, 232, 233],
[241, 242, 243],
[251, 252, 253],
[261, 262, 263],
[271, 272, 273],
[281, 282, 283],
],
dtype=np.float32,
),
)
a = agent_2_buffer[ObsUtil.get_name_at(0)].get_batch(
batch_size=2, training_length=3, sequential=False
)
assert_array(
np.array(a),
np.array(
[
[251, 252, 253],
[261, 262, 263],
[271, 272, 273],
[261, 262, 263],
[271, 272, 273],
[281, 282, 283],
]
),
)
# Test padding
a = agent_2_buffer[ObsUtil.get_name_at(0)].get_batch(
batch_size=None, training_length=4, sequential=True
)
assert_array(
np.array(a),
np.array(
[
[201, 202, 203],
[211, 212, 213],
[221, 222, 223],
[231, 232, 233],
[241, 242, 243],
[251, 252, 253],
[261, 262, 263],
[271, 272, 273],
[281, 282, 283],
[0, 0, 0],
[0, 0, 0],
[0, 0, 0],
]
),
)
# Test group entries return Lists of Lists. Make sure to pad properly!
a = agent_2_buffer[BufferKey.GROUP_CONTINUOUS_ACTION].get_batch(
batch_size=None, training_length=4, sequential=True
)
for _group_entry in a[:-3]:
assert len(_group_entry) == 3
for _group_entry in a[-3:]:
assert len(_group_entry) == 0
agent_1_buffer.reset_agent()
assert agent_1_buffer.num_experiences == 0
update_buffer = AgentBuffer()
agent_2_buffer.resequence_and_append(
update_buffer, batch_size=None, training_length=2
)
agent_3_buffer.resequence_and_append(
update_buffer, batch_size=None, training_length=2
)
assert len(update_buffer[BufferKey.CONTINUOUS_ACTION]) == 20
assert np.array(update_buffer[BufferKey.CONTINUOUS_ACTION]).shape == (20, 2)
c = update_buffer.make_mini_batch(start=0, end=1)
assert c.keys() == update_buffer.keys()
# Make sure the values of c are AgentBufferField
for val in c.values():
assert isinstance(val, AgentBufferField)
assert np.array(c[BufferKey.CONTINUOUS_ACTION]).shape == (1, 2)
def test_agentbufferfield():
# Test constructor
a = AgentBufferField([0, 1, 2])
for i, num in enumerate(a):
assert num == i
# Test indexing
assert a[i] == num
# Test slicing
b = a[1:3]
assert b == [1, 2]
assert isinstance(b, AgentBufferField)
# Test padding
c = AgentBufferField()
for _ in range(2):
c.append([np.array(1), np.array(2)])
for _ in range(2):
c.append([np.array(1)])
padded = c.padded_to_batch(pad_value=3)
assert np.array_equal(padded[0], np.array([1, 1, 1, 1]))
assert np.array_equal(padded[1], np.array([2, 2, 3, 3]))
# Make sure it doesn't fail when the field isn't a list
padded_a = a.padded_to_batch()
assert np.array_equal(padded_a, a)
def fakerandint(values):
return 19
def test_buffer_sample():
agent_1_buffer = construct_fake_buffer(1)
agent_2_buffer = construct_fake_buffer(2)
update_buffer = AgentBuffer()
agent_1_buffer.resequence_and_append(
update_buffer, batch_size=None, training_length=2
)
agent_2_buffer.resequence_and_append(
update_buffer, batch_size=None, training_length=2
)
# Test non-LSTM
mb = update_buffer.sample_mini_batch(batch_size=4, sequence_length=1)
assert mb.keys() == update_buffer.keys()
assert np.array(mb[BufferKey.CONTINUOUS_ACTION]).shape == (4, 2)
# Test LSTM
# We need to check if we ever get a breaking start - this will maximize the probability
mb = update_buffer.sample_mini_batch(batch_size=20, sequence_length=19)
assert mb.keys() == update_buffer.keys()
# Should only return one sequence
assert np.array(mb[BufferKey.CONTINUOUS_ACTION]).shape == (19, 2)
def test_num_experiences():
agent_1_buffer = construct_fake_buffer(1)
agent_2_buffer = construct_fake_buffer(2)
update_buffer = AgentBuffer()
assert len(update_buffer[BufferKey.CONTINUOUS_ACTION]) == 0
assert update_buffer.num_experiences == 0
agent_1_buffer.resequence_and_append(
update_buffer, batch_size=None, training_length=2
)
agent_2_buffer.resequence_and_append(
update_buffer, batch_size=None, training_length=2
)
assert len(update_buffer[BufferKey.CONTINUOUS_ACTION]) == 20
assert update_buffer.num_experiences == 20
def test_buffer_truncate():
agent_1_buffer = construct_fake_buffer(1)
agent_2_buffer = construct_fake_buffer(2)
update_buffer = AgentBuffer()
agent_1_buffer.resequence_and_append(
update_buffer, batch_size=None, training_length=2
)
agent_2_buffer.resequence_and_append(
update_buffer, batch_size=None, training_length=2
)
# Test non-LSTM
update_buffer.truncate(2)
assert update_buffer.num_experiences == 2
agent_1_buffer.resequence_and_append(
update_buffer, batch_size=None, training_length=2
)
agent_2_buffer.resequence_and_append(
update_buffer, batch_size=None, training_length=2
)
# Test LSTM, truncate should be some multiple of sequence_length
update_buffer.truncate(4, sequence_length=3)
assert update_buffer.num_experiences == 3
for buffer_field in update_buffer.values():
assert isinstance(buffer_field, AgentBufferField)
def test_key_encode_decode():
keys = (
list(BufferKey)
+ [(k, 42) for k in ObservationKeyPrefix]
+ [(k, "gail") for k in RewardSignalKeyPrefix]
)
for k in keys:
assert k == AgentBuffer._decode_key(AgentBuffer._encode_key(k))
def test_buffer_save_load():
original = construct_fake_buffer(3)
import io
write_buffer = io.BytesIO()
original.save_to_file(write_buffer)
loaded = AgentBuffer()
loaded.load_from_file(write_buffer)
assert len(original) == len(loaded)
for k in original.keys():
assert np.allclose(original[k], loaded[k])
|
SOA/paf-cython/Extension/test.py | awaiswill/present | 120 | 12683220 |
import Calculate as c
print(c.sdev([1,2,3,4,5]))
|
avocado/model.py | sry002/avocado | 104 | 12683245 |
# models.py
# Contact: <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
"""
Avocado is a deep tensor factorization model for learning a latent representation
of the human epigenome. This file has functions for building a deep tensor
factorization model.
"""
from .io import data_generator
from .io import permuted_data_generator
from .io import sequential_data_generator
import json
import numpy
import keras
from keras.layers import Input, Embedding, Dense
from keras.layers import Multiply, Dot, Flatten, concatenate
from keras.models import Model
from keras.optimizers import Adam
def build_model(n_celltypes, n_celltype_factors, n_assays, n_assay_factors,
n_genomic_positions, n_25bp_factors, n_250bp_factors, n_5kbp_factors,
n_layers, n_nodes, freeze_celltypes=False, freeze_assays=False,
freeze_genome_25bp=False, freeze_genome_250bp=False,
freeze_genome_5kbp=False, freeze_network=False):
"""This function builds a multi-scale deep tensor factorization model."""
celltype_input = Input(shape=(1,), name="celltype_input")
celltype_embedding = Embedding(n_celltypes, n_celltype_factors,
input_length=1, name="celltype_embedding")
celltype_embedding.trainable = not freeze_celltypes
celltype = Flatten()(celltype_embedding(celltype_input))
assay_input = Input(shape=(1,), name="assay_input")
assay_embedding = Embedding(n_assays, n_assay_factors,
input_length=1, name="assay_embedding")
assay_embedding.trainable = not freeze_assays
assay = Flatten()(assay_embedding(assay_input))
genome_25bp_input = Input(shape=(1,), name="genome_25bp_input")
genome_25bp_embedding = Embedding(n_genomic_positions, n_25bp_factors,
input_length=1, name="genome_25bp_embedding")
genome_25bp_embedding.trainable = not freeze_genome_25bp
genome_25bp = Flatten()(genome_25bp_embedding(genome_25bp_input))
genome_250bp_input = Input(shape=(1,), name="genome_250bp_input")
genome_250bp_embedding = Embedding(int(n_genomic_positions / 10) + 1,
n_250bp_factors, input_length=1, name="genome_250bp_embedding")
genome_250bp_embedding.trainable = not freeze_genome_250bp
genome_250bp = Flatten()(genome_250bp_embedding(genome_250bp_input))
genome_5kbp_input = Input(shape=(1,), name="genome_5kbp_input")
genome_5kbp_embedding = Embedding(int(n_genomic_positions / 200) + 1,
n_5kbp_factors, input_length=1, name="genome_5kbp_embedding")
genome_5kbp_embedding.trainable = not freeze_genome_5kbp
genome_5kbp = Flatten()(genome_5kbp_embedding(genome_5kbp_input))
layers = [celltype, assay, genome_25bp, genome_250bp, genome_5kbp]
inputs = (celltype_input, assay_input, genome_25bp_input,
genome_250bp_input, genome_5kbp_input)
x = concatenate(layers)
for i in range(n_layers):
layer = Dense(n_nodes, activation='relu', name="dense_{}".format(i))
layer.trainable = not freeze_network
x = layer(x)
layer = Dense(1, name="y_pred")
layer.trainable = not freeze_network
y = layer(x)
model = Model(inputs=inputs, outputs=y)
model.compile(optimizer='adam', loss='mse', metrics=['mse'])
return model
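# A minimal sketch of calling build_model directly with small factor sizes
# (the values are illustrative only); the Avocado class below wraps this call
# with the defaults used in the manuscript.
#
#   model = build_model(n_celltypes=5, n_celltype_factors=8,
#                       n_assays=4, n_assay_factors=16,
#                       n_genomic_positions=10000, n_25bp_factors=5,
#                       n_250bp_factors=8, n_5kbp_factors=10,
#                       n_layers=2, n_nodes=64)
#   model.summary()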
class Avocado(object):
"""An Avocado multi-scale deep tensor factorization model.
The Avocado model is a multi-scale deep tensor factorization model. It is
multi-scale because it represents the genome axis using three different
resolutions---25 bp, 250 bp and 5 kbp. It is deep because it replaces the
dot product component of most linear factorization approaches with a deep
neural network. The tensor factors and the neural network weights are
trained jointly to impute the values in the tensor that it is provided.
In this case Avocado is trained on epigenomic data whose dimensions are
human cell type, epigenomic assay, and genomic coordinate. The trained
model can impute epigenomic assays that have not yet been performed, and
the learned factor values can themselves be used to represent genomic
positions more compactly than the full set of epigenomic measurements
could.
The default parameters are those used in the manuscript entitled
"Multi-scale deep tensor factorization learns a latent representation
of the human epigenome".
Parameters
----------
celltypes : list
The list of cell type names that will be modeled
assays : list
The list of assays that will be modeled
n_celltype_factors : int, optional
The number of factors to use to represent each cell type. Default is 32.
n_assay_factors : int, optional
The number of factors to use to represent each assay. Default is 256.
n_genomic_positions : int, optional
The number of genomic positions to model. This is typically either
the size of the pilot regions when performing initial training or
the size of the chromosome when fitting the genomic latent factors.
Default is 1126469, the size of the pilot regions in chr1-22.
n_25bp_factors : int, optional
The number of factors to use to represent the genome at 25 bp
resolution. Default is 25.
n_250bp_factors : int, optional
The number of factors to use to represent the genome at 250 bp
resolution. Default is 40.
n_5kbp_factors : int, optional
The number of factors to use to represent the genome at 5 kbp
resolution. Default is 45.
n_layers : int, optional
The number of hidden layers in the neural model. Default is 2.
n_nodes : int, optional
The number of nodes per layer. Default is 2048.
batch_size : int, optional
The size of each batch to use in training. Default is 40000.
freeze_celltypes : bool, optional
Whether to freeze the training of the cell type embedding. Default
is False.
freeze_assays : bool, optional
Whether to freeze the training of the assay embeddings. Default
is False.
freeze_genome_25bp : bool, optional
Whether to freeze the training of the 25 bp genome factors. Default
is False.
freeze_genome_250bp : bool, optional
Whether to freeze the training of the 250 bp genome factors. Default
is False.
freeze_genome_5kbp : bool, optional
Whether to freeze the training of the 5 kbp genome factors. Default
is False.
freeze_network : bool, optional
Whether to freeze the training of the neural network. Default
is False.
Example
-------
>>> import numpy, itertools
>>> from avocado import Avocado
>>>
>>> celltypes = ['E003', 'E017', 'E065', 'E116', 'E117']
>>> assays = ['H3K4me3', 'H3K27me3', 'H3K36me3', 'H3K9me3', 'H3K4me1']
>>>
>>> data = {}
>>> for celltype, assay in itertools.product(celltypes, assays):
>>> filename = 'data/{}.{}.pilot.arcsinh.npz'.format(celltype, assay)
>>> data[(celltype, assay)] = numpy.load(filename)['arr_0']
>>>
>>> model = Avocado(celltypes, assays)
>>> model.fit(data)
>>>
>>> track = model.predict("E065", "H3K27me3")
"""
def __init__(self, celltypes, assays, n_celltype_factors=32,
n_assay_factors=256, n_genomic_positions=1126469,
n_25bp_factors=25, n_250bp_factors=40, n_5kbp_factors=45, n_layers=2,
n_nodes=2048, batch_size=40000, freeze_celltypes=False,
freeze_assays=False, freeze_genome_25bp=False, freeze_genome_250bp=False,
freeze_genome_5kbp=False, freeze_network=False):
self.celltypes = list(celltypes)
self.assays = list(assays)
self.experiments = []
self.n_celltypes = len(celltypes)
self.n_assays = len(assays)
self.batch_size = batch_size
self.n_celltype_factors = n_celltype_factors
self.n_assay_factors = n_assay_factors
self.n_genomic_positions = n_genomic_positions
self.n_25bp_factors = n_25bp_factors
self.n_250bp_factors = n_250bp_factors
self.n_5kbp_factors = n_5kbp_factors
self.n_layers = n_layers
self.n_nodes = n_nodes
self.freeze_celltypes = freeze_celltypes
self.freeze_assays = freeze_assays
self.freeze_genome_25bp = freeze_genome_25bp
self.freeze_genome_250bp = freeze_genome_250bp
self.freeze_genome_5kbp = freeze_genome_5kbp
self.freeze_network = freeze_network
self.model = build_model(n_celltypes=self.n_celltypes,
n_celltype_factors=n_celltype_factors,
n_assays=self.n_assays,
n_assay_factors=n_assay_factors,
n_genomic_positions=n_genomic_positions,
n_25bp_factors=n_25bp_factors,
n_250bp_factors=n_250bp_factors,
n_5kbp_factors=n_5kbp_factors,
n_layers=n_layers,
n_nodes=n_nodes,
freeze_celltypes=freeze_celltypes,
freeze_assays=freeze_assays,
freeze_genome_25bp=freeze_genome_25bp,
freeze_genome_250bp=freeze_genome_250bp,
freeze_genome_5kbp=freeze_genome_5kbp,
freeze_network=freeze_network)
@property
def celltype_embedding(self):
"""Returns the learned cell type embedding as a numpy array.
Parameters
----------
None
Returns
-------
celltype_embedding : numpy.ndarray, shape=(n_celltypes, n_factors)
The learned embedding corresponding to the input name
'celltype_embedding'. The cell types are ordered according to the
order defined in self.celltypes.
"""
for layer in self.model.layers:
if layer.name == 'celltype_embedding':
return layer.get_weights()[0]
raise ValueError("No layer in model named 'celltype_embedding'.")
@property
def assay_embedding(self):
"""Returns the learned assay embedding as a numpy array.
Parameters
----------
None
Returns
-------
assay_embedding : numpy.ndarray, shape=(n_assays, n_factors)
The learned embedding corresponding to the input name
'assay_embedding'. The assays are ordered according to the order
defined in self.assays.
"""
for layer in self.model.layers:
if layer.name == 'assay_embedding':
return layer.get_weights()[0]
raise ValueError("No layer in model named 'assay_embedding'.")
@property
def genome_embedding(self):
"""Returns the learned genomic embedding as a numpy array.
This function will concatenate together the three resolutions of
genomic factors, such that the first columns correspond to the
25 bp factors, the next columns correspond to the 250 bp factors,
and the final columns correspond to the 5 kbp factors. The factors
that span more than 25 bp will be repeated across several successive
positions.
Parameters
----------
None
Returns
-------
genome_embedding : numpy.ndarray, shape=(n_genomic_positions,
n_25bp_factors + n_250bp_factors + n_5kbp_factors)
The learned embedding corresponding to the input names
genome_25bp_embedding, genome_250bp_embedding, and
genome_5kbp_embedding.
"""
n_25bp = self.n_25bp_factors
n_250bp = self.n_250bp_factors
n_5kbp = self.n_5kbp_factors
genome_embedding = numpy.empty((self.n_genomic_positions,
n_25bp + n_250bp + n_5kbp))
for layer in self.model.layers:
if layer.name == 'genome_25bp_embedding':
genome_25bp_embedding = layer.get_weights()[0]
elif layer.name == 'genome_250bp_embedding':
genome_250bp_embedding = layer.get_weights()[0]
elif layer.name == 'genome_5kbp_embedding':
genome_5kbp_embedding = layer.get_weights()[0]
n1 = n_25bp
n2 = n_25bp + n_250bp
for i in range(self.n_genomic_positions):
genome_embedding[i, :n1] = genome_25bp_embedding[i]
genome_embedding[i, n1:n2] = genome_250bp_embedding[i // 10]
genome_embedding[i, n2:] = genome_5kbp_embedding[i // 200]
return genome_embedding
def summary(self):
"""A wrapper method for the keras summary method."""
self.model.summary()
def fit(self, X_train, X_valid=None, n_epochs=200, epoch_size=120,
verbose=1, callbacks=None, sampling='sequential', input_generator=None,
**kwargs):
"""Fit the model to the given epigenomic tracks.
Pass in a dictionary of training data and an optional dictionary of
validation data. The keys to this dictionary are a tuple of the format
(celltype, assay) and the values are the corresponding track in the
form of a numpy array. The tracks can either be in the form of an array
that is in memory or as a memory map.
Parameters
----------
X_train : dict
A dictionary of training data values, where the keys are a tuple of
(celltype, assay) and the values are a track.
X_valid : dict or None, optional
A dictionary of validation data values that are used to calculate
validation set MSE during the training process. If None, validation
set statistics are not calculated during the training process.
Default is None.
n_epochs : int, optional
The number of epochs to train on before ending training. Default is 200.
epoch_size : int, optional
The number of batches per epoch. Default is 120.
verbose: int, optional
The verbosity level of training. Must be one of 0, 1, or 2, where 0
means silent, 1 means progress bar, and 2 means use only one line
per epoch. Default is 1.
callbacks : list or None, optional
A list of keras callback instances to be called during training.
sampling : str, optional
The sampling strategy to use for the generators. Must be one of the
following:
'sequential' : Sequentially scans through the genome indexes,
selecting a cell type and assay randomly at each position
'permuted' : Sequentially scans through a permuted version
of the genome indexes, such that each epoch sees every
genomic index once, but each batch sees nearly random
indexes
'random' : Randomly selects genomic positions. No guarantee
on the number of times each position has been seen.
Default is 'sequential'.
input_generator : generator or None, optional
A custom data generator object to be used in the place of the
default generator. This will only change the training generator,
not the validation generator. Default is None.
**kwargs : optional
Any other keyword arguments to be passed into the `fit_generator`
method.
Returns
-------
history : keras.History.history
The keras history object that records training loss values and
metric values.
"""
if not isinstance(X_train, dict):
raise ValueError("X_train must be a dictionary where the keys" \
" are (celltype, assay) tuples and the values are the track" \
" corresponding to that pair.")
if X_valid is not None and not isinstance(X_valid, dict):
raise ValueError("X_valid must be a dictionary where the keys" \
" are (celltype, assay) tuples and the values are the track" \
" corresponding to that pair.")
for (celltype, assay), track in X_train.items():
if celltype not in self.celltypes:
raise ValueError("Celltype {} appears in the training data " \
"but not in the list of cell types provided to the " \
"model.".format(celltype))
if assay not in self.assays:
raise ValueError("Assay {} appears in the training data " \
"but not in the list of assays provided to the " \
"model.".format(assay))
if len(track) != self.n_genomic_positions:
raise ValueError("The track corresponding to {} {} is of " \
"size {} while the model encodes {} genomic " \
"positions".format(celltype, assay, len(track),
self.n_genomic_positions))
if X_valid is not None:
for (celltype, assay), track in X_valid.items():
if celltype not in self.celltypes:
raise ValueError("Celltype {} appears in the validation " \
"data but not in the list of cell types provided to " \
"the model.".format(celltype))
if assay not in self.assays:
raise ValueError("Assay {} appears in the validation " \
"data but not in the list of assays provided to the " \
"model.".format(assay))
if len(track) != self.n_genomic_positions:
raise ValueError("The track corresponding to {} {} is of " \
"size {} while the model encodes {} genomic " \
"positions".format(celltype, assay, len(track),
self.n_genomic_positions))
if input_generator is not None:
X_train_gen = input_generator
elif sampling == 'sequential':
X_train_gen = sequential_data_generator(self.celltypes,
self.assays, X_train, self.n_genomic_positions,
self.batch_size)
elif sampling == 'permuted':
X_train_gen = permuted_data_generator(self.celltypes,
self.assays, X_train, self.n_genomic_positions,
self.batch_size)
elif sampling == 'random':
X_train_gen = data_generator(self.celltypes,
self.assays, X_train, self.n_genomic_positions,
self.batch_size)
else:
raise ValueError("sampling must be one of 'sequential', " \
"'permuted', or 'random'.")
if X_valid is not None:
X_valid_gen = data_generator(self.celltypes, self.assays,
X_valid, self.n_genomic_positions, self.batch_size)
history = self.model.fit_generator(X_train_gen, epoch_size, n_epochs,
workers=1, validation_data=X_valid_gen,
validation_steps=30, verbose=verbose, callbacks=callbacks,
**kwargs)
else:
history = self.model.fit_generator(X_train_gen, epoch_size, n_epochs,
workers=1, verbose=verbose,
callbacks=callbacks, **kwargs)
self.experiments = list(X_train.keys())
return history
def fit_celltypes(self, X_train, X_valid=None, n_epochs=200, epoch_size=120,
verbose=1, callbacks=None, **kwargs):
"""Add a new cell type(s) to an otherwise frozen model.
This method will add a new cell type to the cell type embedding after
freezing all of the other parameters in the model, including weights
and the other cell type positions. Functionally it will train a new
cell type embedding and return a new model whose cell type embedding
is the concatenation of the old cell type embedding and the new one.
Pass in a dictionary of training data and an optional dictionary of
validation data. The keys to this dictionary are a tuple of the format
(celltype, assay) and the values are the corresponding track in the
form of a numpy array. The tracks can either be in the form of an array
that is in memory or as a memory map. The celltypes provided should not
appear in the model.celltypes attribute but the assays should exclusively
appear in the model.assays attribute.
Parameters
----------
X_train : dict
A dictionary of training data values, where the keys are a tuple of
(celltype, assay) and the values are a track.
X_valid : dict or None, optional
A dictionary of validation data values that are used to calculate
validation set MSE during the training process. If None, validation
set statistics are not calculated during the training process.
Default is None.
n_epochs : int, optional
The number of epochs to train on before ending training. Default is 200.
epoch_size : int, optional
The number of batches per epoch. Default is 120.
verbose: int, optional
The verbosity level of training. Must be one of 0, 1, or 2, where 0
means silent, 1 means progress bar, and 2 means use only one line
per epoch.
callbacks : list or None, optional
A list of keras callback instances to be called during training.
**kwargs : optional
Any other keyword arguments to be passed into the `fit_generator`
method.
Returns
-------
history : keras.History.history
The keras history object that records training loss values and
metric values.
"""
if not isinstance(X_train, dict):
raise ValueError("X_train must be a dictionary where the keys" \
" are (celltype, assay) tuples and the values are the track" \
" corresponding to that pair.")
if X_valid is not None and not isinstance(X_valid, dict):
raise ValueError("X_valid must be a dictionary where the keys" \
" are (celltype, assay) tuples and the values are the track" \
" corresponding to that pair.")
for (celltype, assay), track in X_train.items():
if celltype in self.celltypes:
raise ValueError("Celltype {} appears in the training data " \
"and also in the list of cell types already in the " \
"model.".format(celltype))
if assay not in self.assays:
raise ValueError("Assay {} appears in the training data " \
"but not in the list of assays provided to the " \
"model.".format(assay))
if len(track) != self.n_genomic_positions:
raise ValueError("The track corresponding to {} {} is of " \
"size {} while the model encodes {} genomic " \
"positions".format(celltype, assay, len(track),
self.n_genomic_positions))
if X_valid is not None:
for (celltype, assay), track in X_valid.items():
if celltype in self.celltypes:
raise ValueError("Celltype {} appears in the validation " \
"data and also in the list of cell types already in " \
"the model.".format(celltype))
if assay not in self.assays:
raise ValueError("Assay {} appears in the training data " \
"but not in the list of assays provided to the " \
"model.".format(assay))
if len(track) != self.n_genomic_positions:
raise ValueError("The track corresponding to {} {} is of " \
"size {} while the model encodes {} genomic " \
"positions".format(celltype, assay, len(track),
self.n_genomic_positions))
new_celltypes = list(numpy.unique([ct for ct, _ in X_train.keys()]))
model = build_model(n_celltypes=len(new_celltypes),
n_celltype_factors=self.n_celltype_factors,
n_assays=self.n_assays,
n_assay_factors=self.n_assay_factors,
n_genomic_positions=self.n_genomic_positions,
n_25bp_factors=self.n_25bp_factors,
n_250bp_factors=self.n_250bp_factors,
n_5kbp_factors=self.n_5kbp_factors,
n_layers=self.n_layers,
n_nodes=self.n_nodes,
freeze_celltypes=False,
freeze_assays=True,
freeze_genome_25bp=True,
freeze_genome_250bp=True,
freeze_genome_5kbp=True,
freeze_network=True)
for old_layer, new_layer in zip(self.model.layers, model.layers):
if 'input' in old_layer.name:
continue
if old_layer.name == 'celltype_embedding':
continue
new_layer.set_weights(old_layer.get_weights())
X_train_gen = sequential_data_generator(new_celltypes, self.assays,
X_train, self.n_genomic_positions, self.batch_size)
if X_valid is not None:
X_valid_gen = data_generator(new_celltypes, self.assays,
X_valid, self.n_genomic_positions, self.batch_size)
history = model.fit_generator(X_train_gen, epoch_size, n_epochs,
workers=1, validation_data=X_valid_gen,
validation_steps=30, verbose=verbose, callbacks=callbacks,
**kwargs)
else:
history = model.fit_generator(X_train_gen, epoch_size, n_epochs,
workers=1, verbose=verbose,
callbacks=callbacks, **kwargs)
for layer in self.model.layers:
if layer.name == 'celltype_embedding':
celltype_embedding = layer.get_weights()[0]
break
for layer in model.layers:
if layer.name == 'celltype_embedding':
new_celltype_embedding = layer.get_weights()[0]
break
celltype_embedding = numpy.concatenate([celltype_embedding,
new_celltype_embedding])
self.celltypes.extend(new_celltypes)
self.n_celltypes = len(self.celltypes)
model = build_model(n_celltypes=self.n_celltypes,
n_celltype_factors=self.n_celltype_factors,
n_assays=self.n_assays,
n_assay_factors=self.n_assay_factors,
n_genomic_positions=self.n_genomic_positions,
n_25bp_factors=self.n_25bp_factors,
n_250bp_factors=self.n_250bp_factors,
n_5kbp_factors=self.n_5kbp_factors,
n_layers=self.n_layers,
n_nodes=self.n_nodes,
freeze_celltypes=self.freeze_celltypes,
freeze_assays=self.freeze_assays,
freeze_genome_25bp=self.freeze_genome_25bp,
freeze_genome_250bp=self.freeze_genome_250bp,
freeze_genome_5kbp=self.freeze_genome_5kbp,
freeze_network=self.freeze_network)
for old_layer, new_layer in zip(self.model.layers, model.layers):
if 'input' in old_layer.name:
continue
if old_layer.name == 'celltype_embedding':
new_layer.set_weights([celltype_embedding])
else:
new_layer.set_weights(old_layer.get_weights())
self.experiments = self.experiments + list(X_train.keys())
self.model = model
return history
def fit_assays(self, X_train, X_valid=None, n_epochs=200, epoch_size=120,
verbose=1, callbacks=None, **kwargs):
"""Add a new assay(s) to an otherwise frozen model.
This method will add a new assay to the assay embedding after
freezing all of the other parameters in the model, including weights
and the other assay positions. Functionally it will train a new
assay embedding and return a new model whose assay embedding
is the concatenation of the old assay embedding and the new one.
Pass in a dictionary of training data and an optional dictionary of
validation data. The keys to this dictionary are a tuple of the format
(celltype, assay) and the values are the corresponding track in the
form of a numpy array. The tracks can either be in the form of an array
that is in memory or as a memory map. The assays provided should not
appear in the model.assays attribute, but the cell types should appear
in the model.celltypes attribute.
Parameters
----------
X_train : dict
A dictionary of training data values, where the keys are a tuple of
(celltype, assay) and the values are a track.
X_valid : dict or None, optional
A dictionary of validation data values that are used to calculate
validation set MSE during the training process. If None, validation
set statistics are not calculated during the training process.
Default is None.
n_epochs : int, optional
The number of epochs to train on before ending training. Default is 200.
epoch_size : int, optional
The number of batches per epoch. Default is 120.
verbose: int, optional
The verbosity level of training. Must be one of 0, 1, or 2, where 0
means silent, 1 means progress bar, and 2 means use only one line
per epoch.
callbacks : list or None, optional
A list of keras callback instances to be called during training.
**kwargs : optional
Any other keyword arguments to be passed into the `fit_generator`
method.
Returns
-------
history : keras.History.history
The keras history object that records training loss values and
metric values.
"""
if not isinstance(X_train, dict):
raise ValueError("X_train must be a dictionary where the keys" \
" are (celltype, assay) tuples and the values are the track" \
" corresponding to that pair.")
if X_valid is not None and not isinstance(X_valid, dict):
raise ValueError("X_valid must be a dictionary where the keys" \
" are (celltype, assay) tuples and the values are the track" \
" corresponding to that pair.")
for (celltype, assay), track in X_train.items():
if celltype not in self.celltypes:
raise ValueError("Celltype {} appears in the training data " \
"but not in the list of cell types already in the " \
"model.".format(celltype))
if assay in self.assays:
raise ValueError("Assay {} appears in the training data " \
"and also in the list of assays already in the " \
"model.".format(assay))
if len(track) != self.n_genomic_positions:
raise ValueError("The track corresponding to {} {} is of " \
"size {} while the model encodes {} genomic " \
"positions".format(celltype, assay, len(track),
self.n_genomic_positions))
if X_valid is not None:
for (celltype, assay), track in X_valid.items():
if celltype not in self.celltypes:
raise ValueError("Celltype {} appears in the validation " \
"data but not in the list of cell types already in " \
"the model.".format(celltype))
if assay in self.assays:
raise ValueError("Assay {} appears in the training data " \
"and also in the list of assays already in the " \
"model.".format(assay))
if len(track) != self.n_genomic_positions:
raise ValueError("The track corresponding to {} {} is of " \
"size {} while the model encodes {} genomic " \
"positions".format(celltype, assay, len(track),
self.n_genomic_positions))
new_assays = list(numpy.unique([assay for _, assay in X_train.keys()]))
model = build_model(n_celltypes=self.n_celltypes,
n_celltype_factors=self.n_celltype_factors,
n_assays=len(new_assays),
n_assay_factors=self.n_assay_factors,
n_genomic_positions=self.n_genomic_positions,
n_25bp_factors=self.n_25bp_factors,
n_250bp_factors=self.n_250bp_factors,
n_5kbp_factors=self.n_5kbp_factors,
n_layers=self.n_layers,
n_nodes=self.n_nodes,
freeze_celltypes=True,
freeze_assays=False,
freeze_genome_25bp=True,
freeze_genome_250bp=True,
freeze_genome_5kbp=True,
freeze_network=True)
for old_layer, new_layer in zip(self.model.layers, model.layers):
if 'input' in old_layer.name:
continue
if old_layer.name == 'assay_embedding':
continue
new_layer.set_weights(old_layer.get_weights())
X_train_gen = sequential_data_generator(self.celltypes, new_assays,
X_train, self.n_genomic_positions, self.batch_size)
if X_valid is not None:
X_valid_gen = data_generator(self.celltypes, new_assays,
X_valid, self.n_genomic_positions, self.batch_size)
history = model.fit_generator(X_train_gen, epoch_size, n_epochs,
workers=1, validation_data=X_valid_gen,
validation_steps=30, verbose=verbose, callbacks=callbacks,
**kwargs)
else:
history = model.fit_generator(X_train_gen, epoch_size, n_epochs,
workers=1, verbose=verbose,
callbacks=callbacks, **kwargs)
for layer in self.model.layers:
if layer.name == 'assay_embedding':
assay_embedding = layer.get_weights()[0]
break
for layer in model.layers:
if layer.name == 'assay_embedding':
new_assay_embedding = layer.get_weights()[0]
break
assay_embedding = numpy.concatenate([assay_embedding,
new_assay_embedding])
self.assays.extend(new_assays)
self.n_assays = len(self.assays)
model = build_model(n_celltypes=self.n_celltypes,
n_celltype_factors=self.n_celltype_factors,
n_assays=self.n_assays,
n_assay_factors=self.n_assay_factors,
n_genomic_positions=self.n_genomic_positions,
n_25bp_factors=self.n_25bp_factors,
n_250bp_factors=self.n_250bp_factors,
n_5kbp_factors=self.n_5kbp_factors,
n_layers=self.n_layers,
n_nodes=self.n_nodes,
freeze_celltypes=self.freeze_celltypes,
freeze_assays=self.freeze_assays,
freeze_genome_25bp=self.freeze_genome_25bp,
freeze_genome_250bp=self.freeze_genome_250bp,
freeze_genome_5kbp=self.freeze_genome_5kbp,
freeze_network=self.freeze_network)
for old_layer, new_layer in zip(self.model.layers, model.layers):
if 'input' in old_layer.name:
continue
if old_layer.name == 'assay_embedding':
new_layer.set_weights([assay_embedding])
else:
new_layer.set_weights(old_layer.get_weights())
self.experiments = self.experiments + list(X_train.keys())
self.model = model
return history
def predict(self, celltype, assay, start=0, end=None, verbose=0):
"""Predict a track of epigenomic data.
This will predict a track of epigenomic data, resulting in one signal
value per genomic position modeled. Users pass in the cell type and
the assay that they wish to impute and receive the track of data.
Parameters
----------
celltype : str
The cell type (aka biosample) to be imputed. Must be one of the
elements from the list of cell types passed in upon model
initialization.
assay : str
The assay to be imputed. Must be one of the elements from the list
of assays passed in upon model initialization.
start : int, optional
The start position to begin the imputation at. By default this is 0,
corresponding to the start of the track. The value is which 25 bp
bin to begin prediction at, not the raw genomic coordinate.
end : int or None, optional
The end position to stop making imputations at, exclusive. By default
this is None, meaning to end at `self.n_genomic_positions`.
verbose : int, optional
The verbosity level of the prediction. Must be 0 or 1.
Returns
-------
track : numpy.ndarray
A track of epigenomic signal value predictions for the specified
cell type and assay for the considered genomic positions.
"""
if end is not None and end <= start:
raise ValueError("When given, the end coordinate must be greater" \
" than the start coordinate.")
if end is None:
end = self.n_genomic_positions
celltype_idx = self.celltypes.index(celltype)
assay_idx = self.assays.index(assay)
celltype_idxs = numpy.ones(end-start) * celltype_idx
assay_idxs = numpy.ones(end-start) * assay_idx
genomic_25bp_idxs = numpy.arange(start, end)
genomic_250bp_idxs = numpy.arange(start, end) // 10
genomic_5kbp_idxs = numpy.arange(start, end) // 200
X = {
'celltype_input': celltype_idxs,
'assay_input': assay_idxs,
'genome_25bp_input': genomic_25bp_idxs,
'genome_250bp_input': genomic_250bp_idxs,
'genome_5kbp_input': genomic_5kbp_idxs
}
track = self.model.predict(X, batch_size=self.batch_size,
verbose=verbose)[:,0]
return track
def get_params(self):
"""Return a list with the first weight matrix of each weighted layer."""
params = []
for layer in self.model.layers:
if len(layer.get_weights()) > 0:
params.append(layer.get_weights()[0])
return params
def save(self, name="avocado", separators=(',', ' : '), indent=4):
"""Serialize the model to disk.
This function produces two files. The first is a json file that has the
model hyperparameters associated with it. The second is a h5 file that
contains the architecture of the neural network model, the weights, and
the optimizer.
Parameters
----------
name : str, optional
The name to use for the json and the h5 file that are stored.
separators : tuple, optional
The separators to use in the resulting JSON object.
indent : int, optional
The number of spaces to use in the indent of the JSON.
Returns
-------
None
"""
d = {
'celltypes': self.celltypes,
'assays': self.assays,
'experiments': self.experiments,
'n_celltype_factors': self.n_celltype_factors,
'n_assay_factors': self.n_assay_factors,
'n_genomic_positions': self.n_genomic_positions,
'n_25bp_factors': self.n_25bp_factors,
'n_250bp_factors': self.n_250bp_factors,
'n_5kbp_factors': self.n_5kbp_factors,
'n_layers': self.n_layers,
'n_nodes': self.n_nodes,
'batch_size': self.batch_size
}
d = json.dumps(d, separators=separators, indent=indent)
with open("{}.json".format(name), "w") as outfile:
outfile.write(d)
self.model.save("{}.h5".format(name))
def load_weights(self, name, verbose=0):
"""Load serialized weights on a layer-by-layer case.
Load the weights of a pre-saved model on a layer-by-layer case. This
method will iterate through the layers of the serialized model and
this model jointly and set the weights in this model to that of the
serialized model should the weight matrices be of the same size. Should
they not be of the same size it will not modify the current weight
matrix.
A primary use of this function should be after an initial model has been
trained on the Pilot regions and now one is fitting a model to each of
the chromosomes. The size of the genome factors will differ but the other
components will remain the same. Correspondingly, the identically sized
weight matrices are copied from the serialized model, while the differently
sized genome factor matrices are left unchanged.
Parameters
----------
name : str
The suffix of the name of the weights file.
verbose : int, optional
The verbosity level when loading weights. 0 means silent, 1 means
notify when a weight matrix has been set, 2 means notify what
action has been taken on each layer.
Returns
-------
None
"""
model = keras.models.load_model("{}.h5".format(name))
for i, (self_layer, layer) in enumerate(zip(self.model.layers, model.layers)):
w = layer.get_weights()
w0 = self_layer.get_weights()
layer_name = self_layer.name
if len(w) == 0:
if verbose == 2:
print("{} has no weights to set".format(layer_name))
continue
if w[0].shape != w0[0].shape:
if verbose == 2:
print("{} is of different size and not set".format(layer_name))
continue
self_layer.set_weights(w)
if verbose > 0:
print("{} has been set from serialized model".format(layer_name))
@classmethod
def load(cls, name, freeze_celltypes=False, freeze_assays=False,
freeze_genome_25bp=False, freeze_genome_250bp=False,
freeze_genome_5kbp=False, freeze_network=False):
"""Load a model that has been serialized to disk.
The keras model that is saved to disk does not contain any of the
wrapper information, so the hyperparameters stored in the json file are used
to rebuild the Avocado object before the serialized keras model is attached.
Parameters
----------
name : str
The name of the file to load. There must be both a .json and a
.h5 file with this suffix. For example, if "Avocado" is passed in,
there must be both a "Avocado.json" and a "Avocado.h5" file to
be loaded in.
freeze_celltypes : bool, optional
Whether to freeze the training of the cell type embedding. Default
is False.
freeze_assays : bool, optional
Whether to freeze the training of the assay embeddings. Default
is False.
freeze_genome_25bp : bool, optional
Whether to freeze the training of the 25 bp genome factors. Default
is False.
freeze_genome_250bp : bool, optional
Whether to freeze the training of the 250 bp genome factors. Default
is False.
freeze_genome_5kbp : bool, optional
Whether to freeze the training of the 5 kbp genome factors. Default
is False.
freeze_network : bool, optional
Whether to freeze the training of the neural network. Default
is False.
Returns
-------
model : Avocado
An Avocado model.
"""
with open("{}.json".format(name), "r") as infile:
d = json.load(infile)
if 'experiments' in d:
experiments = d['experiments']
del d['experiments']
else:
experiments = []
model = Avocado(freeze_celltypes=freeze_celltypes,
freeze_assays=freeze_assays,
freeze_genome_25bp=freeze_genome_25bp,
freeze_genome_250bp=freeze_genome_250bp,
freeze_genome_5kbp=freeze_genome_5kbp,
freeze_network=freeze_network,
**d)
model.experiments = experiments
model.model = keras.models.load_model("{}.h5".format(name))
return model
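# ------------------------------------------------------------------
# Example (not part of the original class): a hedged sketch of how a trained
# model could be extended to a new cell type with `fit_celltypes`. The cell
# type 'E120', the file paths, and the number of epochs are hypothetical
# placeholders chosen only for illustration.
#
# import numpy
#
# model = Avocado.load("avocado")  # expects avocado.json and avocado.h5
#
# new_data = {}
# for assay in model.assays:
#     filename = 'data/E120.{}.pilot.arcsinh.npz'.format(assay)
#     new_data[('E120', assay)] = numpy.load(filename)['arr_0']
#
# # Freezes everything except the cell type embedding, trains factors for
# # 'E120', and swaps in a network whose embedding contains the new row.
# model.fit_celltypes(new_data, n_epochs=50)
# track = model.predict("E120", model.assays[0])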
|
caserec/recommenders/item_recommendation/item_attribute_knn.py | khalillakhdhar/recommander_python | 407 | 12683256 |
# coding=utf-8
""""
Item Based Collaborative Filtering Recommender with Attributes (Item Attribute KNN)
[Item Recommendation (Ranking)]
Its philosophy is as follows: in order to determine the rating of User u on item m, we can find other movies that
are similar to item m, and based on User u’s ratings on those similar movies we infer his rating on item m.
However, instead of traditional ItemKNN, this approach uses a metadata or pre-computed similarity matrix.
"""
# © 2019. Case Recommender (MIT License)
from collections import defaultdict
import numpy as np
from caserec.recommenders.item_recommendation.itemknn import ItemKNN
from caserec.utils.process_data import ReadFile
__author__ = '<NAME> <<EMAIL>>'
class ItemAttributeKNN(ItemKNN):
def __init__(self, train_file=None, test_file=None, output_file=None, metadata_file=None, similarity_file=None,
k_neighbors=30, rank_length=10, as_binary=False, as_similar_first=True, metadata_as_binary=False,
metadata_similarity_sep='\t', similarity_metric="cosine", sep='\t', output_sep='\t'):
"""
Item Attribute KNN for Item Recommendation
This algorithm predicts a rank for each user based on the similar items that the user consumed,
using a metadata or pre-computed similarity file
Usage::
>> ItemAttributeKNN(train, test, similarity_file=sim_matrix, as_similar_first=True).compute()
>> ItemAttributeKNN(train, test, metadata_file=metadata, as_similar_first=True).compute()
:param train_file: File which contains the train set. This file needs to have at least 3 columns
(user item feedback_value).
:type train_file: str
:param test_file: File which contains the test set. This file needs to have at least 3 columns
(user item feedback_value).
:type test_file: str, default None
:param output_file: File with dir to write the final predictions
:type output_file: str, default None
:param metadata_file: File which contains the metadata set. This file needs to have at least 2 columns
(item metadata).
:type metadata_file: str, default None
:param similarity_file: File which contains the similarity set. This file needs to have at least 3 columns
(item item similarity).
:type similarity_file: str, default None
:param k_neighbors: Number of neighbors to use. If None, k_neighbor = int(sqrt(n_users))
:type k_neighbors: int, default None
:param rank_length: Size of the rank that must be generated by the predictions of the recommender algorithm
:type rank_length: int, default 10
:param as_binary: If True, the explicit feedback will be transformed to binary
:type as_binary: bool, default False
:param as_similar_first: If True, for each unknown item, which will be predicted, we first look for its k
most similar users and then take the intersection with the users that
have seen that item.
:type as_similar_first: bool, default True
:param metadata_as_binary: If True, the explicit value will be transformed to binary
:type metadata_as_binary: bool, default False
:param metadata_similarity_sep: Delimiter for similarity or metadata file
:type metadata_similarity_sep: str, default '\t'
:param similarity_metric: Pairwise metric to compute the similarity between the items. Reference about
distances: http://docs.scipy.org/doc/scipy-0.14.0/reference/generated/scipy.spatial.distance.pdist.html
:type similarity_metric: str, default cosine
:param sep: Delimiter for input files file
:type sep: str, default '\t'
:param output_sep: Delimiter for output file
:type output_sep: str, default '\t'
"""
super(ItemAttributeKNN, self).__init__(train_file=train_file, test_file=test_file, output_file=output_file,
k_neighbors=k_neighbors, rank_length=rank_length, as_binary=as_binary,
as_similar_first=as_similar_first, similarity_metric=similarity_metric,
sep=sep, output_sep=output_sep)
self.recommender_name = 'Item Attribute KNN Algorithm'
self.metadata_file = metadata_file
self.similarity_file = similarity_file
self.metadata_as_binary = metadata_as_binary
self.metadata_similarity_sep = metadata_similarity_sep
def init_model(self):
"""
Method to fit the model. Create and calculate a similarity matrix by metadata file or a pre-computed similarity
matrix
"""
self.similar_items = defaultdict(list)
# Set the value for k
if self.k_neighbors is None:
self.k_neighbors = int(np.sqrt(len(self.items)))
if self.metadata_file is not None:
metadata = ReadFile(self.metadata_file, sep=self.metadata_similarity_sep, as_binary=self.metadata_as_binary
).read_metadata_or_similarity()
self.matrix = np.zeros((len(self.items), len(metadata['col_2'])))
meta_to_meta_id = {}
for m, data in enumerate(metadata['col_2']):
meta_to_meta_id[data] = m
for item in metadata['col_1']:
for m in metadata['dict'][item]:
self.matrix[self.item_to_item_id[item], meta_to_meta_id[m]] = metadata['dict'][item][m]
# create header info for metadata
sparsity = (1 - (metadata['number_interactions'] / (len(metadata['col_1']) * len(metadata['col_2'])))) * 100
self.extra_info_header = ">> metadata:: %d items and %d metadata (%d interactions) | sparsity:: %.2f%%" % \
(len(metadata['col_1']), len(metadata['col_2']), metadata['number_interactions'],
sparsity)
# Create similarity matrix based on metadata or similarity file. Transpose=False, because it is an
# item x metadata matrix
self.si_matrix = self.compute_similarity(transpose=False)
elif self.similarity_file is not None:
similarity = ReadFile(self.similarity_file, sep=self.metadata_similarity_sep, as_binary=False
).read_metadata_or_similarity()
self.si_matrix = np.zeros((len(self.items), len(self.items)))
# Fill similarity matrix
for i in similarity['col_1']:
for i_j in similarity['dict'][i]:
self.si_matrix[self.item_to_item_id[i], self.item_to_item_id[int(i_j)]] = similarity['dict'][i][i_j]
# Remove NaNs
self.si_matrix[np.isnan(self.si_matrix)] = 0.0
else:
raise ValueError("This algorithm needs a similarity matrix or a metadata file!")
# Create original matrix user x item for prediction process
self.create_matrix()
for i_id, item in enumerate(self.items):
self.similar_items[i_id] = sorted(range(len(self.si_matrix[i_id])),
key=lambda k: -self.si_matrix[i_id][k])[1:self.k_neighbors + 1]
|
tools/misc/profile.py | v8786339/NyuziProcessor | 1,388 | 12683257 |
#!/usr/bin/env python3
#
# Copyright 2011-2015 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Process sampling profiler output from hardware model.
USAGE: profile <objdump file> <pc dump file>
Prints a breakdown of time spent per function.
- 'objdump file' parameter points to a file that was produced using:
/usr/local/llvm-nyuzi/bin/llvm-objdump -t <path to ELF file>
- 'pc dump file' points to a file that was produced by the verilog model
using +profile=<filename>. It is a list of hexadecimal program counter
samples, one per line.
"""
import sys
import re
symbolre = re.compile(
r'(?P<addr>[A-Fa-f0-9]+) g\s+F\s+\.text\s+[A-Fa-f0-9]+\s+(?P<symbol>\w+)')
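# A hypothetical llvm-objdump symbol-table line that this pattern is meant to
# match (the address, size, and symbol name below are made-up values used only
# for illustration):
#
#   0000000000001000 g     F .text 0000000000000020 main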
def find_function(functions, pc):
"""Given a PC, figure out which function it is in.
Args:
functions: list of (addr: int, name: str) tuples, sorted by address.
pc: Program counter value to look up.
Returns:
str Name of function.
Raises:
Nothing
"""
low = 0
high = len(functions)
while low < high:
mid = int((low + high) / 2)
if pc < functions[mid][0]:
high = mid
else:
low = mid + 1
if low == len(functions):
return None
return functions[low - 1][1]
def main():
counts = {}
functions = []
# Read symbols
with open(sys.argv[1], 'r') as f:
for line in f.readlines():
got = symbolre.search(line)
if got is not None:
sym = got.group('symbol')
functions += [(int(got.group('addr'), 16), sym)]
counts[sym] = 0
functions.sort(key=lambda a: a[0])
# Read profile trace
with open(sys.argv[2], 'r') as f:
for line in f.readlines():
func = find_function(functions, int(line, 16))
if func is not None:
counts[func] += 1
total_cycles = 0
sorted_tab = []
for name in counts:
sorted_tab += [(counts[name], name)]
total_cycles += counts[name]
for count, name in sorted(sorted_tab, key=lambda func: func[0], reverse=True):
if count == 0:
break
print('{:7d} {:.3f}% {}'.format(count, count / total_cycles * 100, name))
if __name__ == '__main__':
main()
|
mmtbx/command_line/find_residue_in_pdb.py | dperl-sol/cctbx_project | 155 | 12683313 |
from __future__ import absolute_import, division, print_function
from libtbx.utils import Sorry, Usage
import libtbx.phil.command_line
import sys
master_phil = libtbx.phil.parse("""
resname = None
.type = str
d_max = None
.type = float
polymeric_type = *Any Free Polymeric
.type = choice
xray_only = True
.type = bool
data_only = False
.type = bool
identity_cutoff = None
.type = int
quiet = False
.type = bool
""")
def run(args, out=sys.stdout):
if (len(args) == 0) or ("--help" in args):
raise Usage("""mmtbx.find_residue_in_pdb RESNAME [options]
Use the RCSB web services to retrieve a list of PDB structures containing the
specified chemical ID.
Full parameters:
%s
""" % master_phil.as_str(prefix=" "))
sources = []
def process_unknown(arg):
if (1 <= len(arg) <= 3) and (arg.isalnum()):
return libtbx.phil.parse("resname=%s" % arg)
cai = libtbx.phil.command_line.argument_interpreter(master_phil=master_phil)
working_phil = cai.process_and_fetch(args=args,
custom_processor=process_unknown)
params = working_phil.extract()
if (params.resname is None):
raise Sorry("No residue ID specified.")
from mmtbx.wwpdb import rcsb_web_services
pdb_ids = rcsb_web_services.chemical_id_search(
resname=params.resname,
d_max=params.d_max,
polymeric_type=params.polymeric_type,
xray_only=params.xray_only,
data_only=params.data_only,
identity_cutoff=params.identity_cutoff)
pdb_ids = [pdb_id.lower() for pdb_id in pdb_ids]
if (len(pdb_ids) == 0):
raise Sorry("No structures found matching the specified criteria.")
else :
if (not params.quiet):
print("%d PDB IDs retrieved:" % len(pdb_ids), file=out)
i = 0
while (i < len(pdb_ids)):
print(" %s" % " ".join(pdb_ids[i:i+16]), file=out)
i += 16
else :
print("%d PDB IDs matching" % len(pdb_ids), file=out)
if (__name__ == "__main__"):
run(sys.argv[1:])
|
ch16/ch16-part1-self-attention.py | ericgarza70/machine-learning-book | 655 | 12683324 | # coding: utf-8
import sys
from python_environment_check import check_packages
import torch
import torch.nn.functional as F
# # Machine Learning with PyTorch and Scikit-Learn
# # -- Code Examples
# ## Package version checks
# Add folder to path in order to load from the check_packages.py script:
sys.path.insert(0, '..')
# Check recommended package versions:
d = {
'torch': '1.9.0',
}
check_packages(d)
# # Chapter 16: Transformers – Improving Natural Language Processing with Attention Mechanisms (Part 1/3)
# **Outline**
#
# - [Adding an attention mechanism to RNNs](#Adding-an-attention-mechanism-to-RNNs)
# - [Attention helps RNNs with accessing information](#Attention-helps-RNNs-with-accessing-information)
# - [The original attention mechanism for RNNs](#The-original-attention-mechanism-for-RNNs)
# - [Processing the inputs using a bidirectional RNN](#Processing-the-inputs-using-a-bidirectional-RNN)
# - [Generating outputs from context vectors](#Generating-outputs-from-context-vectors)
# - [Computing the attention weights](#Computing-the-attention-weights)
# - [Introducing the self-attention mechanism](#Introducing-the-self-attention-mechanism)
# - [Starting with a basic form of self-attention](#Starting-with-a-basic-form-of-self-attention)
# - [Parameterizing the self-attention mechanism: scaled dot-product attention](#Parameterizing-the-self-attention-mechanism-scaled-dot-product-attention)
# - [Attention is all we need: introducing the original transformer architecture](#Attention-is-all-we-need-introducing-the-original-transformer-architecture)
# - [Encoding context embeddings via multi-head attention](#Encoding-context-embeddings-via-multi-head-attention)
# - [Learning a language model: decoder and masked multi-head attention](#Learning-a-language-model-decoder-and-masked-multi-head-attention)
# - [Implementation details: positional encodings and layer normalization](#Implementation-details-positional-encodings-and-layer-normalization)
# ## Adding an attention mechanism to RNNs
# ### Attention helps RNNs with accessing information
# ### The original attention mechanism for RNNs
# ### Processing the inputs using a bidirectional RNN
# ### Generating outputs from context vectors
# ### Computing the attention weights
# ## Introducing the self-attention mechanism
# ### Starting with a basic form of self-attention
# - Assume we have an input sentence that we encoded via a dictionary, which maps the words to integers as discussed in the RNN chapter:
# input sequence / sentence:
# "Can you help me to translate this sentence"
sentence = torch.tensor(
[0, # can
7, # you
1, # help
2, # me
5, # to
6, # translate
4, # this
3] # sentence
)
sentence
# - Next, assume we have an embedding of the words, i.e., the words are represented as real vectors.
# - Since we have 8 words, there will be 8 vectors. Each vector is 16-dimensional:
torch.manual_seed(123)
embed = torch.nn.Embedding(10, 16)
embedded_sentence = embed(sentence).detach()
embedded_sentence.shape
# - The goal is to compute the context vectors $\boldsymbol{z}^{(i)}=\sum_{j=1}^{T} \alpha_{i j} \boldsymbol{x}^{(j)}$, which involve attention weights $\alpha_{i j}$.
# - In turn, the attention weights $\alpha_{i j}$ involve the $\omega_{i j}$ values
# - Let's start with the $\omega_{i j}$'s first, which are computed as dot-products:
#
# $$\omega_{i j}=\boldsymbol{x}^{(i)^{\top}} \boldsymbol{x}^{(j)}$$
#
#
omega = torch.empty(8, 8)
for i, x_i in enumerate(embedded_sentence):
for j, x_j in enumerate(embedded_sentence):
omega[i, j] = torch.dot(x_i, x_j)
# - Actually, let's compute this more efficiently by replacing the nested for-loops with a matrix multiplication:
omega_mat = embedded_sentence.matmul(embedded_sentence.T)
torch.allclose(omega_mat, omega)
# - Next, let's compute the attention weights by normalizing the "omega" values so they sum to 1
#
# $$\alpha_{i j}=\frac{\exp \left(\omega_{i j}\right)}{\sum_{j=1}^{T} \exp \left(\omega_{i j}\right)}=\operatorname{softmax}\left(\left[\omega_{i j}\right]_{j=1 \ldots T}\right)$$
#
# $$\sum_{j=1}^{T} \alpha_{i j}=1$$
attention_weights = F.softmax(omega, dim=1)
attention_weights.shape
# - We can confirm that each row of the attention weights sums up to one:
attention_weights.sum(dim=1)
# - Now that we have the attention weights, we can compute the context vectors $\boldsymbol{z}^{(i)}=\sum_{j=1}^{T} \alpha_{i j} \boldsymbol{x}^{(j)}$, which involve attention weights $\alpha_{i j}$
# - For instance, to compute the context-vector of the 2nd input element (the element at index 1), we can perform the following computation:
x_2 = embedded_sentence[1, :]
context_vec_2 = torch.zeros(x_2.shape)
for j in range(8):
x_j = embedded_sentence[j, :]
context_vec_2 += attention_weights[1, j] * x_j
context_vec_2
# - Or, more efficiently, using linear algebra and matrix multiplication:
context_vectors = torch.matmul(
attention_weights, embedded_sentence)
torch.allclose(context_vec_2, context_vectors[1])
# ### Parameterizing the self-attention mechanism: scaled dot-product attention
torch.manual_seed(123)
d = embedded_sentence.shape[1]
U_query = torch.rand(d, d)
U_key = torch.rand(d, d)
U_value = torch.rand(d, d)
x_2 = embedded_sentence[1]
query_2 = U_query.matmul(x_2)
key_2 = U_key.matmul(x_2)
value_2 = U_value.matmul(x_2)
keys = U_key.matmul(embedded_sentence.T).T
torch.allclose(key_2, keys[1])
values = U_value.matmul(embedded_sentence.T).T
torch.allclose(value_2, values[1])
omega_23 = query_2.dot(keys[2])
omega_23
omega_2 = query_2.matmul(keys.T)
omega_2
attention_weights_2 = F.softmax(omega_2 / d**0.5, dim=0)
attention_weights_2
#context_vector_2nd = torch.zeros(values[1, :].shape)
#for j in range(8):
# context_vector_2nd += attention_weights_2[j] * values[j, :]
#context_vector_2nd
context_vector_2 = attention_weights_2.matmul(values)
context_vector_2
# ## Attention is all we need: introducing the original transformer architecture
# ### Encoding context embeddings via multi-head attention
torch.manual_seed(123)
d = embedded_sentence.shape[1]
one_U_query = torch.rand(d, d)
h = 8
multihead_U_query = torch.rand(h, d, d)
multihead_U_key = torch.rand(h, d, d)
multihead_U_value = torch.rand(h, d, d)
multihead_query_2 = multihead_U_query.matmul(x_2)
multihead_query_2.shape
multihead_key_2 = multihead_U_key.matmul(x_2)
multihead_value_2 = multihead_U_value.matmul(x_2)
multihead_key_2[2]
stacked_inputs = embedded_sentence.T.repeat(8, 1, 1)
stacked_inputs.shape
multihead_keys = torch.bmm(multihead_U_key, stacked_inputs)
multihead_keys.shape
multihead_keys = multihead_keys.permute(0, 2, 1)
multihead_keys.shape
multihead_keys[2, 1] # index: [2nd attention head, 2nd key]
multihead_values = torch.matmul(multihead_U_value, stacked_inputs)
multihead_values = multihead_values.permute(0, 2, 1)
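# - The next cell stands in a random placeholder for the per-head outputs. As an
# added illustration (not in the original notebook), the actual per-head context
# vectors for the 2nd input element can be computed from the multi-head queries,
# keys, and values above via scaled dot-product attention:
multihead_omega_2 = torch.einsum('hd,htd->ht', multihead_query_2, multihead_keys)
multihead_attention_weights_2 = F.softmax(multihead_omega_2 / d**0.5, dim=1)
multihead_context_2 = torch.einsum('ht,htd->hd',
                                   multihead_attention_weights_2, multihead_values)
multihead_context_2.shape  # one 16-dimensional context vector per head: (8, 16)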
multihead_z_2 = torch.rand(8, 16)
linear = torch.nn.Linear(8*16, 16)
context_vector_2 = linear(multihead_z_2.flatten())
context_vector_2.shape
# ### Learning a language model: decoder and masked multi-head attention
# ### Implementation details: positional encodings and layer normalization
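# - The two subsections above are discussed conceptually in the book. As an added,
# hedged sketch (not from the original notebook), a causal mask for masked
# self-attention, a sinusoidal positional encoding, and a layer normalization
# step could look as follows:
# Causal (look-ahead) mask: position i may only attend to positions j <= i
T = embedded_sentence.shape[0]
causal_mask = torch.tril(torch.ones(T, T)).bool()
masked_omega = omega_mat.masked_fill(~causal_mask, float('-inf'))
masked_attention_weights = F.softmax(masked_omega / d**0.5, dim=1)
# Sinusoidal positional encoding for a sequence of length T and model size d
positions = torch.arange(T, dtype=torch.float32).unsqueeze(1)
div_term = torch.exp(torch.arange(0, d, 2, dtype=torch.float32)
                     * (-torch.log(torch.tensor(10000.0)) / d))
positional_encoding = torch.zeros(T, d)
positional_encoding[:, 0::2] = torch.sin(positions * div_term)
positional_encoding[:, 1::2] = torch.cos(positions * div_term)
# Token embeddings and positional encodings are summed, then layer-normalized
encoder_inputs = F.layer_norm(embedded_sentence + positional_encoding,
                              normalized_shape=(d,))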
# ---
#
# Readers may ignore the next cell.
|
simfin/datasets.py | tom3131/simfin | 231 | 12683362 | ##########################################################################
#
# Functions and classes for iterating over and loading all datasets,
# variants and markets that are available for bulk download from SimFin.
#
##########################################################################
# SimFin - Simple financial data for Python.
# www.simfin.com - www.github.com/simfin/simfin
# See README.md for instructions and LICENSE.txt for license details.
##########################################################################
import simfin as sf
from simfin.load_info import load_info_datasets
from collections import defaultdict
from functools import partial, lru_cache
import sys
##########################################################################
# Lists of dataset names.
@lru_cache()
def datasets_all():
"""
Return a list of strings with the names of all available datasets.
"""
# Load dict with info about all the datasets.
info_datasets = load_info_datasets()
# Create a list of just the dataset names.
datasets = list(info_datasets)
return datasets
@lru_cache()
def datasets_startswith(names):
"""
Return a list of strings with dataset names that begin with the given
names.
:param names:
String or tuple of strings.
:return:
List of strings.
"""
# Load dict with info about all the datasets.
info_datasets = load_info_datasets()
# Create a list of just the dataset names.
datasets = list(info_datasets)
# Filter the datasets so we only get the ones that start with these names.
datasets = list(filter(lambda s: s.startswith(names), datasets))
return datasets
# List of dataset names that begin with 'income'.
datasets_income = partial(datasets_startswith, names='income')
datasets_income.__doc__ = 'List of dataset names that begin with \'income\'.'
# List of dataset names that begin with 'balance'.
datasets_balance = partial(datasets_startswith, names='balance')
datasets_balance.__doc__ = 'List of dataset names that begin with \'balance\'.'
# List of dataset names that begin with 'cashflow'.
datasets_cashflow = partial(datasets_startswith, names='cashflow')
datasets_cashflow.__doc__ = 'List of dataset names that begin with \'cashflow\'.'
# List of dataset names that begin with either 'income', 'balance' or 'cashflow'.
datasets_fundamental = partial(datasets_startswith,
names=('income', 'balance', 'cashflow'))
datasets_fundamental.__doc__ = 'List of dataset names with fundamental data.'
# List of dataset names that begin with 'shareprices'.
datasets_shareprices = partial(datasets_startswith, names='shareprices')
datasets_shareprices.__doc__ = 'List of dataset names that begin with \'shareprices\'.'
# List of dataset names that begin with 'derived'.
datasets_derived = partial(datasets_startswith, names='derived')
datasets_derived.__doc__ = 'List of dataset names that begin with \'derived\'.'
##########################################################################
# Functions for iterating over and loading all datasets.
def iter_all_datasets(datasets=None):
"""
Create a generator for iterating over all valid datasets, variants and
markets. For example:
.. code-block:: python
for dataset, variant, market in iter_all_datasets():
print(dataset, variant, market)
This only yields the names of the datasets, variants and markets, not the
actual Pandas DataFrames; use :obj:`~simfin.datasets.load_all_datasets`
or the :obj:`~simfin.datasets.AllDatasets` class for that.
:param datasets:
If `None` then iterate over all datasets. Otherwise if this is a string
or list of strings, then only iterate over these datasets.
"""
# Load dict with info about all the datasets.
info_datasets = load_info_datasets()
# Only use the given datasets?
if datasets is not None:
# Create a new dict which only contains the given datasets.
info_datasets = {k: v for k, v in info_datasets.items()
if k in datasets}
# Yield all valid combinations of datasets, variants and markets.
for dataset, x in info_datasets.items():
# If the list of variants is empty, use a list with None,
# otherwise the for-loop below would not yield anything.
if len(x['variants']) > 0:
variants = x['variants']
else:
variants = [None]
# If the list of markets is empty, use a list with None,
# otherwise the for-loop below would not yield anything.
if len(x['markets']) > 0:
markets = x['markets']
else:
markets = [None]
for variant in variants:
for market in markets:
yield dataset, variant, market
def load_all_datasets(**kwargs):
"""
Load all datasets and variants. Create and return a nested
dict for fast lookup given dataset, variant and market names.
Accepts the same args as the :obj:`~simfin.load.load` function, except for
dataset, variant and market. For example, `refresh_days` can be set
to 0 to ensure all datasets are downloaded again, which is
useful for testing purposes.
:return:
Nested dict `dfs` with all datasets, variants and markets.
Example: `dfs['income']['annual']['us']` is the dataset for
annual Income Statements for the US market.
"""
# Initialize a dict that can be nested to any depth.
dfs = defaultdict(lambda: defaultdict(dict))
# For all possible datasets, variants and markets.
for dataset, variant, market in iter_all_datasets():
try:
# Load the dataset and variant as a Pandas DataFrame.
df = sf.load(dataset=dataset, variant=variant, market=market,
**kwargs)
# Add the Pandas DataFrame to the nested dict.
dfs[dataset][variant][market] = df
except Exception as e:
# Exceptions can occur e.g. if the API key is invalid, or if there
# is another server error, or if there is no internet connection.
# Print the exception and continue.
print(e, file=sys.stderr)
# Set the Pandas DataFrame to None in the nested dict,
# to indicate that it could not be loaded.
dfs[dataset][variant][market] = None
# Return the nested dict. It is a bit tricky to convert the
# defaultdict to a normal dict, and it is not really needed,
# so just return the defaultdict as it is.
return dfs
##########################################################################
class AllDatasets:
"""
Load all valid datasets, variants and markets as Pandas DataFrames.
Also provide functions for easy lookup and iteration over datasets.
"""
def __init__(self, **kwargs):
"""
Accepts the same args as the :obj:`~simfin.load.load` function, except
for dataset, variant and market. For example, `refresh_days` can be
set to 0 to ensure all datasets are downloaded again, which is
useful for testing purposes.
"""
# Load all datasets into a nested dict-dict.
self._dfs = load_all_datasets(**kwargs)
def get(self, dataset, variant=None, market=None):
"""
Return the Pandas DataFrame for a single dataset, variant and market.
:param dataset: String with the dataset name.
:param variant: String with the dataset's variant.
:param market: String with the dataset's market.
:return: Pandas DataFrame with the dataset.
"""
return self._dfs[dataset][variant][market]
def iter(self, datasets=None, variants=None, markets=None):
"""
Iterate over all valid datasets, variants and markets,
or only use the ones specified. For example:
.. code-block:: python
for dataset, variant, market, df in all_datasets.iter():
# dataset, variant and market are strings with the names.
# df is a Pandas DataFrame with the actual data.
:param datasets:
Default is `None` which uses all valid datasets.
Otherwise a list of strings with the dataset-names to use.
:param variants:
Default is `None` which uses all valid variants for a dataset.
Otherwise a list of strings with the variant-names to use.
:param markets:
Default is `None` which uses all valid markets for a dataset.
Otherwise a list of strings with the market-names to use.
:return:
Generator which iterates over:
dataset (string), variant (string), market (string), df (Pandas DataFrame)
"""
# Load dict with info about all the datasets.
info_datasets = load_info_datasets()
# Use provided or all datasets?
if datasets is None:
datasets = datasets_all()
# For all datasets.
for dataset in datasets:
# Use provided or all valid variants for this dataset?
if variants is not None:
_variants = variants
else:
_variants = info_datasets[dataset]['variants']
# Use provided or all valid markets for this dataset?
if markets is not None:
_markets = markets
else:
_markets = info_datasets[dataset]['markets']
# For all the selected variants and markets.
for variant in _variants:
for market in _markets:
# Get the Pandas DataFrame with the actual data.
df = self.get(dataset=dataset, variant=variant, market=market)
# Yield all the strings and the Pandas DataFrame.
yield dataset, variant, market, df
##########################################################################
|
colour/models/rgb/datasets/nikon_n_gamut.py | rift-labs-developer/colour | 1,380 | 12683370 | # -*- coding: utf-8 -*-
"""
Nikon N-Gamut Colourspace
=========================
Defines the *Nikon N-Gamut* colourspace:
- :attr:`colour.models.RGB_COLOURSPACE_N_GAMUT`.
References
----------
- :cite:`Nikon2018` : Nikon. (2018). N-Log Specification Document - Version
1.0.0 (pp. 1-5). Retrieved September 9, 2019, from
http://download.nikonimglib.com/archive3/hDCmK00m9JDI03RPruD74xpoU905/\
N-Log_Specification_(En)01.pdf
"""
from colour.models.rgb import (RGB_Colourspace, log_encoding_NLog,
log_decoding_NLog)
from colour.models.rgb.datasets.itur_bt_2020 import (
PRIMARIES_BT2020, WHITEPOINT_NAME_BT2020, CCS_WHITEPOINT_BT2020,
MATRIX_BT2020_TO_XYZ, MATRIX_XYZ_TO_BT2020)
__author__ = 'Colour Developers'
__copyright__ = 'Copyright (C) 2013-2020 - Colour Developers'
__license__ = 'New BSD License - http://opensource.org/licenses/BSD-3-Clause'
__maintainer__ = 'Colour Developers'
__email__ = '<EMAIL>'
__status__ = 'Production'
__all__ = [
'PRIMARIES_N_GAMUT', 'WHITEPOINT_NAME_N_GAMUT', 'CCS_WHITEPOINT_N_GAMUT',
'MATRIX_N_GAMUT_TO_XYZ', 'MATRIX_XYZ_TO_N_GAMUT', 'RGB_COLOURSPACE_N_GAMUT'
]
PRIMARIES_N_GAMUT = PRIMARIES_BT2020
"""
*Nikon N-Gamut* colourspace primaries.
Notes
-----
The *Nikon N-Gamut* colourspace gamut is the same as the "ITU-R BT.2020" wide
colour gamut.
PRIMARIES_N_GAMUT : ndarray, (3, 2)
"""
WHITEPOINT_NAME_N_GAMUT = WHITEPOINT_NAME_BT2020
"""
*Nikon N-Gamut* colourspace whitepoint name.
WHITEPOINT_NAME_N_GAMUT : unicode
"""
CCS_WHITEPOINT_N_GAMUT = CCS_WHITEPOINT_BT2020
"""
*Nikon N-Gamut* colourspace whitepoint.
CCS_WHITEPOINT_N_GAMUT : ndarray
"""
MATRIX_N_GAMUT_TO_XYZ = MATRIX_BT2020_TO_XYZ
"""
*Nikon N-Gamut* colourspace to *CIE XYZ* tristimulus values matrix.
MATRIX_N_GAMUT_TO_XYZ : array_like, (3, 3)
"""
MATRIX_XYZ_TO_N_GAMUT = MATRIX_XYZ_TO_BT2020
"""
*CIE XYZ* tristimulus values to *Nikon N-Gamut* colourspace matrix.
MATRIX_XYZ_TO_N_GAMUT : array_like, (3, 3)
"""
RGB_COLOURSPACE_N_GAMUT = RGB_Colourspace(
'N-Gamut',
PRIMARIES_N_GAMUT,
CCS_WHITEPOINT_N_GAMUT,
WHITEPOINT_NAME_N_GAMUT,
MATRIX_N_GAMUT_TO_XYZ,
MATRIX_XYZ_TO_N_GAMUT,
log_encoding_NLog,
log_decoding_NLog,
)
RGB_COLOURSPACE_N_GAMUT.__doc__ = """
*Nikon N-Gamut* colourspace.
References
----------
:cite:`Nikon2018`
RGB_COLOURSPACE_N_GAMUT : RGB_Colourspace
"""
|
prediction_flow/transformers/column/base.py | dydcfg/prediction-flow | 211 | 12683393 | """
Base class for all column-oriented transformer classes
with fit/transform functions.
"""
# Authors: <NAME>
# License: MIT
from abc import ABC, abstractmethod
from enum import Enum
class Column(ABC):
"""Base class for all column-orientation transformer classes
with fit/transform functions.
"""
@abstractmethod
def fit(self, x, y=None):
"""Fit this transformer.
Parameters
----------
x : array-like
One column of training data.
y : array-like, default=None
Training targets.
"""
raise NotImplementedError
@abstractmethod
def transform(self, x):
"""Transform x by this fitted transformer.
Parameters
----------
x : array-like
Column data to be transformed.
"""
raise NotImplementedError
class ColumnType(Enum):
NUMBER = 1
CATEGORY = 2
SEQUENCE = 3
class NumberColumn(Column):
"""Base class for all column-orientation number type transformer classes
with fit/transform functions.
"""
column_type = ColumnType.NUMBER
class CategoryColumn(Column):
"""Base class for all column-orientation category type transformer classes
with fit/transform functions.
"""
column_type = ColumnType.CATEGORY
@abstractmethod
def dimension(self):
"""Number of unique terms.
"""
raise NotImplementedError
class SequenceColumn(Column):
"""Base class for all column-orientation sequence type transformer classes
with fit/transform functions.
"""
column_type = ColumnType.SEQUENCE
@abstractmethod
def dimension(self):
"""Number of unique terms.
"""
raise NotImplementedError
@abstractmethod
def max_length(self):
"""Maximum length of one sequence.
"""
raise NotImplementedError
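# ----------------------------------------------------------------------------
# Example (not part of the original module): a minimal sketch of a concrete
# CategoryColumn that illustrates the fit/transform/dimension contract defined
# above. The class name and the "0 means unknown" convention are assumptions
# made for this illustration only.
class SimpleCategoryEncoder(CategoryColumn):
    """Map raw category values to integer ids, reserving 0 for unknown values."""

    def __init__(self):
        self.word2idx = dict()

    def fit(self, x, y=None):
        # Build the vocabulary from the training column.
        for value in x:
            if value not in self.word2idx:
                # ids start at 1 so that 0 can represent "unknown"
                self.word2idx[value] = len(self.word2idx) + 1
        return self

    def transform(self, x):
        # Map each value to its id; unseen values fall back to 0.
        return [self.word2idx.get(value, 0) for value in x]

    def dimension(self):
        # +1 accounts for the reserved "unknown" id.
        return len(self.word2idx) + 1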
|
codes/models/archs/dcn/__init__.py | Johnson-yue/mmsr | 130 | 12683398 | from .deform_conv import (DeformConv, DeformConvPack, ModulatedDeformConv, ModulatedDeformConvPack,
deform_conv, modulated_deform_conv)
__all__ = [
'DeformConv', 'DeformConvPack', 'ModulatedDeformConv', 'ModulatedDeformConvPack', 'deform_conv',
'modulated_deform_conv'
]
|
aix360/data/ted_data/GenerateData.py | Qingtian-Zou/AIX360 | 609 | 12683400 | # This file will generate a synthetic dataset to predict employee attrition
# Like most datasets it will have a feature vector and a Y label for each instance.
# However, unlike most datasets it will also have an Explanation (E) for each instance, encoded as a non-negative integer.
# This is motivated by the TED framework, but can be used by other explainability algorithms as a metric for explainability
# See the AIES'19 paper by Hind et al for more information on the TED framework.
# See the tutorial notebook TED_Cartesian_test for information about how to use this dataset and the TED framework.
# The comments in this code also provide some insight into how this dataset is generated
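# Illustrative example (added as an assumption, not produced by the original comments):
# one generated row of Retention.csv could look like
#   1, 1, -11, -2, -2, -2, 40, 20, -10, 1
# i.e. Position 1, Org 1, Potential No, Medium rating/slope/salary competitiveness,
# 40 months of tenure and 20 months in position, labelled Yes (-10) with the
# explanation/rule index 1 ("Promotion Lag, Org 1, Position 1").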
import random
from random import choices
import pandas as pd
Any = -99 # This is only applicable in the rule
Low = -1 # These 3, Low, Med, High, can be values in the dataset and are used in the rules
Med = -2
High = -3
Yes = -10 # This is the positive Y label
No = -11 # This is the negative Y label
Random = -12 # This signifies a random choice should be made for the Y label (either Yes or No)
# Features, values, and distribution, details below
featureThresholds = [
# 1 Position: 4(5%), 3(20%), 2(30%), 1(45%)
[4, [0.05, 0.20, 0.30, 0.45]],
# 2 Organization "Org": 3(30%); 2(30%); 1(40%)
[3, [0.30, 0.30, 0.40]],
# 3 Potential "Pot": Yes (50%), No (50%)
[2, [0.50, 0.50]],
# 4 Rating value "Rat": High(15%), Med(80%), Low(5%)
[3, [0.15, 0.80, 0.05]],
# 5 Rating Slope "Slope": High (15%), Med(80%), Low(5%)
[3, [0.15, 0.80, 0.05]],
# 6 Salary Competitiveness "Sal": High (10%); Med(70%); Low(20%)
[3, [0.10, 0.70, 0.20]],
# 7 Tenure Low "TenL" & High Values "TenH": [0..360], 30% in 0..24; 30% in 25..60; 40% in 61..360
[3, [0.30, 0.30, 0.40], [[0, 24], [25, 60], [61, 360]]],
# 8 Position Tenure Low "BTenL" & High Values "BTenH": [0..360], 70% in 0..12; 20% in 13..24; 10% in 25..360
# Position tenure needs to be lower than tenure, ensured in generation code below
[3, [0.70, 0.20, 0.10], [[0, 12], [13, 24], [25, 360]]]
]
# Some convenient population lists
HighMedLowPopulation = [High, Med, Low]
YesNoPopulation = [Yes, No]
Index3Population = [0, 1, 2]
Integer4Population = [4, 3, 2, 1]
Integer3Population = [3, 2, 1]
# Rules used to label a feature vector with a label and an explanation
# Format: features, label, explanation #, Explanation String
RetentionRules = [
#POS ORG Pot RAT Slope SALC TENL H BTEN LH
[Any, 1, Any, High, Any, Low, Any, Any, Any, Any, #0
Yes, 2, "Seeking Higher Salary in Org 1"],
[1, 1, Any, Any, Any, Any, Any, Any, 15, Any, #1
Yes, 3, "Promotion Lag, Org 1, Position 1"],
[2, 1, Any, Any, Any, Any, Any, Any, 15, Any, #2
Yes, 3, "Promotion Lag, Org 1, Position 2"],
[3, 1, Any, Any, Any, Any, Any, Any, 15, Any, #3
Yes, 3, "Promotion Lag, Org 1, Position 3"],
[1, 2, Any, Any, Any, Any, Any, Any, 20, Any, #4
Yes, 4, "Promotion Lag, Org 2, Position 1"],
[2, 2, Any, Any, Any, Any, Any, Any, 20, Any, #5
Yes, 4, "Promotion Lag, Org 2, Position 2"],
[3, 2, Any, Any, Any, Any, Any, Any, 30, Any, #6
Yes, 5, "Promotion Lag, Org 2, Position 3"],
[1, 3, Any, Any, Any, Any, Any, Any, 20, Any, #7
Yes, 6, "Promotion Lag, Org 3, Position 1"],
[2, 3, Any, Any, Any, Any, Any, Any, 30, Any, #8
Yes, 7, "Promotion Lag, Org 3, Position 2"],
[3, 3, Any, Any, Any, Any, Any, Any, 30, Any, #9
Yes, 7, "Promotion Lag, Org 3, Position 3"],
[1, 1, Any, Any, Any, Any, 0, 12, Any, Any, #10
Yes, 8, "New employee, Org 1, Position 1"],
[2, 1, Any, Any, Any, Any, 0, 12, Any, Any, #11
Yes, 8, "New employee, Org 1, Position 2"],
[3, 1, Any, Any, Any, Any, 0, 30, Any, Any, #12
Yes, 9, "New employee, Org 1, Position 3"],
[1, 2, Any, Any, Any, Any, 0, 24, Any, Any, #13
Yes, 10, "New employee, Org 2, Position 1"],
[2, 2, Any, Any, Any, Any, 0, 30, Any, Any, #14
Yes, 11, "New employee, Org 2, Position 2"],
[Any, 1, Any, Low, High, Any, Any, Any, Any, Any, #15
Yes, 13, "Disappointing evaluation, Org 1"],
[Any, 2, Any, Low, High, Any, Any, Any, Any, Any, #16
Yes, 14, "Disappointing evaluation, Org 2"],
[Any, Any, Yes, Med, High, Low, Any, Any, Any, Any, #17
Yes, 15, "Compensation doesn't match evaluations, Med rating"],
[Any, Any, Yes, High, High, Low, Any, Any, Any, Any, #18
Yes, 15, "Compensation doesn't match evaluations, High rating"],
[Any, 1, Yes, Med, High, Med, Any, Any, Any, Any, #19
Yes, 16, "Compensation doesn't match evaluations, Org 1, Med rating"],
[Any, 2, Yes, Med, High, Med, Any, Any, Any, Any, #20
Yes, 16, "Compensation doesn't match evaluations, Org 2, Med rating"],
[Any, 1, Yes, High, High, Med, Any, Any, Any, Any, #21
Yes, 16, "Compensation doesn't match evaluations, Org 1, High rating"],
[Any, 2, Yes, High, High, Med, Any, Any, Any, Any, #22
Yes, 16, "Compensation doesn't match evaluations, Org 2, High rating"],
[Any, 1, Any, Any, Med, Med, 120, 180, Any, Any, #23
Yes, 17, "Mid-career crisis, Org 1"],
[Any, 2, Yes, Any, Any, Med, 130, 190, Any, Any, #24
Yes, 18, "Mid-career crisis, Org 2"]
]
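# How to read a rule (illustration added here, not in the original source): rule #0
# above says that any employee in Org 1 with a High rating but Low salary
# competitiveness, regardless of the remaining features, is labelled Yes with the
# explanation "Seeking Higher Salary in Org 1".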
def ruleValToString(val):
""" Convert the value passed into a string """
if val == Any :
return "Any"
elif val == Low :
return "Low"
elif val == Med :
return "Med"
elif val == High :
return "High"
elif val == Yes :
return "Yes"
elif val == No :
return "No"
elif val == Random :
return "Random"
else :
return str(val)
def printFeatureStringHeader() :
""" Print the feature headings """
print(" Feature Headings")
print("[Pos, Org, Pot, Rating, Slope, Salary Competitiveness, Tenure, Position Tenure]")
def featuresToString(featureVector) :
""" Convert a feature vector into is string format"""
val = "["
for i in range(0, 2) : # These features are just ints, Position, Organization
val += str(featureVector[i])
val += " "
for i in range(2, 6) : # show encoding for these: Potential, Rating, Rating Slope, Salary Competitiveness
val += ruleValToString(featureVector[i])
val += " "
for i in range(6, 8) : # These features are just ints: Tenure and Position Tenure
val += str(featureVector[i])
val += " "
val += "]"
return val
def printRule(rule) :
    """ Print the passed rule """
    print("Rule: ", end='')
    for i in rule[0:2]: # ints or Any: Position and Organization
        if i == Any:
            print(ruleValToString(i) + ", ", end='')
        else :
            print(str(i) + ", ", end='')
    for i in rule[2:6]: # encoded: Potential, Rating, Rating Slope, Salary Competitiveness
        print(ruleValToString(i) + ", ", end='')
    for i in rule[6:10]: # next 4 are ints or ANY: Tenure Low, Tenure High, Position Tenure Low, Position Tenure High
        if i == Any :
            print(ruleValToString(i) + ", ", end='')
        else :
            print(str(i) + ", ", end='')
    print("==> "+ ruleValToString(rule[10]) + "[" + str(rule[11]) + "] " + str(rule[12]))
def printRules(rules) :
""" print all rules"""
for r in rules:
printRule(r)
########################################################################
def chooseRangeValue(thresholds, rangeList):
""" Generate a random value based on the probability weights (thresholds) and list of ranges passed
Args:
thresholds : list of probabilities for each choice
rangeList: a list of pair lists giving the lower and upper bounds to choose value from
"""
# pick a number 1..3 from weights
rangeVal = choices(Index3Population, thresholds)
# get the appropriate range given rangeVal
interval = rangeList[rangeVal[0]]
# construct a population list from the result
intervalPopulation = list(range(interval[0], interval[1]))
    # construct an equal-probability weights list
numElements = interval[1] - interval[0]
probVal = 1.0 / numElements
probList = [probVal] * numElements
# now choose the value from the population based on the weights
val = choices(intervalPopulation, probList)
return val[0]
def chooseValueAndAppend(instance, population, weights) :
""" Choose a random value from the population using weights list and append it to the passed instance
"""
val = choices(population, weights)
instance.append(val[0])
def generateFeatures(numInstances) :
""" generate the features (X) values for the dataset
Args:
        numInstances (int) : number of instances to generate
Returns:
dataset (list of lists) : the dataset with features, but no labels or explanations yet
"""
assert(numInstances > 0)
dataset = []
for i in range(numInstances) :
instance = []
#POS ORG Pot Rating Slope SALC TENL H BTEN LH
chooseValueAndAppend(instance, Integer4Population, featureThresholds[0][1]) # Position
chooseValueAndAppend(instance, Integer3Population, featureThresholds[1][1]) # Org
chooseValueAndAppend(instance, YesNoPopulation, featureThresholds[2][1]) # Potential
chooseValueAndAppend(instance, HighMedLowPopulation, featureThresholds[3][1]) # Rating
chooseValueAndAppend(instance, HighMedLowPopulation, featureThresholds[4][1]) # Rating slope
chooseValueAndAppend(instance, HighMedLowPopulation, featureThresholds[5][1]) # Sal competitiveness
val1 = chooseRangeValue(featureThresholds[6][1], featureThresholds[6][2]) # Tenure
instance.append(val1)
# Position tenure needs to be <= Tenure
val2 = chooseRangeValue(featureThresholds[7][1], featureThresholds[7][2]) # Pos Tenure
if val2 > val1 :
val2 = val1
instance.append(val2)
dataset.append(instance)
return dataset
#####################################################################################################
def match(ruleVal, featureVal) :
""" Check if passed ruleVal matches the featureVal or if ruleVal is Any, which matches everything
"""
# print("Match called: "+ ruleValToString(ruleVal) + " " + ruleValToString(featureVal))
if ruleVal == Any :
return True
return (ruleVal == featureVal)
def intervalMatch(ruleValLower, ruleValUpper, featureVal) :
""" Check to see if featureVal is in the interval defined by [ruleValLower, ruleValUpper)
"""
# Any in lower bound matches all values, (upper bound doesn't matter)
if ruleValLower == Any :
return True
if ruleValLower <= featureVal :
        # Any in the upper bound means infinity
if featureVal < ruleValUpper or ruleValUpper == Any :
return True
return False
def ruleMatch(rule, featureVector) :
""" Determine if the passed featureVector matches the passed rule
"""
if (False) :
print("ruleMatch called, ", end="")
printRule(rule)
print(" feature vector: " + featuresToString(featureVector) )
for i in range(0, 6) : # loop over first 6 features, 0..5
if not match(rule[i], featureVector[i]) : # if we don't find a feature match, the rule doesn't match
# print("Didn't match feature #", i, ruleValToString(featureVector[i]))
return False
# These features are interval-based, so need a different matching routine
if not intervalMatch(rule[6], rule[7], featureVector[6]) : # rule[6] and rule[7] have the lower and upper bounds of interval
# print("Didn't match feature # 6: ", featureVector[6])
return False
if not intervalMatch(rule[8], rule[9], featureVector[7]) : # rule[8] and rule[9] have the lower and upper bounds of interval
# print("Didn't match feature # 7: ", featureVector[7])
return False
# print("Matched all features")
return True # if we didn't find a non-match by now, we found a match
def findRule(instance, ruleSet) :
""" find the rule(s) that matches the feture vector passed
"""
# print("*Looking for rule match for Feature vector: " + featuresToString(instance))
ruleNumber = 0 # counter to track rule number
ruleMatches = [] # will hold all rule numbers that matched
for rule in ruleSet :
if (ruleMatch(rule, instance)) :
ruleMatches.append(ruleNumber)
counts[ruleNumber] += 1 # update global histogram of rule matches for stats reporting
if (False) :
print(" ruleMatch found at rule #" + str(ruleNumber))
print(" ", end="")
printRule(rule)
ruleNumber += 1
return ruleMatches
def countAnys(rule) :
""" Count the number of Anys in the passed rule. An "Any" is a wildcard that matches all values
"""
count = 0
for feature in RetentionRules[rule] :
if feature == Any :
count += 1
return count
def pickBestRule(ruleList) :
""" Choose the rule with the least number of Any's in it
"""
assert(len(ruleList) > 0)
# print("ruleList: ", ruleList)
minAnys = len(RetentionRules[0]) + 1 # initialize to a value larger than possible # of Anys in a rule
bestRule = -1
for rule in ruleList :
# Count # of Any's in rule # rule
count = countAnys(rule)
if count < minAnys :
minAnys = count
bestRule = rule
assert(bestRule != -1) # We should find a best rule
return bestRule
def addLabelsAndExplanations(dataset, rules) :
""" This function will use a ruleset to add labels (Y) and explanations/rules (E) to a passed dataset
    Args:
dataset (list of lists) : a list of feature vectors (list)
rules (list of lists) : a list of rules
"""
noMatches = 0 # Counters to record how often there are no (Yes) matches, 1 (Yes) match, and multiple (Yes) matches
multiMatches = 0
oneMatches = 0
for instance in dataset :
ruleMatches = findRule(instance, rules)
        if len(ruleMatches) == 0 : # We didn't match a (Yes) rule, so this is a No situation
rule = NoRiskRuleNum
label = No
noMatches +=1
elif len(ruleMatches) > 1 : # Matched multiple Yes rules, need to pick one
rule = pickBestRule(ruleMatches)
assert(rule >= 0 and rule < len(rules)) # Ensure rule number is valid
label = Yes
multiMatches += 1
else : # Found 1 Yes rule match, it's the winner
rule = ruleMatches[0]
label = Yes
oneMatches += 1
assert(rule >= 0 and rule < len(rules)) # Ensure rule number is valid
# print("Label: " + ruleValToString(label) + ", Rule: " + ruleValToString(rule))
instance.append(label)
instance.append(rule) # add the label and explanation (rule #) to the featureVector
if (True) :
print("\nRule matching statistics: ")
totalYes = oneMatches + multiMatches
total = oneMatches + multiMatches + noMatches
print(" Yes Labels: {}/{} ({:.2f}%)".format(totalYes, total, totalYes/total*100))
print(" Matched 1 Yes rule: {}/{} ({:.2f}%)".format(oneMatches, totalYes, oneMatches/totalYes*100))
print(" Matched multiple Yes rules: {}/{} ({:.2f}%)".format(multiMatches, totalYes, multiMatches/totalYes*100))
print(" No Laels: {}/{} ({:.2f}%)".format(noMatches, total, noMatches/total*100))
def printRuleUsage(counts, total) :
print("\nHistogram of rule usage:")
ruleNum = 0
for num in counts :
print(" Rule {} was used {} times, {:.2f}%".format(ruleNum, num, num/total*100))
ruleNum += 1
numRetentionRules = len(RetentionRules)
counts = [0]*numRetentionRules
NoRiskRuleNum = numRetentionRules # the No Risk to leave rule is 1 more than the total rules [0..]
random.seed(1)
# printFeatureStringHeader()
numInstances = 10000
dataset = generateFeatures(numInstances)
addLabelsAndExplanations(dataset, RetentionRules)
printRuleUsage(counts, numInstances)
# insert TED headers
NumFeatures = len(featureThresholds)
header = list(range(NumFeatures))
header.append("Y")
header.append("E")
dataset.insert(0, header)
# write to csv file
my_df = pd.DataFrame(dataset)
my_df.to_csv('Retention.csv', index=False, header=False)
|
graphene_django/tests/test_schema.py | mebel-akvareli/graphene-django | 4,038 | 12683402 | from py.test import raises
from ..registry import Registry
from ..types import DjangoObjectType
from .models import Reporter
def test_should_raise_if_no_model():
with raises(Exception) as excinfo:
class Character1(DjangoObjectType):
fields = "__all__"
assert "valid Django Model" in str(excinfo.value)
def test_should_raise_if_model_is_invalid():
with raises(Exception) as excinfo:
class Character2(DjangoObjectType):
class Meta:
model = 1
fields = "__all__"
assert "valid Django Model" in str(excinfo.value)
def test_should_map_fields_correctly():
class ReporterType2(DjangoObjectType):
class Meta:
model = Reporter
registry = Registry()
fields = "__all__"
fields = list(ReporterType2._meta.fields.keys())
assert fields[:-2] == [
"id",
"first_name",
"last_name",
"email",
"pets",
"a_choice",
"reporter_type",
]
assert sorted(fields[-2:]) == ["articles", "films"]
def test_should_map_only_few_fields():
class Reporter2(DjangoObjectType):
class Meta:
model = Reporter
fields = ("id", "email")
assert list(Reporter2._meta.fields.keys()) == ["id", "email"]
|
doc/examples/bench_command.py | Tada-Project/pyperf | 225 | 12683409 | #!/usr/bin/env python3
import sys
import pyperf
runner = pyperf.Runner()
runner.bench_command('python_startup', [sys.executable, '-c', 'pass'])
|
library/mmap_test.py | creativemindplus/skybison | 278 | 12683412 | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. (http://www.facebook.com)
import mmap
import os
import tempfile
import unittest
class MmapTest(unittest.TestCase):
def test_new_with_wrong_class_raises_type_error(self):
with self.assertRaises(TypeError) as context:
mmap.mmap.__new__(list, -1, 1)
self.assertIn("is not a sub", str(context.exception))
def test_new_with_non_int_fileno_raises_type_error(self):
with self.assertRaises(TypeError) as context:
mmap.mmap.__new__(mmap.mmap, "not-int", 1)
self.assertIn("str", str(context.exception))
def test_new_with_non_int_length_raises_type_error(self):
with self.assertRaises(TypeError) as context:
mmap.mmap.__new__(mmap.mmap, -1, "not-int")
self.assertIn("str", str(context.exception))
def test_new_with_non_int_flags_raises_type_error(self):
with self.assertRaises(TypeError) as context:
mmap.mmap.__new__(mmap.mmap, -1, 1, flags="not-int")
self.assertIn("str", str(context.exception))
def test_new_with_non_int_prot_raises_type_error(self):
with self.assertRaises(TypeError) as context:
mmap.mmap.__new__(mmap.mmap, -1, 1, prot="not-int")
self.assertIn("str", str(context.exception))
def test_new_with_non_int_access_raises_type_error(self):
with self.assertRaises(TypeError) as context:
mmap.mmap.__new__(mmap.mmap, -1, 1, access="not-int")
self.assertIn("str", str(context.exception))
def test_new_with_non_int_offset_raises_type_error(self):
with self.assertRaises(TypeError) as context:
mmap.mmap.__new__(mmap.mmap, -1, 1, offset="not-int")
self.assertIn("str", str(context.exception))
def test_new_with_negative_len_raises_overflow_error(self):
with self.assertRaises(OverflowError) as context:
mmap.mmap.__new__(mmap.mmap, -1, -1)
self.assertEqual(
"memory mapped length must be positive", str(context.exception)
)
def test_new_with_negative_offset_raises_overflow_error(self):
with self.assertRaises(OverflowError) as context:
mmap.mmap.__new__(mmap.mmap, -1, 1, offset=-1)
self.assertEqual(
"memory mapped offset must be positive", str(context.exception)
)
def test_new_that_sets_both_access_and_flags_raises_value_error(self):
with self.assertRaises(ValueError) as context:
mmap.mmap.__new__(mmap.mmap, -1, 1, flags=-1, access=1)
self.assertEqual(
"mmap can't specify both access and flags, prot.", str(context.exception)
)
def test_new_that_sets_both_access_and_prot_raises_value_error(self):
with self.assertRaises(ValueError) as context:
mmap.mmap.__new__(mmap.mmap, -1, 1, prot=-1, access=1)
self.assertEqual(
"mmap can't specify both access and flags, prot.", str(context.exception)
)
def test_new_that_sets_invalid_access_raises_value_error(self):
with self.assertRaises(ValueError) as context:
mmap.mmap.__new__(mmap.mmap, -1, 1, access=-1)
self.assertEqual("mmap invalid access parameter.", str(context.exception))
def test_anonymous_mmap_can_be_closed(self):
m = mmap.mmap(-1, 1)
m.close()
def test_mmap_of_empty_file_raises_value_error(self):
with tempfile.TemporaryDirectory() as dir_path:
fd, path = tempfile.mkstemp(dir=dir_path)
with self.assertRaises(ValueError) as context:
mmap.mmap(fd, 0)
self.assertEqual("cannot mmap an empty file", str(context.exception))
os.close(fd)
def test_mmap_of_file_with_bigger_offset_raises_value_error(self):
with tempfile.TemporaryDirectory() as dir_path:
fd, path = tempfile.mkstemp(dir=dir_path)
os.write(fd, b"Hello")
with self.assertRaises(ValueError) as context:
mmap.mmap(fd, 0, offset=10)
self.assertEqual(
"mmap offset is greater than file size", str(context.exception)
)
os.close(fd)
def test_mmap_of_file_with_bigger_length_raises_value_error(self):
with tempfile.TemporaryDirectory() as dir_path:
fd, path = tempfile.mkstemp(dir=dir_path)
os.write(fd, b"Hello")
with self.assertRaises(ValueError) as context:
mmap.mmap(fd, 10)
self.assertEqual(
"mmap length is greater than file size", str(context.exception)
)
os.close(fd)
def test_mmap_of_file_with_nonexistant_file_raises_os_error(self):
with tempfile.TemporaryDirectory() as dir_path:
fd, path = tempfile.mkstemp(dir=dir_path)
os.close(fd)
os.remove(path)
with self.assertRaises(OSError) as context:
mmap.mmap(fd, 0)
self.assertEqual("[Errno 9] Bad file descriptor", str(context.exception))
def test_mmap_of_file_with_directory(self):
with tempfile.TemporaryDirectory() as dir_path:
fd = os.open(dir_path, os.O_RDONLY)
with self.assertRaises(OSError):
mmap.mmap(fd, 0)
os.close(fd)
def test_mmap_of_file_with_zero_length_gets_file_size(self):
with tempfile.TemporaryDirectory() as dir_path:
fd, path = tempfile.mkstemp(dir=dir_path)
os.write(fd, b"Hello")
m = mmap.mmap(fd, 0)
view = memoryview(m)
self.assertEqual(view.nbytes, 5)
os.close(fd)
def test_mmap_of_file_can_write_to_file(self):
with tempfile.TemporaryDirectory() as dir_path:
fd, path = tempfile.mkstemp(dir=dir_path)
os.write(fd, b"Hello")
m = mmap.mmap(fd, 3)
view = memoryview(m)
self.assertEqual(view.nbytes, 3)
view[:3] = b"foo"
os.close(fd)
with open(path) as f:
result = f.read()
self.assertEqual(result, "foolo")
def test_mmap_of_file_with_readonly_prot_is_readonly(self):
with tempfile.TemporaryDirectory() as dir_path:
fd, path = tempfile.mkstemp(dir=dir_path)
os.write(fd, b"Hello")
m = mmap.mmap(fd, 3, prot=mmap.PROT_READ)
view = memoryview(m)
self.assertEqual(view.nbytes, 3)
with self.assertRaises(TypeError) as context:
view[:3] = b"foo"
self.assertEqual("cannot modify read-only memory", str(context.exception))
os.close(fd)
def test_mmap_of_file_with_private_memory_doesnt_map_changes_to_file(self):
with tempfile.TemporaryDirectory() as dir_path:
fd, path = tempfile.mkstemp(dir=dir_path)
os.write(fd, b"Hello")
m = mmap.mmap(fd, 3, flags=mmap.MAP_PRIVATE)
view = memoryview(m)
self.assertEqual(view.nbytes, 3)
view[:3] = b"foo"
os.close(fd)
with open(path) as f:
result = f.read()
self.assertEqual(result, "Hello")
def test_prot_constants_are_all_different(self):
self.assertNotEqual(mmap.PROT_EXEC, mmap.PROT_READ)
self.assertNotEqual(mmap.PROT_READ, mmap.PROT_WRITE)
self.assertNotEqual(mmap.PROT_WRITE, mmap.PROT_EXEC)
if __name__ == "__main__":
unittest.main()
|
Tools/Scenarios/visualize.py | ErQing/Nova | 212 | 12683415 |
#!/usr/bin/env python3
import numpy as np
import skimage.io
import sobol_seq
from luaparser import astnodes
from nova_script_parser import get_node_name, parse_chapters, walk_functions
in_filename = 'scenario.txt'
out_filename = 'scenario.png'
MONOLOGUE_COLOR = (128, 128, 128)
BG_NONE_COLOR = (0, 0, 0)
BGM_NONE_COLOR = (0, 0, 0)
dialogue_width = 32
bg_width = 4
bgm_width = 4
str_to_color_config = {
'black': (0, 0, 0),
'white': (255, 255, 255),
}
bg_suffixes = ['blur']
str_to_color_cache = {}
str_to_color_seed = 2
def str_to_color(s):
global str_to_color_seed
if s in str_to_color_config:
return str_to_color_config[s]
if s in str_to_color_cache:
return str_to_color_cache[s]
rgb, str_to_color_seed = sobol_seq.i4_sobol(3, str_to_color_seed)
rgb = (rgb * 192).astype(int) + 64
rgb = rgb.tolist()
str_to_color_cache[s] = rgb
return rgb
def normalize_bg_name(s):
tokens = s.split('_')
while tokens[-1].isnumeric() or tokens[-1] in bg_suffixes:
tokens = tokens[:-1]
out = '_'.join(tokens)
return out
def chapter_to_tape(entries, chara_set, bg_set, timeline_set, bgm_set):
tape = []
dialogue_color = MONOLOGUE_COLOR
bg_color = BG_NONE_COLOR
timeline_color = BG_NONE_COLOR
bgm_color = BGM_NONE_COLOR
for code, chara_name, _ in entries:
if chara_name:
chara_set.add(chara_name)
dialogue_color = str_to_color(chara_name)
else:
dialogue_color = MONOLOGUE_COLOR
if code:
for func_name, args, _ in walk_functions(code):
if (func_name in [
'show', 'trans', 'trans2', 'trans_fade', 'trans_left',
'trans_right', 'trans_up', 'trans_down'
] and args and get_node_name(args[0]) == 'bg'
and isinstance(args[1], astnodes.String)):
bg_name = normalize_bg_name(args[1].s)
bg_set.add(bg_name)
bg_color = str_to_color(bg_name)
elif (func_name == 'show_loop' and args
and get_node_name(args[0]) == 'bg'):
bg_name = normalize_bg_name(args[1].fields[0].value.s)
bg_set.add(bg_name)
bg_color = str_to_color(bg_name)
elif (func_name == 'hide' and args
and get_node_name(args[0]) == 'bg'):
bg_color = BG_NONE_COLOR
elif func_name == 'timeline':
timeline_name = args[0].s
timeline_set.add(timeline_name)
timeline_color = str_to_color(timeline_name)
elif func_name == 'timeline_hide':
timeline_color = BG_NONE_COLOR
elif (func_name in ['play', 'fade_in'] and args
and get_node_name(args[0]) == 'bgm'):
bgm_name = args[1].s
bgm_set.add(bgm_name)
bgm_color = str_to_color(bgm_name)
elif (func_name in ['stop', 'fade_out'] and args
and get_node_name(args[0]) == 'bgm'):
bgm_color = BGM_NONE_COLOR
if bg_color != BG_NONE_COLOR:
_bg_color = bg_color
else:
_bg_color = timeline_color
tape.append((dialogue_color, _bg_color, bgm_color))
return tape
def tapes_to_img(tapes):
tape_width = dialogue_width + bg_width + bgm_width
img_height = max(len(tape) for tape in tapes)
img = np.zeros([img_height, len(tapes) * tape_width, 3], dtype=np.uint8)
for tape_idx, tape in enumerate(tapes):
img_tape = img[:,
tape_idx * tape_width:(tape_idx + 1) * tape_width:, :]
for idx, (dialogue_color, bg_color, bgm_color) in enumerate(tape):
img_tape[idx, :dialogue_width, :] = dialogue_color
img_tape[idx,
dialogue_width:dialogue_width + bg_width, :] = bg_color
img_tape[idx, dialogue_width + bg_width:, :] = bgm_color
return img
def main():
with open(in_filename, 'r', encoding='utf-8') as f:
chapters = parse_chapters(f)
tapes = []
chara_set = set()
bg_set = set()
timeline_set = set()
bgm_set = set()
for chapter_name, entries, _, _ in chapters:
print(chapter_name)
tapes.append(
chapter_to_tape(entries, chara_set, bg_set, timeline_set, bgm_set))
print()
print('Characters:')
for x in sorted(chara_set):
print(x)
print()
print('Backgrounds:')
for x in sorted(bg_set):
print(x)
print()
print('Timelines:')
for x in sorted(timeline_set):
print(x)
print()
print('BGM:')
for x in sorted(bgm_set):
print(x)
print()
img = tapes_to_img(tapes)
skimage.io.imsave(out_filename, img)
if __name__ == '__main__':
main()
|
python_legacy/iceberg/core/base_transaction.py | x-malet/iceberg | 502 | 12683426 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import enum
from iceberg.api import (Table,
Transaction)
from iceberg.core import TableOperations
from iceberg.exceptions import CommitFailedException
class BaseTransaction(Transaction):
@staticmethod
def replace_table_transaction(ops, start):
return BaseTransaction(ops, start)
@staticmethod
    def create_table_transaction(ops, start):
        if ops.current() is not None:
            raise RuntimeError("Cannot start create table transaction: table already exists")
        return BaseTransaction(ops, start)
@staticmethod
def new_transaction(ops):
        return BaseTransaction(ops, ops.refresh())
def __init__(self, ops, start):
self.ops = ops
self.updates = list()
self.intermediate_snapshot_ids = set()
        self.base = ops.current()
if self.base is None and start is None:
self.type = TransactionType.CREATE_TABLE
elif self.base is not None and start != self.base:
self.type = TransactionType.REPLACE_TABLE
else:
self.type = TransactionType.SIMPLE
self.last_base = None
self.current = start
self.transaction_table = TransactionTable(self, self.current)
        self.transaction_ops = TransactionTableOperations(self)
def table(self):
return self.transaction_table
    # NOTE: function name has a typo in the word `comitted`. Kept for backwards compatibility in the legacy python API.
def check_last_operation_commited(self, operation):
if self.last_base == self.current:
raise RuntimeError("Cannot create new %s: last operation has not committed" % operation)
self.last_base = self.current
def update_schema(self):
self.check_last_operation_commited("UpdateSchema")
@staticmethod
def current_id(meta):
if meta is not None and meta.current_snapshot() is not None:
return meta.current_snapshot().snapshot_id
class TransactionType(enum.Enum):
CREATE_TABLE = 0
REPLACE_TABLE = 1
    SIMPLE = 2
class TransactionTableOperations(TableOperations):
def __init__(self, bt):
self._bt = bt
def current(self):
return self._bt.current
def refresh(self):
return self._bt.current
def commit(self, base, metadata):
if base != self.current():
raise CommitFailedException("Table metadata refresh is required")
old_id = BaseTransaction.current_id(self._bt.current)
if old_id is not None and old_id not in (BaseTransaction.current_id(metadata),
BaseTransaction.current_id(base)):
self._bt.intermediate_snapshot_ids.add(old_id)
self._bt.current = metadata
def io(self):
return self._bt.ops.io()
def metadata_file_location(self, file):
return self._bt.ops.metadata_file_location(file)
def new_snapshot_id(self):
return self._bt.ops.new_snapshot_id()
class TransactionTable(Table):
def __init__(self, bt, current):
self.bt = bt
self.current = current
def refresh(self):
pass
def new_scan(self):
raise RuntimeError("Transaction tables do not support scans")
def schema(self):
return self.current.schema
def spec(self):
return self.current.spec
def properties(self):
return self.current.properties
def location(self):
return self.current.location
def current_snapshot(self):
return self.current.current_snapshot()
def snapshots(self):
return self.current.snapshots
def update_schema(self):
return self.bt.update_schema()
def update_properties(self):
return self.bt.update_properties()
def update_location(self):
return self.bt.update_location()
def new_append(self):
return self.bt.new_append()
def new_rewrite(self):
return self.bt.new_rewrite()
def new_overwrite(self):
return self.bt.new_overwrite()
def new_replace_partitions(self):
return self.bt.new_replace_partitions()
def new_delete(self):
return self.bt.new_delete()
def expire_snapshots(self):
return self.bt.expire_snapshots()
def rollback(self):
raise RuntimeError("Transaction tables do not support rollback")
def new_transaction(self):
raise RuntimeError("Cannot create a transaction within a transaction")
|
torchdyn/numerics/__init__.py | iisabeller/torchdyn | 825 | 12683429 |
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from torchdyn.numerics.solvers import Euler, RungeKutta4, Tsitouras45, DormandPrince45, AsynchronousLeapfrog, MSZero, MSBackward
from torchdyn.numerics.hypersolvers import HyperEuler
from torchdyn.numerics.odeint import odeint, odeint_symplectic, odeint_mshooting, odeint_hybrid
from torchdyn.numerics.systems import VanDerPol, Lorenz
__all__ = ['odeint', 'odeint_symplectic', 'Euler', 'RungeKutta4', 'DormandPrince45', 'Tsitouras45',
'AsynchronousLeapfrog', 'HyperEuler', 'MSZero', 'MSBackward', 'Lorenz', 'VanDerPol']
|
internal/common/expand_variables.bzl | kriswuollett/rules_nodejs | 645 | 12683434 |
# Copyright 2017 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helper functions to expand "make" variables of form $(VAR)
"""
def expand_variables(ctx, s, outs = [], output_dir = False, attribute_name = "args"):
"""This function is the same as ctx.expand_make_variables with the additional
genrule-like substitutions of:
- $@: The output file if it is a single file. Else triggers a build error.
- $(@D): The output directory. If there is only one file name in outs,
this expands to the directory containing that file. If there are multiple files,
this instead expands to the package's root directory in the bin tree,
even if all generated files belong to the same subdirectory!
- $(RULEDIR): The output directory of the rule, that is, the directory
corresponding to the name of the package containing the rule under the bin tree.
See https://docs.bazel.build/versions/main/be/general.html#genrule.cmd and
https://docs.bazel.build/versions/main/be/make-variables.html#predefined_genrule_variables
for more information of how these special variables are expanded.
"""
rule_dir = [f for f in [
ctx.bin_dir.path,
ctx.label.workspace_root,
ctx.label.package,
] if f]
additional_substitutions = {}
if output_dir:
if s.find("$@") != -1 or s.find("$(@)") != -1:
fail("""$@ substitution may only be used with output_dir=False.
Upgrading rules_nodejs? Maybe you need to switch from $@ to $(@D)
See https://github.com/bazelbuild/rules_nodejs/releases/tag/0.42.0""")
# We'll write into a newly created directory named after the rule
output_dir = [f for f in [
ctx.bin_dir.path,
ctx.label.workspace_root,
ctx.label.package,
ctx.label.name,
] if f]
else:
if s.find("$@") != -1 or s.find("$(@)") != -1:
if len(outs) > 1:
fail("""$@ substitution may only be used with a single out
Upgrading rules_nodejs? Maybe you need to switch from $@ to $(RULEDIR)
See https://github.com/bazelbuild/rules_nodejs/releases/tag/0.42.0""")
if len(outs) == 1:
additional_substitutions["@"] = outs[0].path
output_dir = outs[0].dirname.split("/")
else:
output_dir = rule_dir[:]
# The list comprehension removes empty segments like if we are in the root package
additional_substitutions["@D"] = "/".join([o for o in output_dir if o])
additional_substitutions["RULEDIR"] = "/".join([o for o in rule_dir if o])
return ctx.expand_make_variables(attribute_name, s, additional_substitutions)
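# Illustrative usage sketch (an assumption added here, not from the original file):
# inside a rule implementation the expansion is typically applied per argument, e.g.
#
#     args = [expand_variables(ctx, a, outs = outputs) for a in ctx.attr.args]
#
# so that an attribute value such as "$(RULEDIR)/out.js" expands to
# "<bin dir>/<package>/out.js".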
|
apps/base/models/tax.py | youssriaboelseod/pyerp | 115 | 12683455 |
# Django Library
from django.db import models
from django.utils.translation import ugettext_lazy as _
# Localfolder Library
from .father import PyFather
class PyTax(PyFather):
name = models.CharField(_("Name"), max_length=255)
amount = models.DecimalField(_("Amount"), max_digits=10, decimal_places=2, default=0)
include_price = models.BooleanField(_("Include Price"), default=True, blank=True, null=True)
def __str__(self):
return self.name
class Meta:
verbose_name = _("Tax")
verbose_name_plural = _("PyTax")
|
reporting/basic/bin/rep.py | ga4gh/benchmarking-tools | 157 | 12683464 | <reponame>ga4gh/benchmarking-tools
#!/usr/bin/env python
# coding=utf-8
#
# Create simple reports from GA4GH benchmarking results
#
# Usage:
#
# For usage instructions run with option --help
#
# Author:
#
# <NAME> <<EMAIL>>
#
import sys
import os
import json
import argparse
import jinja2
import gzip
import copy
TEMPLATEDIR = os.path.abspath(os.path.join(os.path.dirname(__file__), "..", "src", "html"))
LIBDIR = os.path.abspath(os.path.join(os.path.dirname(__file__), "..", "src", "python"))
sys.path.append(os.path.abspath(os.path.join(LIBDIR)))
import report.metrics
def extract_metrics(metrics):
""" Extract metrics and get data for tables.
This function takes a list of ROC values, and separates out the summary
values (which go into the tables) and the ROC datapoints (which are drawn).
:param metrics: a list of metrics as read by report.metrics
:return: { "snp/indel_table" : ... , "snp/indel_roc": ... }
"""
data_subset = [d.to_dict() for d in metrics
if d.type in ["SNP", "INDEL"] and
d.filter in ["ALL", "PASS"] and
d.genotype == "*"]
data_subset_snp = [d for d in data_subset if d["type"] == "SNP" and d["subtype"] == "*"]
data_subset_indel = [d for d in data_subset if d["type"] == "INDEL"]
data_subset_snp.sort(key=lambda x: x["subset"])
def _indel_type_order(t):
""" sort indel subtypes """
ordering = {
"D1_5": 1,
"I1_5": 2,
"C1_5": 3,
"D6_15": 4,
"I6_15": 5,
"C6_15": 6,
"D16_PLUS": 7,
"I16_PLUS": 8,
"C16_PLUS": 9
}
try:
return ordering[t]
except:
return 1000
data_subset_indel.sort(key=lambda x: [x["subset"], _indel_type_order(x["subtype"])])
data_subset_snp_roc = [copy.copy(d) for d in data_subset_snp if d["subtype"] == "*" and d["subset"] == "*"]
data_subset_indel_roc = [copy.copy(d) for d in data_subset_indel if d["subtype"] == "*" and d["subset"] == "*"]
# these just get turned into tables, so we don't need the ROC values
for d in data_subset_snp:
del d["roc"]
for d in data_subset_indel:
del d["roc"]
qq_fields = list(set([x.qq_field for x in metrics]))
# 3. run Jinja2 to make the HTML page
return {
"snp_table": json.dumps(json.dumps(data_subset_snp)),
"snp_roc": json.dumps(json.dumps(data_subset_snp_roc)),
"indel_table": json.dumps(json.dumps(data_subset_indel)),
"indel_roc": json.dumps(json.dumps(data_subset_indel_roc)),
"qq_fields": qq_fields,
}
def main():
parser = argparse.ArgumentParser(description="Create a variant calling report.")
parser.add_argument("input", help="Input file in GA4GH metrics CSV format. "
"To label multiple results, use the following pattern: "
"rep.py gatk-3_vcfeval-giab:gatk3.roc.all.csv.gz -o test.html ; this will"
"use the label gatk-3 for 'Method', and vcfeval-giab for the "
"'Comparison' header.", nargs="*")
parser.add_argument("-o", "--output", help="Output file name for reports, e.g. 'report' to write "
"report.html",
required=True)
parser.add_argument("--roc-max-datapoints",
help="Maximum number of data points in a ROC (higher numbers might slow down our plotting)",
dest="roc_datapoints", type=int, default=1000)
parser.add_argument("--roc-resolution",
help="Minimum difference in precision / recall covered by the ROC curves.",
dest="roc_diff", type=float, default=0.005)
parser.add_argument("--min-recall", help="Minimum recall for ROC curves (use to reduce size of output file by "
"clipping the bits of the ROC that are not meaningful)",
dest="min_recall", type=float, default=0.2)
parser.add_argument("--min-precision", help="Minimum precision for ROC curves (use to reduce size of output file by"
" clipping the bits of the ROC that are not meaningful)",
dest="min_precision", type=float, default=0.0)
args = parser.parse_args()
# 1. Read input files
if args.output.endswith(".gz"):
args.output = gzip.GzipFile(args.output, "w")
elif not args.output.endswith(".html"):
args.output += ".html"
metrics = []
for i in args.input:
l = i.split(":")
method_label = "default"
cmethod_label = "default"
if len(l) <= 1:
rfiles = [l[0]]
else:
rfiles = l[1:]
labels = l[0].split("_")
if len(labels) > 0:
method_label = labels[0]
if len(labels) > 1:
cmethod_label = labels[1]
print "reading %s as %s / %s" % (str(rfiles), method_label, cmethod_label)
row_metrics = report.metrics.read_qfy_csv(rfiles,
method=method_label,
cmethod=cmethod_label,
roc_metrics=["METRIC.Precision", "METRIC.Recall"],
roc_diff=args.roc_diff,
max_data_points=args.roc_datapoints,
minmax={"METRIC.Precision": {"min": args.min_precision},
"METRIC.Recall": {"min": args.min_recall}}
)
metrics += row_metrics
if not metrics:
raise Exception("No inputs specified.")
# 2. Subset data, only read SNP / indel, ALL, PASS
template_vars = extract_metrics(metrics)
# 3. render template
loader = jinja2.FileSystemLoader(searchpath=TEMPLATEDIR)
env = jinja2.Environment(loader=loader)
template = env.get_template("report.jinja2.html")
template.stream(**template_vars).dump(args.output)
if __name__ == '__main__':
main()
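# Illustrative invocation (added as an assumption, mirroring the --help text above):
#   rep.py gatk-3_vcfeval-giab:gatk3.roc.all.csv.gz -o report.html
# labels the metrics "gatk-3" (Method) / "vcfeval-giab" (Comparison) and writes
# the rendered report to report.html.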
|
examples/TodoMVC/main.py | splashelec/atlas-python | 221 | 12683465 |
"""
MIT License
Copyright (c) 2018 <NAME> (https://q37.info/s/rmnmqd49)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import os, sys
os.chdir(os.path.dirname(os.path.realpath(__file__)))
sys.path.append("../../atlastk")
import atlastk
class TodoMVC:
def __init__(self):
self.exclude = None
self.index = -1
self.todos = []
if False: # Set to 'True' for testing purpose.
self.todos.append({"label": "Todo 1", "completed": False })
self.todos.append({"label": "Todo 2", "completed": True })
def items_left(self):
count = 0
for index in range(len(self.todos)):
if not self.todos[index]['completed']:
count += 1
return count
def push(self, todo, id, xml):
xml.push_tag("Todo")
xml.put_attribute("id", id)
xml.put_attribute("completed", "true" if todo['completed'] else "false")
xml.putValue(todo['label'])
xml.pop_tag()
def display_count(self, dom, count):
text = ""
if count == 1:
text = "1 item left"
elif count != 0:
text = str(count) + " items left"
dom.set_value("Count", text)
def handle_count(self, dom):
count = self.items_left()
if count != len(self.todos):
dom.disable_element("HideClearCompleted")
else:
dom.enable_element("HideClearCompleted")
self.display_count(dom, count)
def display_todos(self, dom):
xml = atlastk.create_XML("XDHTML")
xml.push_tag("Todos")
for index in range(len(self.todos)):
todo = self.todos[index]
if (self.exclude == None) or (todo['completed'] != self.exclude):
self.push(todo, index, xml)
xml.pop_tag()
dom.inner("Todos", xml, "Todos.xsl")
self.handle_count(dom)
def submit_new(self, dom):
value = dom.get_value("Input").strip()
dom.set_value("Input", "")
if value:
self.todos.insert(0, {'label': value, 'completed': False})
self.display_todos(dom)
def submit_modification(self, dom):
index = self.index
self.index = -1
value = dom.get_value("Input." + str(index)).strip()
dom.set_value("Input." + str(index), "")
if value:
self.todos[index]['label'] = value
dom.set_value("Label." + str(index), value)
dom.remove_classes({"View." + str(index): "hide", "Todo." + str(index): "editing"})
else:
self.todos.pop(index)
			self.display_todos(dom)
def ac_connect(self, dom):
dom.inner("", open("Main.html").read())
dom.focus("Input")
self.display_todos(dom)
dom.disable_elements(["HideActive", "HideCompleted"])
def ac_destroy(self, dom, id):
self.todos.pop(int(dom.get_mark(id)))
self.display_todos(dom)
def ac_toggle(self, dom, id):
index = int(id)
self.todos[index]['completed'] = not self.todos[index]['completed']
dom.toggle_class("Todo." + id, "completed")
dom.toggle_class("Todo." + id, "active")
self.handle_count(dom)
def ac_all(self, dom):
self.exclude = None
dom.add_class("All", "selected")
dom.remove_classes({"Active": "selected", "Completed": "selected"})
dom.disable_elements(["HideActive", "HideCompleted"])
def ac_active(self, dom):
self.exclude = True
dom.add_class("Active", "selected")
dom.remove_classes({"All": "selected", "Completed": "selected"})
dom.disable_element("HideActive")
dom.enable_element("HideCompleted")
def ac_completed(self, dom):
self.exclude = False
dom.add_class("Completed", "selected")
dom.remove_classes({"All": "selected", "Active": "selected"})
dom.disable_element("HideCompleted")
dom.enable_element("HideActive")
def ac_clear(self, dom):
index = len(self.todos)
while index:
index -= 1
if self.todos[index]['completed']:
self.todos.pop(index)
self.display_todos(dom)
def ac_edit(self, dom, id):
value = dom.get_mark(id)
self.index = int(value)
dom.add_classes({"View." + value: "hide", id: "editing"})
dom.set_value("Input." + value, self.todos[self.index]['label'])
dom.focus("Input." + value)
def ac_cancel(self, dom):
index = str(self.index)
self.index = -1
dom.set_value("Input." + index, "")
dom.remove_classes({"View." + index: "hide", "Todo." + index: "editing"})
callbacks = {
"": ac_connect,
"Submit": lambda self, dom: self.submit_new(dom) if self.index == -1 else self.submit_modification(dom),
"Destroy": ac_destroy,
"Toggle": ac_toggle,
"All": ac_all,
"Active": ac_active,
"Completed": ac_completed,
"Clear": ac_clear,
"Edit": ac_edit,
"Cancel": ac_cancel,
}
atlastk.launch(callbacks, TodoMVC, open("HeadFaaS.html").read())
|
examples/cross_val/scripts/generate_folds.py | ClementMayer/substra | 119 | 12683483 | # Copyright 2018 Owkin, inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import json
import numpy as np
from sklearn.model_selection import KFold
N_FOLDS = 4
current_directory = os.path.dirname(__file__)
assets_keys_path = os.path.join(current_directory, '../../titanic/assets_keys.json')
print(f'Loading existing asset keys from {os.path.abspath(assets_keys_path)}...')
with open(assets_keys_path, 'r') as f:
assets_keys = json.load(f)
train_data_sample_keys = assets_keys['train_data_sample_keys']
print('Generating folds...')
X = np.array(train_data_sample_keys)
kf = KFold(n_splits=N_FOLDS, shuffle=True)
folds = [
{
'train_data_sample_keys': list(X[train_index]),
'test_data_sample_keys': list(X[test_index])
} for train_index, test_index in kf.split(X)
]
with open(os.path.join(current_directory, '../folds_keys.json'), 'w') as f:
json.dump({'folds': folds}, f, indent=2)
print(f'Folds keys have been saved to {os.path.abspath(assets_keys_path)}')
|
venv/Lib/site-packages/tzlocal/__init__.py | ajayiagbebaku/NFL-Model | 137 | 12683492 |
import sys
if sys.platform == "win32":
from tzlocal.win32 import (
get_localzone,
get_localzone_name,
reload_localzone,
) # pragma: no cover
else:
from tzlocal.unix import get_localzone, get_localzone_name, reload_localzone
__all__ = ["get_localzone", "get_localzone_name", "reload_localzone"]
|
setup.py | german-levi/django-hitcount | 348 | 12683495 | # -*- coding: utf-8 -*-
import os
from setuptools import setup
hitcount = __import__('hitcount')
README = open(os.path.join(os.path.dirname(__file__), 'README.rst')).read()
# allow setup.py to be run from any path
os.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir)))
setup(
name="django-hitcount",
version=hitcount.__version__,
include_package_data=True,
packages=['hitcount'],
url='http://github.com/thornomad/django-hitcount',
license='BSD',
description="Hit counting application for Django.",
long_description=README,
author='<NAME>',
author_email='<EMAIL>',
install_requires=[
'django-etc>=1.2.0',
],
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Plugins',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Programming Language :: Python',
'Topic :: Software Development :: Libraries :: Python Modules',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
],
zip_safe=False,
)
|
tests/detection/test_node_merger.py | grischard/OSMDeepOD | 156 | 12683501 | import pytest
from src.base.node import Node
from src.data.osm.node_merger import NodeMerger
@pytest.fixture(scope="module")
def node_list():
n1 = Node(7.41275611, 46.922925, 1)
n2 = Node(7.41275612, 46.922925, 2)
n3 = Node(7.41275613, 46.922925, 3)
n4 = Node(8.412797, 46.922942, 4)
n5 = Node(8.412797, 46.922941, 5)
return [n1, n2, n3, n4, n5]
@pytest.fixture(scope="module")
def same_node():
return Node(46.78351333884473, 8.159137666225423, 10)
@pytest.fixture(scope="module")
def big_node_list():
return [Node(47.09572760391754, 9.354246854782108, 0.0), Node(47.09569108531167, 9.353826284408573, 0.0),
Node(47.095734907638715, 9.353978633880619, 0.0), Node(47.091450260764105, 9.347023665904997, 0.0),
Node(47.09598323415865, 9.353849887847904, 0.0), Node(47.09582072636252, 9.354110956192018, 0.0),
Node(47.095880982062205, 9.353635311126713, 0.0), Node(47.09582255229281, 9.353581666946415, 0.0)]
def test_get_neighbors(node_list):
merger = NodeMerger(node_list)
merger._generate_near_dict()
result_list = merger._get_neighbors(node_list[0])
assert len(result_list) == 3
result_list = merger._get_neighbors(node_list[3])
assert len(result_list) == 2
def test_reduce(node_list):
merger = NodeMerger(node_list)
merged_nodes = merger.reduce()
assert len(merged_nodes) == 2
def test_reduce_same_points(same_node):
merger = NodeMerger([same_node, same_node])
merged_nodes = merger.reduce()
assert len(merged_nodes) == 1
def test_reduce_not_same_points(same_node):
node = Node(46.78351333884473, 8.159137666225423, 0)
merger = NodeMerger([node, same_node])
merged_nodes = merger.reduce()
assert len(merged_nodes) == 1
def test_node_merger(big_node_list):
merger = NodeMerger(big_node_list, 30)
nodes = merger.reduce()
assert len(nodes) == 2
|
python/src/vmaf/svmutil.py | aachenmax/vmaf | 2,874 | 12683525 |
# TODO: dependency on src/libsvm/svmutil needs to be properly done, this is a temporary workaround wrapper
from __future__ import absolute_import
import sys
from vmaf.config import VmafConfig
# This will work only when running with a checked out vmaf source, but not via pip install
libsvm_path = VmafConfig.root_path('third_party', 'libsvm', 'python')
if libsvm_path not in sys.path:
# Inject {project}/src/libsvm/python to PYTHONPATH dynamically
sys.path.append(libsvm_path)
try:
# This import will work only if above injection was meaningful (ie: user has the files in the right place)
from svmutil import * # noqa
except ImportError as e:
print("Can't import svmutil from %s: %s" % (libsvm_path, e))
sys.exit(1)
|
Lib/site-packages/faker/utils/datasets.py | Nibraz15/FullTextSearch | 412 | 12683549 | # coding=utf-8
import operator
from collections import Counter
from functools import reduce
def add_dicts(*args):
"""
Adds two or more dicts together. Common keys will have their values added.
For example::
>>> t1 = {'a':1, 'b':2}
>>> t2 = {'b':1, 'c':3}
>>> t3 = {'d':4}
>>> add_dicts(t1, t2, t3)
{'a': 1, 'c': 3, 'b': 3, 'd': 4}
"""
counters = [Counter(arg) for arg in args]
return dict(reduce(operator.add, counters))
|
diffengine/sendgrid.py | jcoffi/diffengine | 175 | 12683576 |
import logging
from datetime import datetime
from sendgrid import Mail, Bcc, SendGridAPIClient
from diffengine.exceptions.sendgrid import (
AlreadyEmailedError,
SendgridConfigNotFoundError,
SendgridArchiveUrlNotFoundError,
)
class SendgridHandler:
api_token = None
sender = None
recipients = None
def __init__(self, config):
if not all(["api_token" in config, "sender" in config, "recipients" in config]):
logging.warning(
"No global config found for sendgrid, expecting config set for each feed"
)
self.api_token = config.get("api_token")
self.sender = config.get("sender")
self.recipients = self.build_recipients(config.get("recipients"))
def mailer(self, api_token):
return SendGridAPIClient(api_token)
def build_recipients(self, recipients):
if recipients:
return [x.strip() for x in recipients.split(",")]
def build_subject(self, diff):
return diff.old.title
def build_html_body(self, diff):
body = None
with open(diff.html_path) as html_file:
body = html_file.read()
return body
def publish_diff(self, diff, feed_config):
if diff.emailed:
raise AlreadyEmailedError(diff.id)
elif not (diff.old.archive_url and diff.new.archive_url):
raise SendgridArchiveUrlNotFoundError()
api_token = feed_config.get("api_token", self.api_token)
sender = feed_config.get("sender", self.sender)
recipients = None
if feed_config.get("recipients"):
recipients = self.build_recipients(feed_config.get("recipients"))
else:
recipients = self.recipients
if not all([api_token, sender, recipients]):
raise SendgridConfigNotFoundError
subject = self.build_subject(diff)
message = Mail(
from_email=sender,
subject=subject,
to_emails=recipients.pop(0),
html_content=self.build_html_body(diff),
)
if recipients:
message.bcc = recipients
try:
self.mailer(api_token).send(message)
diff.emailed = datetime.utcnow()
logging.info("emailed %s", subject)
diff.save()
except Exception as e:
logging.error("unable to email: %s", e)
|
tests/unit/test_env_yml.py | davidjsherman/repo2docker | 1,047 | 12683583 |
"""
Test if the environment.yml is empty or if it contains a data structure other than a dictionary
"""
import os
import sys
import pytest
from repo2docker import buildpacks
def test_empty_env_yml(tmpdir):
tmpdir.chdir()
p = tmpdir.join("environment.yml")
p.write("")
bp = buildpacks.CondaBuildPack()
py_ver = bp.python_version
# If the environment.yml is empty python_version will get an empty string
assert py_ver == ""
def test_no_dict_env_yml(tmpdir):
tmpdir.chdir()
q = tmpdir.join("environment.yml")
q.write("numpy\n " "matplotlib\n")
bq = buildpacks.CondaBuildPack()
with pytest.raises(TypeError):
py_ver = bq.python_version
|
tests/handhistory/ftp_hands.py | Marauder62/poker | 315 | 12683597 |
HAND1 = """
Full Tilt Poker Game #33286946295: MiniFTOPS Main Event (255707037), Table 179 - NL Hold'em - 10/20 - 19:26:50 CET - 2013/09/22 [13:26:50 ET - 2013/09/22]
Seat 1: Popp1987 (13,587)
Seat 2: Luckytobgood (10,110)
Seat 3: FatalRevange (9,970)
Seat 4: IgaziFerfi (10,000)
Seat 5: egis25 (6,873)
Seat 6: gamblie (9,880)
Seat 7: idanuTz1 (10,180)
Seat 8: PtheProphet (9,930)
Seat 9: JohnyyR (9,840)
gamblie posts the small blind of 10
idanuTz1 posts the big blind of 20
The button is in seat #5
*** HOLE CARDS ***
Dealt to IgaziFerfi [9d Ks]
PtheProphet has 15 seconds left to act
PtheProphet folds
JohnyyR raises to 40
Popp1987 has 15 seconds left to act
Popp1987 folds
Luckytobgood folds
FatalRevange raises to 100
IgaziFerfi folds
egis25 folds
gamblie folds
idanuTz1 folds
JohnyyR has 15 seconds left to act
JohnyyR calls 60
*** FLOP *** [8h 4h Tc] (Total Pot: 230, 2 Players)
JohnyyR checks
FatalRevange has 15 seconds left to act
FatalRevange bets 120
JohnyyR folds
Uncalled bet of 120 returned to FatalRevange
FatalRevange mucks
FatalRevange wins the pot (230)
*** SUMMARY ***
Total pot 230 | Rake 0
Board: [8h 4h Tc]
Seat 1: Popp1987 didn't bet (folded)
Seat 2: Luckytobgood didn't bet (folded)
Seat 3: FatalRevange collected (230), mucked
Seat 4: IgaziFerfi didn't bet (folded)
Seat 5: egis25 (button) didn't bet (folded)
Seat 6: gamblie (small blind) folded before the Flop
Seat 7: idanuTz1 (big blind) folded before the Flop
Seat 8: PtheProphet didn't bet (folded)
Seat 9: JohnyyR folded on the Flop
"""
TURBO_SNG = """\
Full Tilt Poker Game #34374264321: $10 Sit & Go (Turbo) (268569961), Table 1 - NL Hold'em - 15/30 - 11:57:01 CET - 2014/06/29 [05:57:01 ET - 2014/06/29]
Seat 1: snake 422 (1,500)
Seat 2: IgaziFerfi (1,500)
Seat 3: MixaOne (1,500)
Seat 4: BokkaBlake (1,500)
Seat 5: Sajiee (1,500)
Seat 6: AzzzJJ (1,500)
snake 422 posts the small blind of 15
IgaziFerfi posts the big blind of 30
The button is in seat #6
*** HOLE CARDS ***
Dealt to IgaziFerfi [2h 5d]
MixaOne calls 30
BokkaBlake folds
Sajiee folds
AzzzJJ raises to 90
snake 422 folds
IgaziFerfi folds
MixaOne calls 60
*** FLOP *** [6s 9c 3d] (Total Pot: 225, 2 Players)
MixaOne bets 30
AzzzJJ raises to 120
MixaOne folds
Uncalled bet of 90 returned to AzzzJJ
AzzzJJ mucks
AzzzJJ wins the pot (285)
*** SUMMARY ***
Total pot 285 | Rake 0
Board: [6s 9c 3d]
Seat 1: snake 422 (small blind) folded before the Flop
Seat 2: IgaziFerfi (big blind) folded before the Flop
Seat 3: MixaOne folded on the Flop
Seat 4: BokkaBlake didn't bet (folded)
Seat 5: Sajiee didn't bet (folded)
Seat 6: AzzzJJ (button) collected (285), mucked
"""
|
checkov/terraform/checks/resource/aws/ELBUsesSSL.py | pmalkki/checkov | 4,013 | 12683599 | <reponame>pmalkki/checkov
from checkov.common.models.enums import CheckResult, CheckCategories
from checkov.terraform.checks.resource.base_resource_check import BaseResourceCheck
class ELBUsesSSL(BaseResourceCheck):
def __init__(self):
name = "Ensure that Elastic Load Balancer(s) uses SSL certificates provided by AWS Certificate Manager"
id = "CKV_AWS_127"
supported_resources = ['aws_elb']
categories = [CheckCategories.GENERAL_SECURITY]
super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)
def scan_resource_conf(self, conf):
self.evaluated_keys = ['listener']
if 'listener' in conf:
for idx, listener in enumerate(conf['listener']):
if 'ssl_certificate_id' not in listener:
self.evaluated_keys = [f'listener/{idx}']
return CheckResult.FAILED
return CheckResult.PASSED
check = ELBUsesSSL()
|
alive_progress/core/calibration.py | Shinyh29/alive-progress | 3,304 | 12683610 | import math
def calibrated_fps(calibrate):
"""Calibration of the dynamic frames per second engine.
I've started with the equation y = log10(x + m) * k + n, where:
y is the desired fps, m and n are horizontal and vertical translation,
k is a calibration factor, computed from some user input c (see readme for details).
Considering minfps and maxfps as given constants, I came to:
fps = log10(x + 1) * k + minfps, which must be equal to maxfps for x = c,
so the factor k = (maxfps - minfps) / log10(c + 1), and
fps = log10(x + 1) * (maxfps - minfps) / log10(c + 1) + minfps
Neat! ;)
Args:
calibrate (float): user provided
Returns:
a callable to calculate the fps
"""
min_fps, max_fps = 2., 60.
calibrate = max(1e-6, calibrate)
adjust_log_curve = 100. / min(calibrate, 100.) # adjust the curve for small numbers
factor = (max_fps - min_fps) / math.log10((calibrate * adjust_log_curve) + 1.)
def fps(rate):
if rate <= 0:
return 10. # bootstrap speed
if rate < calibrate:
return math.log10((rate * adjust_log_curve) + 1.) * factor + min_fps
return max_fps
return fps
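# Added usage sketch (not part of the upstream module): exercises the curve
# documented in the docstring above, assuming a calibration value of 50 items/s.
if __name__ == '__main__':
    demo_fps = calibrated_fps(50.)
    print(demo_fps(0.))     # 10.0 -- bootstrap speed while the rate is still unknown
    print(demo_fps(10.))    # somewhere on the log curve between min_fps and max_fps
    print(demo_fps(50.))    # 60.0 -- the calibration point itself maps to max_fps
    print(demo_fps(500.))   # 60.0 -- rates past the calibration point are capped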
|
Software/Python/grove_ir_receiver.py | benmcclelland/GrovePi | 482 | 12683654 | #!/usr/bin/env python
#
# GrovePi Example for using the Grove - Infrared Receiver (http://www.seeedstudio.com/depot/Grove-Infrared-Receiver-p-994.html)
#
# The GrovePi connects the Raspberry Pi and Grove sensors. You can learn more about GrovePi here: http://www.dexterindustries.com/GrovePi
#
# Have a question about this example? Ask on the forums here: http://forum.dexterindustries.com/c/grovepi
#
'''
## License
The MIT License (MIT)
GrovePi for the Raspberry Pi: an open source platform for connecting Grove Sensors to the Raspberry Pi.
Copyright (C) 2017 Dexter Industries
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
'''
# NOTE:
# Connect the IR sensor to any port. In the code use the pin as port+1. So if you are connecting the sensor to port 7, use "ir_recv_pin(8)"
import time
import grovepi
grovepi.ir_recv_pin(9)
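# pin 9 here corresponds to the sensor wired to port 8 (the port+1 rule from the NOTE above)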
print ("Press any button on the remote to see the data")
while True:
ir_data_back=grovepi.ir_read_signal()
if ir_data_back[0]==-1: #IO Error
pass
elif ir_data_back[0]==0: #Old signal
pass
else:
print(ir_data_back[1:]) #Current signal from IR remote
time.sleep(.1) |
backend/src/baserow/api/user_files/serializers.py | cjh0613/baserow | 839 | 12683692 | from rest_framework import serializers
from drf_spectacular.utils import extend_schema_field
from drf_spectacular.types import OpenApiTypes
from django.conf import settings
from django.core.files.storage import default_storage
from django.utils.translation import gettext_lazy as _
from baserow.core.models import UserFile
from baserow.core.user_files.handler import UserFileHandler
class UserFileUploadViaURLRequestSerializer(serializers.Serializer):
url = serializers.URLField()
class UserFileURLAndThumbnailsSerializerMixin(serializers.Serializer):
url = serializers.SerializerMethodField()
thumbnails = serializers.SerializerMethodField()
def get_instance_attr(self, instance, name):
return getattr(instance, name)
@extend_schema_field(OpenApiTypes.URI)
def get_url(self, instance):
name = self.get_instance_attr(instance, "name")
path = UserFileHandler().user_file_path(name)
url = default_storage.url(path)
return url
@extend_schema_field(OpenApiTypes.OBJECT)
def get_thumbnails(self, instance):
if not self.get_instance_attr(instance, "is_image"):
return None
name = self.get_instance_attr(instance, "name")
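# Illustrative shape of the mapping built below, assuming a hypothetical
# setting USER_THUMBNAILS = {"tiny": [21, 21]}:
#   {"tiny": {"url": "<storage url>", "width": 21, "height": 21}}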
return {
thumbnail_name: {
"url": default_storage.url(
UserFileHandler().user_file_thumbnail_path(name, thumbnail_name)
),
"width": size[0],
"height": size[1],
}
for thumbnail_name, size in settings.USER_THUMBNAILS.items()
}
class UserFileSerializer(
UserFileURLAndThumbnailsSerializerMixin, serializers.ModelSerializer
):
name = serializers.SerializerMethodField()
class Meta:
model = UserFile
fields = (
"size",
"mime_type",
"is_image",
"image_width",
"image_height",
"uploaded_at",
"url",
"thumbnails",
"name",
"original_name",
)
@extend_schema_field(OpenApiTypes.STR)
def get_name(self, instance):
return instance.name
@extend_schema_field(UserFileSerializer)
class UserFileField(serializers.Field):
"""
This field can be used for validating user provided user files, which means a
user has provided a dict containing the user file name. It will check if that
user file exists and returns that instance. Vice versa, a user file instance will
be serialized when converted to data by the serializer.
Example:
Serializer(data={
"user_file": {"name": "filename.jpg"}
}).data == {"user_file": UserFile(...)}
The field can also be used for serializing a user file. The value must then be
provided as instance to the serializer.
Example:
Serializer({
"user_file": UserFile(...)
}).data == {"user_file": {"name": "filename.jpg", ...}}
"""
default_error_messages = {
"invalid_value": _("The value must be an object containing the file name."),
"invalid_user_file": _("The provided user file does not exist."),
}
def __init__(self, *args, **kwargs):
allow_null = kwargs.pop("allow_null", True)
default = kwargs.pop("default", None)
super().__init__(allow_null=allow_null, default=default, *args, **kwargs)
def to_internal_value(self, data):
if isinstance(data, UserFile):
return data
if not isinstance(data, dict) or not isinstance(data.get("name"), str):
self.fail("invalid_value")
try:
user_file = UserFile.objects.all().name(data["name"]).get()
except UserFile.DoesNotExist:
self.fail("invalid_user_file")
return user_file
def to_representation(self, value):
if isinstance(value, UserFile) and self.parent.instance is not None:
return UserFileSerializer(value).data
return value
|
.venv/lib/python3.7/site-packages/IPython/terminal/pt_inputhooks/__init__.py | ITCRStevenLPZ/Proyecto2-Analisis-de-Algoritmos | 1,318 | 12683715 | import importlib
import os
aliases = {
'qt4': 'qt',
'gtk2': 'gtk',
}
backends = [
'qt', 'qt4', 'qt5',
'gtk', 'gtk2', 'gtk3',
'tk',
'wx',
'pyglet', 'glut',
'osx',
'asyncio'
]
registered = {}
def register(name, inputhook):
"""Register the function *inputhook* as an event loop integration."""
registered[name] = inputhook
class UnknownBackend(KeyError):
def __init__(self, name):
self.name = name
def __str__(self):
return ("No event loop integration for {!r}. "
"Supported event loops are: {}").format(self.name,
', '.join(backends + sorted(registered)))
def get_inputhook_name_and_func(gui):
if gui in registered:
return gui, registered[gui]
if gui not in backends:
raise UnknownBackend(gui)
if gui in aliases:
return get_inputhook_name_and_func(aliases[gui])
gui_mod = gui
if gui == 'qt5':
os.environ['QT_API'] = 'pyqt5'
gui_mod = 'qt'
mod = importlib.import_module('IPython.terminal.pt_inputhooks.'+gui_mod)
return gui, mod.inputhook
|
tools/clang/utils/perf-training/perf-helper.py | oubotong/Armariris | 1,073 | 12683729 | #===- perf-helper.py - Clang Python Bindings -----------------*- python -*--===#
#
# The LLVM Compiler Infrastructure
#
# This file is distributed under the University of Illinois Open Source
# License. See LICENSE.TXT for details.
#
#===------------------------------------------------------------------------===#
from __future__ import print_function
import sys
import os
import subprocess
import argparse
import time
import bisect
import random
import shlex
import tempfile
test_env = { 'PATH' : os.environ['PATH'] }
def findFilesWithExtension(path, extension):
filenames = []
for root, dirs, files in os.walk(path):
for filename in files:
if filename.endswith(extension):
filenames.append(os.path.join(root, filename))
return filenames
def clean(args):
if len(args) != 2:
print('Usage: %s clean <path> <extension>\n' % __file__ +
'\tRemoves all files with extension from <path>.')
return 1
for filename in findFilesWithExtension(args[0], args[1]):
os.remove(filename)
return 0
def merge(args):
if len(args) != 3:
print('Usage: %s merge <llvm-profdata> <output> <path>\n' % __file__ +
'\tMerges all profraw files from path into output.')
return 1
cmd = [args[0], 'merge', '-o', args[1]]
cmd.extend(findFilesWithExtension(args[2], "profraw"))
subprocess.check_call(cmd)
return 0
def dtrace(args):
parser = argparse.ArgumentParser(prog='perf-helper dtrace',
description='dtrace wrapper for order file generation')
parser.add_argument('--buffer-size', metavar='size', type=int, required=False,
default=1, help='dtrace buffer size in MB (default 1)')
parser.add_argument('--use-oneshot', required=False, action='store_true',
help='Use dtrace\'s oneshot probes')
parser.add_argument('--use-ustack', required=False, action='store_true',
help='Use dtrace\'s ustack to print function names')
parser.add_argument('--cc1', required=False, action='store_true',
help='Execute cc1 directly (don\'t profile the driver)')
parser.add_argument('cmd', nargs='*', help='')
# Use python's arg parser to handle all leading option arguments, but pass
# everything else through to dtrace
first_cmd = next(arg for arg in args if not arg.startswith("--"))
last_arg_idx = args.index(first_cmd)
opts = parser.parse_args(args[:last_arg_idx])
cmd = args[last_arg_idx:]
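# For example (illustrative), args == ['--use-oneshot', 'clang', '-c', 'x.c']
# splits into opts <- ['--use-oneshot'] and cmd <- ['clang', '-c', 'x.c'].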
if opts.cc1:
cmd = get_cc1_command_for_args(cmd, test_env)
if opts.use_oneshot:
target = "oneshot$target:::entry"
else:
target = "pid$target:::entry"
predicate = '%s/probemod=="%s"/' % (target, os.path.basename(args[0]))
log_timestamp = 'printf("dtrace-TS: %d\\n", timestamp)'
if opts.use_ustack:
action = 'ustack(1);'
else:
action = 'printf("dtrace-Symbol: %s\\n", probefunc);'
dtrace_script = "%s { %s; %s }" % (predicate, log_timestamp, action)
dtrace_args = []
if not os.geteuid() == 0:
print(
'Script must be run as root, or you must add the following to your sudoers:'
+ '%%admin ALL=(ALL) NOPASSWD: /usr/sbin/dtrace')
dtrace_args.append("sudo")
dtrace_args.extend((
'dtrace', '-xevaltime=exec',
'-xbufsize=%dm' % (opts.buffer_size),
'-q', '-n', dtrace_script,
'-c', ' '.join(cmd)))
if sys.platform == "darwin":
dtrace_args.append('-xmangled')
start_time = time.time()
with open("%d.dtrace" % os.getpid(), "w") as f:
subprocess.check_call(dtrace_args, stdout=f, stderr=subprocess.PIPE)
elapsed = time.time() - start_time
print("... data collection took %.4fs" % elapsed)
return 0
def get_cc1_command_for_args(cmd, env):
# Find the cc1 command used by the compiler. To do this we execute the
# compiler with '-###' to figure out what it wants to do.
cmd = cmd + ['-###']
cc_output = subprocess.check_output(cmd, stderr=subprocess.STDOUT, env=env).strip()
cc_commands = []
for ln in cc_output.split('\n'):
# Filter out known garbage.
if (ln == 'Using built-in specs.' or
ln.startswith('Configured with:') or
ln.startswith('Target:') or
ln.startswith('Thread model:') or
ln.startswith('InstalledDir:') or
ln.startswith('LLVM Profile Note') or
' version ' in ln):
continue
cc_commands.append(ln)
if len(cc_commands) != 1:
print('Fatal error: unable to determine cc1 command: %r' % cc_output)
exit(1)
cc1_cmd = shlex.split(cc_commands[0])
if not cc1_cmd:
print('Fatal error: unable to determine cc1 command: %r' % cc_output)
exit(1)
return cc1_cmd
def cc1(args):
parser = argparse.ArgumentParser(prog='perf-helper cc1',
description='cc1 wrapper for order file generation')
parser.add_argument('cmd', nargs='*', help='')
# Use python's arg parser to handle all leading option arguments, but pass
# everything else through to dtrace
first_cmd = next(arg for arg in args if not arg.startswith("--"))
last_arg_idx = args.index(first_cmd)
opts = parser.parse_args(args[:last_arg_idx])
cmd = args[last_arg_idx:]
# clear the profile file env, so that we don't generate profdata
# when capturing the cc1 command
cc1_env = test_env
cc1_env["LLVM_PROFILE_FILE"] = os.devnull
cc1_cmd = get_cc1_command_for_args(cmd, cc1_env)
subprocess.check_call(cc1_cmd)
return 0
def parse_dtrace_symbol_file(path, all_symbols, all_symbols_set,
missing_symbols, opts):
def fix_mangling(symbol):
if sys.platform == "darwin":
if symbol[0] != '_' and symbol != 'start':
symbol = '_' + symbol
return symbol
def get_symbols_with_prefix(symbol):
start_index = bisect.bisect_left(all_symbols, symbol)
for s in all_symbols[start_index:]:
if not s.startswith(symbol):
break
yield s
# Extract the list of symbols from the given file, which is assumed to be
# the output of a dtrace run logging either probefunc or ustack(1) and
# nothing else. The dtrace -xdemangle option needs to be used.
#
# This is particular to OS X at the moment, because of the '_' handling.
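# For reference (reconstructed from the parsing below, not from a real trace),
# the relevant input lines look like:
#   dtrace-TS: 1234567890
#   dtrace-Symbol: _Z4funcv           (probefunc output)
#   dtrace-Symbol: clang`_Z4funcv     (ustack(1) output, module`function)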
with open(path) as f:
current_timestamp = None
for ln in f:
# Drop leading and trailing whitespace.
ln = ln.strip()
if not ln.startswith("dtrace-"):
continue
# If this is a timestamp specifier, extract it.
if ln.startswith("dtrace-TS: "):
_,data = ln.split(': ', 1)
if not data.isdigit():
print("warning: unrecognized timestamp line %r, ignoring" % ln,
file=sys.stderr)
continue
current_timestamp = int(data)
continue
elif ln.startswith("dtrace-Symbol: "):
_,ln = ln.split(': ', 1)
if not ln:
continue
# If there is a '`' in the line, assume it is a ustack(1) entry in
# the form of <modulename>`<modulefunc>, where <modulefunc> is never
# truncated (but does need the mangling patched).
if '`' in ln:
yield (current_timestamp, fix_mangling(ln.split('`',1)[1]))
continue
# Otherwise, assume this is a probefunc printout. DTrace on OS X
# seems to have a bug where it prints the mangled version of symbols
# which aren't C++ mangled. We just add a '_' to anything but start
# which doesn't already have a '_'.
symbol = fix_mangling(ln)
# If we don't know all the symbols, or the symbol is one of them,
# just return it.
if not all_symbols_set or symbol in all_symbols_set:
yield (current_timestamp, symbol)
continue
# Otherwise, we have a symbol name which isn't present in the
# binary. We assume it is truncated, and try to extend it.
# Get all the symbols with this prefix.
possible_symbols = list(get_symbols_with_prefix(symbol))
if not possible_symbols:
continue
# If we found too many possible symbols, ignore this as a prefix.
if len(possible_symbols) > 100:
print( "warning: ignoring symbol %r " % symbol +
"(no match and too many possible suffixes)", file=sys.stderr)
continue
# Report that we resolved a missing symbol.
if opts.show_missing_symbols and symbol not in missing_symbols:
print("warning: resolved missing symbol %r" % symbol, file=sys.stderr)
missing_symbols.add(symbol)
# Otherwise, treat all the possible matches as having occurred. This
# is an over-approximation, but it should be ok in practice.
for s in possible_symbols:
yield (current_timestamp, s)
def uniq(list):
seen = set()
for item in list:
if item not in seen:
yield item
seen.add(item)
def form_by_call_order(symbol_lists):
# Simply strategy, just return symbols in order of occurrence, even across
# multiple runs.
return uniq(s for symbols in symbol_lists for s in symbols)
def form_by_call_order_fair(symbol_lists):
# More complicated strategy that tries to respect the call order across all
# of the test cases, instead of giving a huge preference to the first test
# case.
# First, uniq all the lists.
uniq_lists = [list(uniq(symbols)) for symbols in symbol_lists]
# Compute the successors for each list.
succs = {}
for symbols in uniq_lists:
for a,b in zip(symbols[:-1], symbols[1:]):
succs[a] = items = succs.get(a, [])
if b not in items:
items.append(b)
# Emit all the symbols, but make sure to always emit all successors from any
# call list whenever we see a symbol.
#
# There isn't much science here, but this sometimes works better than the
# more naive strategy. Then again, sometimes it doesn't so more research is
# probably needed.
return uniq(s
for symbols in symbol_lists
for node in symbols
for s in ([node] + succs.get(node,[])))
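# Worked example (added for illustration): for call lists [A, B, C] and [A, C]
# the successor map is {A: [B, C], B: [C]}, so every successor of A is flushed
# as soon as A is seen and the merged order comes out as A, B, C.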
def form_by_frequency(symbol_lists):
# Form the order file by just putting the most commonly occurring symbols
# first. This assumes the data files didn't use the oneshot dtrace method.
counts = {}
for symbols in symbol_lists:
for a in symbols:
counts[a] = counts.get(a,0) + 1
by_count = counts.items()
by_count.sort(key = lambda (_,n): -n)
return [s for s,n in by_count]
def form_by_random(symbol_lists):
# Randomize the symbols. random.shuffle needs a concrete sequence, so the
# uniq() generator is materialized into a list first.
merged_symbols = list(uniq(s for symbols in symbol_lists
for s in symbols))
random.shuffle(merged_symbols)
return merged_symbols
def form_by_alphabetical(symbol_lists):
# Alphabetize the symbols.
merged_symbols = list(set(s for symbols in symbol_lists for s in symbols))
merged_symbols.sort()
return merged_symbols
methods = dict((name[len("form_by_"):],value)
for name,value in locals().items() if name.startswith("form_by_"))
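# With the strategies defined above this resolves to the choices
# 'call_order', 'call_order_fair', 'frequency', 'random' and 'alphabetical',
# which are the valid values for the --method flag below.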
def genOrderFile(args):
parser = argparse.ArgumentParser(
"%prog [options] <dtrace data file directories>]")
parser.add_argument('input', nargs='+', help='')
parser.add_argument("--binary", metavar="PATH", type=str, dest="binary_path",
help="Path to the binary being ordered (for getting all symbols)",
default=None)
parser.add_argument("--output", dest="output_path",
help="path to output order file to write", default=None, required=True,
metavar="PATH")
parser.add_argument("--show-missing-symbols", dest="show_missing_symbols",
help="show symbols which are 'fixed up' to a valid name (requires --binary)",
action="store_true", default=None)
parser.add_argument("--output-unordered-symbols",
dest="output_unordered_symbols_path",
help="write a list of the unordered symbols to PATH (requires --binary)",
default=None, metavar="PATH")
parser.add_argument("--method", dest="method",
help="order file generation method to use", choices=methods.keys(),
default='call_order')
opts = parser.parse_args(args)
# If the user gave us a binary, get all the symbols in the binary by
# snarfing 'nm' output.
if opts.binary_path is not None:
output = subprocess.check_output(['nm', '-P', opts.binary_path])
lines = output.split("\n")
all_symbols = [ln.split(' ',1)[0]
for ln in lines
if ln.strip()]
print("found %d symbols in binary" % len(all_symbols))
all_symbols.sort()
else:
all_symbols = []
all_symbols_set = set(all_symbols)
# Compute the list of input files.
input_files = []
for dirname in opts.input:
input_files.extend(findFilesWithExtension(dirname, "dtrace"))
# Load all of the input files.
print("loading from %d data files" % len(input_files))
missing_symbols = set()
timestamped_symbol_lists = [
list(parse_dtrace_symbol_file(path, all_symbols, all_symbols_set,
missing_symbols, opts))
for path in input_files]
# Reorder each symbol list.
symbol_lists = []
for timestamped_symbols_list in timestamped_symbol_lists:
timestamped_symbols_list.sort()
symbol_lists.append([symbol for _,symbol in timestamped_symbols_list])
# Execute the desired order file generation method.
method = methods.get(opts.method)
result = list(method(symbol_lists))
# Report to the user on what percentage of symbols are present in the order
# file.
num_ordered_symbols = len(result)
if all_symbols:
print("note: order file contains %d/%d symbols (%.2f%%)" % (
num_ordered_symbols, len(all_symbols),
100.*num_ordered_symbols/len(all_symbols)), file=sys.stderr)
if opts.output_unordered_symbols_path:
ordered_symbols_set = set(result)
with open(opts.output_unordered_symbols_path, 'w') as f:
f.write("\n".join(s for s in all_symbols if s not in ordered_symbols_set))
# Write the order file.
with open(opts.output_path, 'w') as f:
f.write("\n".join(result))
f.write("\n")
return 0
commands = {'clean' : clean,
'merge' : merge,
'dtrace' : dtrace,
'cc1' : cc1,
'gen-order-file' : genOrderFile}
def main():
f = commands[sys.argv[1]]
sys.exit(f(sys.argv[2:]))
if __name__ == '__main__':
main()
|
scalyr_agent/third_party_tls/oscrypto/_cipher_suites.py | zak905/scalyr-agent-2 | 3,373 | 12683751 | <reponame>zak905/scalyr-agent-2
# coding: utf-8
from __future__ import unicode_literals, division, absolute_import, print_function
__all__ = [
'CIPHER_SUITE_MAP',
]
CIPHER_SUITE_MAP = {
b'\x00\x00': 'TLS_NULL_WITH_NULL_NULL',
b'\x00\x01': 'TLS_RSA_WITH_NULL_MD5',
b'\x00\x02': 'TLS_RSA_WITH_NULL_SHA',
b'\x00\x03': 'TLS_RSA_EXPORT_WITH_RC4_40_MD5',
b'\x00\x04': 'TLS_RSA_WITH_RC4_128_MD5',
b'\x00\x05': 'TLS_RSA_WITH_RC4_128_SHA',
b'\x00\x06': 'TLS_RSA_EXPORT_WITH_RC2_CBC_40_MD5',
b'\x00\x07': 'TLS_RSA_WITH_IDEA_CBC_SHA',
b'\x00\x08': 'TLS_RSA_EXPORT_WITH_DES40_CBC_SHA',
b'\x00\x09': 'TLS_RSA_WITH_DES_CBC_SHA',
b'\x00\x0A': 'TLS_RSA_WITH_3DES_EDE_CBC_SHA',
b'\x00\x0B': 'TLS_DH_DSS_EXPORT_WITH_DES40_CBC_SHA',
b'\x00\x0C': 'TLS_DH_DSS_WITH_DES_CBC_SHA',
b'\x00\x0D': 'TLS_DH_DSS_WITH_3DES_EDE_CBC_SHA',
b'\x00\x0E': 'TLS_DH_RSA_EXPORT_WITH_DES40_CBC_SHA',
b'\x00\x0F': 'TLS_DH_RSA_WITH_DES_CBC_SHA',
b'\x00\x10': 'TLS_DH_RSA_WITH_3DES_EDE_CBC_SHA',
b'\x00\x11': 'TLS_DHE_DSS_EXPORT_WITH_DES40_CBC_SHA',
b'\x00\x12': 'TLS_DHE_DSS_WITH_DES_CBC_SHA',
b'\x00\x13': 'TLS_DHE_DSS_WITH_3DES_EDE_CBC_SHA',
b'\x00\x14': 'TLS_DHE_RSA_EXPORT_WITH_DES40_CBC_SHA',
b'\x00\x15': 'TLS_DHE_RSA_WITH_DES_CBC_SHA',
b'\x00\x16': 'TLS_DHE_RSA_WITH_3DES_EDE_CBC_SHA',
b'\x00\x17': 'TLS_DH_anon_EXPORT_WITH_RC4_40_MD5',
b'\x00\x18': 'TLS_DH_anon_WITH_RC4_128_MD5',
b'\x00\x19': 'TLS_DH_anon_EXPORT_WITH_DES40_CBC_SHA',
b'\x00\x1A': 'TLS_DH_anon_WITH_DES_CBC_SHA',
b'\x00\x1B': 'TLS_DH_anon_WITH_3DES_EDE_CBC_SHA',
b'\x00\x1E': 'TLS_KRB5_WITH_DES_CBC_SHA',
b'\x00\x1F': 'TLS_KRB5_WITH_3DES_EDE_CBC_SHA',
b'\x00\x20': 'TLS_KRB5_WITH_RC4_128_SHA',
b'\x00\x21': 'TLS_KRB5_WITH_IDEA_CBC_SHA',
b'\x00\x22': 'TLS_KRB5_WITH_DES_CBC_MD5',
b'\x00\x23': 'TLS_KRB5_WITH_3DES_EDE_CBC_MD5',
b'\x00\x24': 'TLS_KRB5_WITH_RC4_128_MD5',
b'\x00\x25': 'TLS_KRB5_WITH_IDEA_CBC_MD5',
b'\x00\x26': 'TLS_KRB5_EXPORT_WITH_DES_CBC_40_SHA',
b'\x00\x27': 'TLS_KRB5_EXPORT_WITH_RC2_CBC_40_SHA',
b'\x00\x28': 'TLS_KRB5_EXPORT_WITH_RC4_40_SHA',
b'\x00\x29': 'TLS_KRB5_EXPORT_WITH_DES_CBC_40_MD5',
b'\x00\x2A': 'TLS_KRB5_EXPORT_WITH_RC2_CBC_40_MD5',
b'\x00\x2B': 'TLS_KRB5_EXPORT_WITH_RC4_40_MD5',
b'\x00\x2C': 'TLS_PSK_WITH_NULL_SHA',
b'\x00\x2D': 'TLS_DHE_PSK_WITH_NULL_SHA',
b'\x00\x2E': 'TLS_RSA_PSK_WITH_NULL_SHA',
b'\x00\x2F': 'TLS_RSA_WITH_AES_128_CBC_SHA',
b'\x00\x30': 'TLS_DH_DSS_WITH_AES_128_CBC_SHA',
b'\x00\x31': 'TLS_DH_RSA_WITH_AES_128_CBC_SHA',
b'\x00\x32': 'TLS_DHE_DSS_WITH_AES_128_CBC_SHA',
b'\x00\x33': 'TLS_DHE_RSA_WITH_AES_128_CBC_SHA',
b'\x00\x34': 'TLS_DH_anon_WITH_AES_128_CBC_SHA',
b'\x00\x35': 'TLS_RSA_WITH_AES_256_CBC_SHA',
b'\x00\x36': 'TLS_DH_DSS_WITH_AES_256_CBC_SHA',
b'\x00\x37': 'TLS_DH_RSA_WITH_AES_256_CBC_SHA',
b'\x00\x38': 'TLS_DHE_DSS_WITH_AES_256_CBC_SHA',
b'\x00\x39': 'TLS_DHE_RSA_WITH_AES_256_CBC_SHA',
b'\x00\x3A': 'TLS_DH_anon_WITH_AES_256_CBC_SHA',
b'\x00\x3B': 'TLS_RSA_WITH_NULL_SHA256',
b'\x00\x3C': 'TLS_RSA_WITH_AES_128_CBC_SHA256',
b'\x00\x3D': 'TLS_RSA_WITH_AES_256_CBC_SHA256',
b'\x00\x3E': 'TLS_DH_DSS_WITH_AES_128_CBC_SHA256',
b'\x00\x3F': 'TLS_DH_RSA_WITH_AES_128_CBC_SHA256',
b'\x00\x40': 'TLS_DHE_DSS_WITH_AES_128_CBC_SHA256',
b'\x00\x41': 'TLS_RSA_WITH_CAMELLIA_128_CBC_SHA',
b'\x00\x42': 'TLS_DH_DSS_WITH_CAMELLIA_128_CBC_SHA',
b'\x00\x43': 'TLS_DH_RSA_WITH_CAMELLIA_128_CBC_SHA',
b'\x00\x44': 'TLS_DHE_DSS_WITH_CAMELLIA_128_CBC_SHA',
b'\x00\x45': 'TLS_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA',
b'\x00\x46': 'TLS_DH_anon_WITH_CAMELLIA_128_CBC_SHA',
b'\x00\x67': 'TLS_DHE_RSA_WITH_AES_128_CBC_SHA256',
b'\x00\x68': 'TLS_DH_DSS_WITH_AES_256_CBC_SHA256',
b'\x00\x69': 'TLS_DH_RSA_WITH_AES_256_CBC_SHA256',
b'\x00\x6A': 'TLS_DHE_DSS_WITH_AES_256_CBC_SHA256',
b'\x00\x6B': 'TLS_DHE_RSA_WITH_AES_256_CBC_SHA256',
b'\x00\x6C': 'TLS_DH_anon_WITH_AES_128_CBC_SHA256',
b'\x00\x6D': 'TLS_DH_anon_WITH_AES_256_CBC_SHA256',
b'\x00\x84': 'TLS_RSA_WITH_CAMELLIA_256_CBC_SHA',
b'\x00\x85': 'TLS_DH_DSS_WITH_CAMELLIA_256_CBC_SHA',
b'\x00\x86': 'TLS_DH_RSA_WITH_CAMELLIA_256_CBC_SHA',
b'\x00\x87': 'TLS_DHE_DSS_WITH_CAMELLIA_256_CBC_SHA',
b'\x00\x88': 'TLS_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA',
b'\x00\x89': 'TLS_DH_anon_WITH_CAMELLIA_256_CBC_SHA',
b'\x00\x8A': 'TLS_PSK_WITH_RC4_128_SHA',
b'\x00\x8B': 'TLS_PSK_WITH_3DES_EDE_CBC_SHA',
b'\x00\x8C': 'TLS_PSK_WITH_AES_128_CBC_SHA',
b'\x00\x8D': 'TLS_PSK_WITH_AES_256_CBC_SHA',
b'\x00\x8E': 'TLS_DHE_PSK_WITH_RC4_128_SHA',
b'\x00\x8F': 'TLS_DHE_PSK_WITH_3DES_EDE_CBC_SHA',
b'\x00\x90': 'TLS_DHE_PSK_WITH_AES_128_CBC_SHA',
b'\x00\x91': 'TLS_DHE_PSK_WITH_AES_256_CBC_SHA',
b'\x00\x92': 'TLS_RSA_PSK_WITH_RC4_128_SHA',
b'\x00\x93': 'TLS_RSA_PSK_WITH_3DES_EDE_CBC_SHA',
b'\x00\x94': 'TLS_RSA_PSK_WITH_AES_128_CBC_SHA',
b'\x00\x95': 'TLS_RSA_PSK_WITH_AES_256_CBC_SHA',
b'\x00\x96': 'TLS_RSA_WITH_SEED_CBC_SHA',
b'\x00\x97': 'TLS_DH_DSS_WITH_SEED_CBC_SHA',
b'\x00\x98': 'TLS_DH_RSA_WITH_SEED_CBC_SHA',
b'\x00\x99': 'TLS_DHE_DSS_WITH_SEED_CBC_SHA',
b'\x00\x9A': 'TLS_DHE_RSA_WITH_SEED_CBC_SHA',
b'\x00\x9B': 'TLS_DH_anon_WITH_SEED_CBC_SHA',
b'\x00\x9C': 'TLS_RSA_WITH_AES_128_GCM_SHA256',
b'\x00\x9D': 'TLS_RSA_WITH_AES_256_GCM_SHA384',
b'\x00\x9E': 'TLS_DHE_RSA_WITH_AES_128_GCM_SHA256',
b'\x00\x9F': 'TLS_DHE_RSA_WITH_AES_256_GCM_SHA384',
b'\x00\xA0': 'TLS_DH_RSA_WITH_AES_128_GCM_SHA256',
b'\x00\xA1': 'TLS_DH_RSA_WITH_AES_256_GCM_SHA384',
b'\x00\xA2': 'TLS_DHE_DSS_WITH_AES_128_GCM_SHA256',
b'\x00\xA3': 'TLS_DHE_DSS_WITH_AES_256_GCM_SHA384',
b'\x00\xA4': 'TLS_DH_DSS_WITH_AES_128_GCM_SHA256',
b'\x00\xA5': 'TLS_DH_DSS_WITH_AES_256_GCM_SHA384',
b'\x00\xA6': 'TLS_DH_anon_WITH_AES_128_GCM_SHA256',
b'\x00\xA7': 'TLS_DH_anon_WITH_AES_256_GCM_SHA384',
b'\x00\xA8': 'TLS_PSK_WITH_AES_128_GCM_SHA256',
b'\x00\xA9': 'TLS_PSK_WITH_AES_256_GCM_SHA384',
b'\x00\xAA': 'TLS_DHE_PSK_WITH_AES_128_GCM_SHA256',
b'\x00\xAB': 'TLS_DHE_PSK_WITH_AES_256_GCM_SHA384',
b'\x00\xAC': 'TLS_RSA_PSK_WITH_AES_128_GCM_SHA256',
b'\x00\xAD': 'TLS_RSA_PSK_WITH_AES_256_GCM_SHA384',
b'\x00\xAE': 'TLS_PSK_WITH_AES_128_CBC_SHA256',
b'\x00\xAF': 'TLS_PSK_WITH_AES_256_CBC_SHA384',
b'\x00\xB0': 'TLS_PSK_WITH_NULL_SHA256',
b'\x00\xB1': 'TLS_PSK_WITH_NULL_SHA384',
b'\x00\xB2': 'TLS_DHE_PSK_WITH_AES_128_CBC_SHA256',
b'\x00\xB3': 'TLS_DHE_PSK_WITH_AES_256_CBC_SHA384',
b'\x00\xB4': 'TLS_DHE_PSK_WITH_NULL_SHA256',
b'\x00\xB5': 'TLS_DHE_PSK_WITH_NULL_SHA384',
b'\x00\xB6': 'TLS_RSA_PSK_WITH_AES_128_CBC_SHA256',
b'\x00\xB7': 'TLS_RSA_PSK_WITH_AES_256_CBC_SHA384',
b'\x00\xB8': 'TLS_RSA_PSK_WITH_NULL_SHA256',
b'\x00\xB9': 'TLS_RSA_PSK_WITH_NULL_SHA384',
b'\x00\xBA': 'TLS_RSA_WITH_CAMELLIA_128_CBC_SHA256',
b'\x00\xBB': 'TLS_DH_DSS_WITH_CAMELLIA_128_CBC_SHA256',
b'\x00\xBC': 'TLS_DH_RSA_WITH_CAMELLIA_128_CBC_SHA256',
b'\x00\xBD': 'TLS_DHE_DSS_WITH_CAMELLIA_128_CBC_SHA256',
b'\x00\xBE': 'TLS_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA256',
b'\x00\xBF': 'TLS_DH_anon_WITH_CAMELLIA_128_CBC_SHA256',
b'\x00\xC0': 'TLS_RSA_WITH_CAMELLIA_256_CBC_SHA256',
b'\x00\xC1': 'TLS_DH_DSS_WITH_CAMELLIA_256_CBC_SHA256',
b'\x00\xC2': 'TLS_DH_RSA_WITH_CAMELLIA_256_CBC_SHA256',
b'\x00\xC3': 'TLS_DHE_DSS_WITH_CAMELLIA_256_CBC_SHA256',
b'\x00\xC4': 'TLS_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA256',
b'\x00\xC5': 'TLS_DH_anon_WITH_CAMELLIA_256_CBC_SHA256',
b'\x00\xFF': 'TLS_EMPTY_RENEGOTIATION_INFO_SCSV',
b'\x13\x01': 'TLS_AES_128_GCM_SHA256',
b'\x13\x02': 'TLS_AES_256_GCM_SHA384',
b'\x13\x03': 'TLS_CHACHA20_POLY1305_SHA256',
b'\x13\x04': 'TLS_AES_128_CCM_SHA256',
b'\x13\x05': 'TLS_AES_128_CCM_8_SHA256',
b'\xC0\x01': 'TLS_ECDH_ECDSA_WITH_NULL_SHA',
b'\xC0\x02': 'TLS_ECDH_ECDSA_WITH_RC4_128_SHA',
b'\xC0\x03': 'TLS_ECDH_ECDSA_WITH_3DES_EDE_CBC_SHA',
b'\xC0\x04': 'TLS_ECDH_ECDSA_WITH_AES_128_CBC_SHA',
b'\xC0\x05': 'TLS_ECDH_ECDSA_WITH_AES_256_CBC_SHA',
b'\xC0\x06': 'TLS_ECDHE_ECDSA_WITH_NULL_SHA',
b'\xC0\x07': 'TLS_ECDHE_ECDSA_WITH_RC4_128_SHA',
b'\xC0\x08': 'TLS_ECDHE_ECDSA_WITH_3DES_EDE_CBC_SHA',
b'\xC0\x09': 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA',
b'\xC0\x0A': 'TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA',
b'\xC0\x0B': 'TLS_ECDH_RSA_WITH_NULL_SHA',
b'\xC0\x0C': 'TLS_ECDH_RSA_WITH_RC4_128_SHA',
b'\xC0\x0D': 'TLS_ECDH_RSA_WITH_3DES_EDE_CBC_SHA',
b'\xC0\x0E': 'TLS_ECDH_RSA_WITH_AES_128_CBC_SHA',
b'\xC0\x0F': 'TLS_ECDH_RSA_WITH_AES_256_CBC_SHA',
b'\xC0\x10': 'TLS_ECDHE_RSA_WITH_NULL_SHA',
b'\xC0\x11': 'TLS_ECDHE_RSA_WITH_RC4_128_SHA',
b'\xC0\x12': 'TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA',
b'\xC0\x13': 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA',
b'\xC0\x14': 'TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA',
b'\xC0\x15': 'TLS_ECDH_anon_WITH_NULL_SHA',
b'\xC0\x16': 'TLS_ECDH_anon_WITH_RC4_128_SHA',
b'\xC0\x17': 'TLS_ECDH_anon_WITH_3DES_EDE_CBC_SHA',
b'\xC0\x18': 'TLS_ECDH_anon_WITH_AES_128_CBC_SHA',
b'\xC0\x19': 'TLS_ECDH_anon_WITH_AES_256_CBC_SHA',
b'\xC0\x1A': 'TLS_SRP_SHA_WITH_3DES_EDE_CBC_SHA',
b'\xC0\x1B': 'TLS_SRP_SHA_RSA_WITH_3DES_EDE_CBC_SHA',
b'\xC0\x1C': 'TLS_SRP_SHA_DSS_WITH_3DES_EDE_CBC_SHA',
b'\xC0\x1D': 'TLS_SRP_SHA_WITH_AES_128_CBC_SHA',
b'\xC0\x1E': 'TLS_SRP_SHA_RSA_WITH_AES_128_CBC_SHA',
b'\xC0\x1F': 'TLS_SRP_SHA_DSS_WITH_AES_128_CBC_SHA',
b'\xC0\x20': 'TLS_SRP_SHA_WITH_AES_256_CBC_SHA',
b'\xC0\x21': 'TLS_SRP_SHA_RSA_WITH_AES_256_CBC_SHA',
b'\xC0\x22': 'TLS_SRP_SHA_DSS_WITH_AES_256_CBC_SHA',
b'\xC0\x23': 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256',
b'\xC0\x24': 'TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384',
b'\xC0\x25': 'TLS_ECDH_ECDSA_WITH_AES_128_CBC_SHA256',
b'\xC0\x26': 'TLS_ECDH_ECDSA_WITH_AES_256_CBC_SHA384',
b'\xC0\x27': 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256',
b'\xC0\x28': 'TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384',
b'\xC0\x29': 'TLS_ECDH_RSA_WITH_AES_128_CBC_SHA256',
b'\xC0\x2A': 'TLS_ECDH_RSA_WITH_AES_256_CBC_SHA384',
b'\xC0\x2B': 'TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256',
b'\xC0\x2C': 'TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384',
b'\xC0\x2D': 'TLS_ECDH_ECDSA_WITH_AES_128_GCM_SHA256',
b'\xC0\x2E': 'TLS_ECDH_ECDSA_WITH_AES_256_GCM_SHA384',
b'\xC0\x2F': 'TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256',
b'\xC0\x30': 'TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384',
b'\xC0\x31': 'TLS_ECDH_RSA_WITH_AES_128_GCM_SHA256',
b'\xC0\x32': 'TLS_ECDH_RSA_WITH_AES_256_GCM_SHA384',
b'\xC0\x33': 'TLS_ECDHE_PSK_WITH_RC4_128_SHA',
b'\xC0\x34': 'TLS_ECDHE_PSK_WITH_3DES_EDE_CBC_SHA',
b'\xC0\x35': 'TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA',
b'\xC0\x36': 'TLS_ECDHE_PSK_WITH_AES_256_CBC_SHA',
b'\xC0\x37': 'TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA256',
b'\xC0\x38': 'TLS_ECDHE_PSK_WITH_AES_256_CBC_SHA384',
b'\xC0\x39': 'TLS_ECDHE_PSK_WITH_NULL_SHA',
b'\xC0\x3A': 'TLS_ECDHE_PSK_WITH_NULL_SHA256',
b'\xC0\x3B': 'TLS_ECDHE_PSK_WITH_NULL_SHA384',
b'\xC0\x3C': 'TLS_RSA_WITH_ARIA_128_CBC_SHA256',
b'\xC0\x3D': 'TLS_RSA_WITH_ARIA_256_CBC_SHA384',
b'\xC0\x3E': 'TLS_DH_DSS_WITH_ARIA_128_CBC_SHA256',
b'\xC0\x3F': 'TLS_DH_DSS_WITH_ARIA_256_CBC_SHA384',
b'\xC0\x40': 'TLS_DH_RSA_WITH_ARIA_128_CBC_SHA256',
b'\xC0\x41': 'TLS_DH_RSA_WITH_ARIA_256_CBC_SHA384',
b'\xC0\x42': 'TLS_DHE_DSS_WITH_ARIA_128_CBC_SHA256',
b'\xC0\x43': 'TLS_DHE_DSS_WITH_ARIA_256_CBC_SHA384',
b'\xC0\x44': 'TLS_DHE_RSA_WITH_ARIA_128_CBC_SHA256',
b'\xC0\x45': 'TLS_DHE_RSA_WITH_ARIA_256_CBC_SHA384',
b'\xC0\x46': 'TLS_DH_anon_WITH_ARIA_128_CBC_SHA256',
b'\xC0\x47': 'TLS_DH_anon_WITH_ARIA_256_CBC_SHA384',
b'\xC0\x48': 'TLS_ECDHE_ECDSA_WITH_ARIA_128_CBC_SHA256',
b'\xC0\x49': 'TLS_ECDHE_ECDSA_WITH_ARIA_256_CBC_SHA384',
b'\xC0\x4A': 'TLS_ECDH_ECDSA_WITH_ARIA_128_CBC_SHA256',
b'\xC0\x4B': 'TLS_ECDH_ECDSA_WITH_ARIA_256_CBC_SHA384',
b'\xC0\x4C': 'TLS_ECDHE_RSA_WITH_ARIA_128_CBC_SHA256',
b'\xC0\x4D': 'TLS_ECDHE_RSA_WITH_ARIA_256_CBC_SHA384',
b'\xC0\x4E': 'TLS_ECDH_RSA_WITH_ARIA_128_CBC_SHA256',
b'\xC0\x4F': 'TLS_ECDH_RSA_WITH_ARIA_256_CBC_SHA384',
b'\xC0\x50': 'TLS_RSA_WITH_ARIA_128_GCM_SHA256',
b'\xC0\x51': 'TLS_RSA_WITH_ARIA_256_GCM_SHA384',
b'\xC0\x52': 'TLS_DHE_RSA_WITH_ARIA_128_GCM_SHA256',
b'\xC0\x53': 'TLS_DHE_RSA_WITH_ARIA_256_GCM_SHA384',
b'\xC0\x54': 'TLS_DH_RSA_WITH_ARIA_128_GCM_SHA256',
b'\xC0\x55': 'TLS_DH_RSA_WITH_ARIA_256_GCM_SHA384',
b'\xC0\x56': 'TLS_DHE_DSS_WITH_ARIA_128_GCM_SHA256',
b'\xC0\x57': 'TLS_DHE_DSS_WITH_ARIA_256_GCM_SHA384',
b'\xC0\x58': 'TLS_DH_DSS_WITH_ARIA_128_GCM_SHA256',
b'\xC0\x59': 'TLS_DH_DSS_WITH_ARIA_256_GCM_SHA384',
b'\xC0\x5A': 'TLS_DH_anon_WITH_ARIA_128_GCM_SHA256',
b'\xC0\x5B': 'TLS_DH_anon_WITH_ARIA_256_GCM_SHA384',
b'\xC0\x5C': 'TLS_ECDHE_ECDSA_WITH_ARIA_128_GCM_SHA256',
b'\xC0\x5D': 'TLS_ECDHE_ECDSA_WITH_ARIA_256_GCM_SHA384',
b'\xC0\x5E': 'TLS_ECDH_ECDSA_WITH_ARIA_128_GCM_SHA256',
b'\xC0\x5F': 'TLS_ECDH_ECDSA_WITH_ARIA_256_GCM_SHA384',
b'\xC0\x60': 'TLS_ECDHE_RSA_WITH_ARIA_128_GCM_SHA256',
b'\xC0\x61': 'TLS_ECDHE_RSA_WITH_ARIA_256_GCM_SHA384',
b'\xC0\x62': 'TLS_ECDH_RSA_WITH_ARIA_128_GCM_SHA256',
b'\xC0\x63': 'TLS_ECDH_RSA_WITH_ARIA_256_GCM_SHA384',
b'\xC0\x64': 'TLS_PSK_WITH_ARIA_128_CBC_SHA256',
b'\xC0\x65': 'TLS_PSK_WITH_ARIA_256_CBC_SHA384',
b'\xC0\x66': 'TLS_DHE_PSK_WITH_ARIA_128_CBC_SHA256',
b'\xC0\x67': 'TLS_DHE_PSK_WITH_ARIA_256_CBC_SHA384',
b'\xC0\x68': 'TLS_RSA_PSK_WITH_ARIA_128_CBC_SHA256',
b'\xC0\x69': 'TLS_RSA_PSK_WITH_ARIA_256_CBC_SHA384',
b'\xC0\x6A': 'TLS_PSK_WITH_ARIA_128_GCM_SHA256',
b'\xC0\x6B': 'TLS_PSK_WITH_ARIA_256_GCM_SHA384',
b'\xC0\x6C': 'TLS_DHE_PSK_WITH_ARIA_128_GCM_SHA256',
b'\xC0\x6D': 'TLS_DHE_PSK_WITH_ARIA_256_GCM_SHA384',
b'\xC0\x6E': 'TLS_RSA_PSK_WITH_ARIA_128_GCM_SHA256',
b'\xC0\x6F': 'TLS_RSA_PSK_WITH_ARIA_256_GCM_SHA384',
b'\xC0\x70': 'TLS_ECDHE_PSK_WITH_ARIA_128_CBC_SHA256',
b'\xC0\x71': 'TLS_ECDHE_PSK_WITH_ARIA_256_CBC_SHA384',
b'\xC0\x72': 'TLS_ECDHE_ECDSA_WITH_CAMELLIA_128_CBC_SHA256',
b'\xC0\x73': 'TLS_ECDHE_ECDSA_WITH_CAMELLIA_256_CBC_SHA384',
b'\xC0\x74': 'TLS_ECDH_ECDSA_WITH_CAMELLIA_128_CBC_SHA256',
b'\xC0\x75': 'TLS_ECDH_ECDSA_WITH_CAMELLIA_256_CBC_SHA384',
b'\xC0\x76': 'TLS_ECDHE_RSA_WITH_CAMELLIA_128_CBC_SHA256',
b'\xC0\x77': 'TLS_ECDHE_RSA_WITH_CAMELLIA_256_CBC_SHA384',
b'\xC0\x78': 'TLS_ECDH_RSA_WITH_CAMELLIA_128_CBC_SHA256',
b'\xC0\x79': 'TLS_ECDH_RSA_WITH_CAMELLIA_256_CBC_SHA384',
b'\xC0\x7A': 'TLS_RSA_WITH_CAMELLIA_128_GCM_SHA256',
b'\xC0\x7B': 'TLS_RSA_WITH_CAMELLIA_256_GCM_SHA384',
b'\xC0\x7C': 'TLS_DHE_RSA_WITH_CAMELLIA_128_GCM_SHA256',
b'\xC0\x7D': 'TLS_DHE_RSA_WITH_CAMELLIA_256_GCM_SHA384',
b'\xC0\x7E': 'TLS_DH_RSA_WITH_CAMELLIA_128_GCM_SHA256',
b'\xC0\x7F': 'TLS_DH_RSA_WITH_CAMELLIA_256_GCM_SHA384',
b'\xC0\x80': 'TLS_DHE_DSS_WITH_CAMELLIA_128_GCM_SHA256',
b'\xC0\x81': 'TLS_DHE_DSS_WITH_CAMELLIA_256_GCM_SHA384',
b'\xC0\x82': 'TLS_DH_DSS_WITH_CAMELLIA_128_GCM_SHA256',
b'\xC0\x83': 'TLS_DH_DSS_WITH_CAMELLIA_256_GCM_SHA384',
b'\xC0\x84': 'TLS_DH_anon_WITH_CAMELLIA_128_GCM_SHA256',
b'\xC0\x85': 'TLS_DH_anon_WITH_CAMELLIA_256_GCM_SHA384',
b'\xC0\x86': 'TLS_ECDHE_ECDSA_WITH_CAMELLIA_128_GCM_SHA256',
b'\xC0\x87': 'TLS_ECDHE_ECDSA_WITH_CAMELLIA_256_GCM_SHA384',
b'\xC0\x88': 'TLS_ECDH_ECDSA_WITH_CAMELLIA_128_GCM_SHA256',
b'\xC0\x89': 'TLS_ECDH_ECDSA_WITH_CAMELLIA_256_GCM_SHA384',
b'\xC0\x8A': 'TLS_ECDHE_RSA_WITH_CAMELLIA_128_GCM_SHA256',
b'\xC0\x8B': 'TLS_ECDHE_RSA_WITH_CAMELLIA_256_GCM_SHA384',
b'\xC0\x8C': 'TLS_ECDH_RSA_WITH_CAMELLIA_128_GCM_SHA256',
b'\xC0\x8D': 'TLS_ECDH_RSA_WITH_CAMELLIA_256_GCM_SHA384',
b'\xC0\x8E': 'TLS_PSK_WITH_CAMELLIA_128_GCM_SHA256',
b'\xC0\x8F': 'TLS_PSK_WITH_CAMELLIA_256_GCM_SHA384',
b'\xC0\x90': 'TLS_DHE_PSK_WITH_CAMELLIA_128_GCM_SHA256',
b'\xC0\x91': 'TLS_DHE_PSK_WITH_CAMELLIA_256_GCM_SHA384',
b'\xC0\x92': 'TLS_RSA_PSK_WITH_CAMELLIA_128_GCM_SHA256',
b'\xC0\x93': 'TLS_RSA_PSK_WITH_CAMELLIA_256_GCM_SHA384',
b'\xC0\x94': 'TLS_PSK_WITH_CAMELLIA_128_CBC_SHA256',
b'\xC0\x95': 'TLS_PSK_WITH_CAMELLIA_256_CBC_SHA384',
b'\xC0\x96': 'TLS_DHE_PSK_WITH_CAMELLIA_128_CBC_SHA256',
b'\xC0\x97': 'TLS_DHE_PSK_WITH_CAMELLIA_256_CBC_SHA384',
b'\xC0\x98': 'TLS_RSA_PSK_WITH_CAMELLIA_128_CBC_SHA256',
b'\xC0\x99': 'TLS_RSA_PSK_WITH_CAMELLIA_256_CBC_SHA384',
b'\xC0\x9A': 'TLS_ECDHE_PSK_WITH_CAMELLIA_128_CBC_SHA256',
b'\xC0\x9B': 'TLS_ECDHE_PSK_WITH_CAMELLIA_256_CBC_SHA384',
b'\xC0\x9C': 'TLS_RSA_WITH_AES_128_CCM',
b'\xC0\x9D': 'TLS_RSA_WITH_AES_256_CCM',
b'\xC0\x9E': 'TLS_DHE_RSA_WITH_AES_128_CCM',
b'\xC0\x9F': 'TLS_DHE_RSA_WITH_AES_256_CCM',
b'\xC0\xA0': 'TLS_RSA_WITH_AES_128_CCM_8',
b'\xC0\xA1': 'TLS_RSA_WITH_AES_256_CCM_8',
b'\xC0\xA2': 'TLS_DHE_RSA_WITH_AES_128_CCM_8',
b'\xC0\xA3': 'TLS_DHE_RSA_WITH_AES_256_CCM_8',
b'\xC0\xA4': 'TLS_PSK_WITH_AES_128_CCM',
b'\xC0\xA5': 'TLS_PSK_WITH_AES_256_CCM',
b'\xC0\xA6': 'TLS_DHE_PSK_WITH_AES_128_CCM',
b'\xC0\xA7': 'TLS_DHE_PSK_WITH_AES_256_CCM',
b'\xC0\xA8': 'TLS_PSK_WITH_AES_128_CCM_8',
b'\xC0\xA9': 'TLS_PSK_WITH_AES_256_CCM_8',
b'\xC0\xAA': 'TLS_PSK_DHE_WITH_AES_128_CCM_8',
b'\xC0\xAB': 'TLS_PSK_DHE_WITH_AES_256_CCM_8',
b'\xCC\xA8': 'TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256',
b'\xCC\xA9': 'TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256',
b'\xCC\xAA': 'TLS_DHE_RSA_WITH_CHACHA20_POLY1305_SHA256',
b'\xCC\xAB': 'TLS_PSK_WITH_CHACHA20_POLY1305_SHA256',
b'\xCC\xAC': 'TLS_ECDHE_PSK_WITH_CHACHA20_POLY1305_SHA256',
b'\xCC\xAD': 'TLS_DHE_PSK_WITH_CHACHA20_POLY1305_SHA256',
b'\xCC\xAE': 'TLS_RSA_PSK_WITH_CHACHA20_POLY1305_SHA256',
}
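# Added lookup sketch (not part of the upstream module): the two raw bytes of a
# negotiated cipher suite map straight to its IANA name via the table above.
if __name__ == '__main__':
    print(CIPHER_SUITE_MAP[b'\xC0\x2F'])  # TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256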
|
build/fbcode_builder/specs/rocksdb.py | facebookxx/folly | 1,831 | 12683762 | <reponame>facebookxx/folly
#!/usr/bin/env python
# Copyright (c) Facebook, Inc. and its affiliates.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
def fbcode_builder_spec(builder):
builder.add_option("rocksdb/_build:cmake_defines", {
"USE_RTTI": "1",
"PORTABLE": "ON",
})
return {
"steps": [
builder.fb_github_cmake_install("rocksdb/_build"),
],
}
|
third_party/gsutil/third_party/apitools/run_pylint.py | tingshao/catapult | 2,151 | 12683779 | #
# Copyright 2015 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Custom script to run PyLint on apitools codebase.
"Inspired" by the similar script in gcloud-python.
This runs pylint as a script via subprocess in two different
subprocesses. The first lints the production/library code
using the default rc file (PRODUCTION_RC). The second lints the
demo/test code using an rc file (TEST_RC) which allows more style
violations (hence it has a reduced number of style checks).
"""
import ConfigParser
import copy
import os
import subprocess
import sys
IGNORED_DIRECTORIES = [
'apitools/gen/testdata',
'samples/bigquery_sample/bigquery_v2',
'samples/dns_sample/dns_v1',
'samples/fusiontables_sample/fusiontables_v1',
'samples/iam_sample/iam_v1',
'samples/servicemanagement_sample/servicemanagement_v1',
'samples/storage_sample/storage_v1',
'venv',
]
IGNORED_FILES = [
'ez_setup.py',
'run_pylint.py',
'setup.py',
'apitools/base/py/gzip.py',
'apitools/base/py/gzip_test.py',
]
PRODUCTION_RC = 'default.pylintrc'
TEST_RC = 'reduced.pylintrc'
TEST_DISABLED_MESSAGES = [
'exec-used',
'invalid-name',
'missing-docstring',
'protected-access',
]
TEST_RC_ADDITIONS = {
'MESSAGES CONTROL': {
'disable': ',\n'.join(TEST_DISABLED_MESSAGES),
},
}
def read_config(filename):
"""Reads pylintrc config onto native ConfigParser object."""
config = ConfigParser.ConfigParser()
with open(filename, 'r') as file_obj:
config.readfp(file_obj)
return config
def make_test_rc(base_rc_filename, additions_dict, target_filename):
"""Combines a base rc and test additions into single file."""
main_cfg = read_config(base_rc_filename)
# Create fresh config for test, which must extend production.
test_cfg = ConfigParser.ConfigParser()
test_cfg._sections = copy.deepcopy(main_cfg._sections)
for section, opts in additions_dict.items():
curr_section = test_cfg._sections.setdefault(
section, test_cfg._dict())
for opt, opt_val in opts.items():
curr_val = curr_section.get(opt)
if curr_val is None:
raise KeyError('Expected to be adding to existing option.')
curr_section[opt] = '%s\n%s' % (curr_val, opt_val)
with open(target_filename, 'w') as file_obj:
test_cfg.write(file_obj)
def valid_filename(filename):
"""Checks if a file is a Python file and is not ignored."""
for directory in IGNORED_DIRECTORIES:
if filename.startswith(directory):
return False
return (filename.endswith('.py') and
filename not in IGNORED_FILES)
def is_production_filename(filename):
"""Checks if the file contains production code.
:rtype: boolean
:returns: Boolean indicating production status.
"""
return not ('demo' in filename or 'test' in filename or
filename.startswith('regression'))
def get_files_for_linting(allow_limited=True, diff_base=None):
"""Gets a list of files in the repository.
By default, returns all files via ``git ls-files``. However, in some cases
uses a specific commit or branch (a so-called diff base) to compare
against for changed files. (This requires ``allow_limited=True``.)
To speed up linting on Travis pull requests against master, we manually
set the diff base to origin/master. We don't do this on non-pull requests
since origin/master will be equivalent to the currently checked out code.
One could potentially use ${TRAVIS_COMMIT_RANGE} to find a diff base but
this value is not dependable.
:type allow_limited: boolean
:param allow_limited: Boolean indicating if a reduced set of files can
be used.
:rtype: pair
:returns: Tuple of the list of filenames to be linted and the diff base
that was used (None when no diff base was applied).
"""
if os.getenv('TRAVIS') == 'true':
# In travis, don't default to master.
diff_base = None
if (os.getenv('TRAVIS_BRANCH') == 'master' and
os.getenv('TRAVIS_PULL_REQUEST') != 'false'):
# In the case of a pull request into master, we want to
# diff against HEAD in master.
diff_base = 'origin/master'
if diff_base is not None and allow_limited:
result = subprocess.check_output(['git', 'diff', '--name-only',
diff_base])
print 'Using files changed relative to %s:' % (diff_base,)
print '-' * 60
print result.rstrip('\n') # Don't print trailing newlines.
print '-' * 60
else:
print 'Diff base not specified, listing all files in repository.'
result = subprocess.check_output(['git', 'ls-files'])
return result.rstrip('\n').split('\n'), diff_base
def get_python_files(all_files=None, diff_base=None):
"""Gets a list of all Python files in the repository that need linting.
Relies on :func:`get_files_for_linting()` to determine which files should
be considered.
NOTE: This requires ``git`` to be installed and requires that this
is run within the ``git`` repository.
:type all_files: list or ``NoneType``
:param all_files: Optional list of files to be linted.
:rtype: tuple
:returns: A tuple containing two lists and a boolean. The first list
contains all production files, the next all test/demo files and
the boolean indicates if a restricted fileset was used.
"""
using_restricted = False
if all_files is None:
all_files, diff_base = get_files_for_linting(diff_base=diff_base)
using_restricted = diff_base is not None
library_files = []
non_library_files = []
for filename in all_files:
if valid_filename(filename):
if is_production_filename(filename):
library_files.append(filename)
else:
non_library_files.append(filename)
return library_files, non_library_files, using_restricted
def lint_fileset(filenames, rcfile, description):
"""Lints a group of files using a given rcfile."""
# Only lint filenames that exist. For example, 'git diff --name-only'
# could spit out deleted / renamed files. Another alternative could
# be to use 'git diff --name-status' and filter out files with a
# status of 'D'.
filenames = [filename for filename in filenames
if os.path.exists(filename)]
if filenames:
rc_flag = '--rcfile=%s' % (rcfile,)
pylint_shell_command = ['pylint', rc_flag] + filenames
status_code = subprocess.call(pylint_shell_command)
if status_code != 0:
error_message = ('Pylint failed on %s with '
'status %d.' % (description, status_code))
print >> sys.stderr, error_message
sys.exit(status_code)
else:
print 'Skipping %s, no files to lint.' % (description,)
def main(argv):
"""Script entry point. Lints both sets of files."""
diff_base = argv[1] if len(argv) > 1 else None
make_test_rc(PRODUCTION_RC, TEST_RC_ADDITIONS, TEST_RC)
library_files, non_library_files, using_restricted = get_python_files(
diff_base=diff_base)
try:
lint_fileset(library_files, PRODUCTION_RC, 'library code')
lint_fileset(non_library_files, TEST_RC, 'test and demo code')
except SystemExit:
if not using_restricted:
raise
message = 'Restricted lint failed, expanding to full fileset.'
print >> sys.stderr, message
all_files, _ = get_files_for_linting(allow_limited=False)
library_files, non_library_files, _ = get_python_files(
all_files=all_files)
lint_fileset(library_files, PRODUCTION_RC, 'library code')
lint_fileset(non_library_files, TEST_RC, 'test and demo code')
if __name__ == '__main__':
main(sys.argv)
|
python/cross_service/aurora_rest_lending_library/library_api/test/test_library_data.py | iconara/aws-doc-sdk-examples | 5,166 | 12683785 | <gh_stars>1000+
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
"""
Unit tests for library_data.py functions.
"""
import datetime
import pytest
import boto3
from botocore.exceptions import ClientError
from botocore.stub import ANY
from chalicelib.library_data import Storage
CLUSTER_ARN = 'arn:aws:rds:us-west-2:123456789012:cluster:test-cluster'
SECRET_ARN = 'arn:aws:secretsmanager:us-west-2:123456789012:secret:test-secret-111111'
DB_NAME = 'testdatabase'
def make_storage_n_stubber(make_stubber):
rdsdata_client = boto3.client('rds-data')
storage = Storage(
{'DBClusterArn': CLUSTER_ARN}, {'ARN': SECRET_ARN}, DB_NAME, rdsdata_client)
return storage, make_stubber(rdsdata_client)
def test_bootstrap_tables(make_stubber):
storage, rdsdata_stubber = make_storage_n_stubber(make_stubber)
for _ in storage._tables:
rdsdata_stubber.stub_execute_statement(CLUSTER_ARN, SECRET_ARN, DB_NAME, ANY)
storage.bootstrap_tables()
def test_add_books(make_stubber):
storage, rdsdata_stubber = make_storage_n_stubber(make_stubber)
books = [
{'title': 'Book One', 'author': '<NAME>'},
{'title': 'Second Book', 'author': '<NAME>'},
{'title': 'Book One 2 (the sequel)', 'author': '<NAME>'}]
author_sql = "INSERT INTO Authors (FirstName, LastName) " \
"VALUES (:FirstName, :LastName)"
authors = {book['author']: {
'FirstName': ' '.join(book['author'].split(' ')[:-1]),
'LastName': book['author'].split(' ')[-1]
} for book in books}
author_param_sets = [[
{'name': 'FirstName',
'value': {'stringValue': author['FirstName']}},
{'name': 'LastName', 'value': {'stringValue': author['LastName']}}]
for author in authors.values()]
author_generated_field_sets = [[1], [2]]
book_sql = "INSERT INTO Books (Title, AuthorID) VALUES (:Title, :AuthorID)"
book_param_sets = [[
{'name': 'Title', 'value': {'stringValue': book['title']}},
{'name': 'AuthorID', 'value': {'longValue': author_id}}]
for book, author_id in zip(books, [1, 2, 1])]
book_generated_field_sets = [[11], [22], [33]]
rdsdata_stubber.stub_batch_execute_statement(
CLUSTER_ARN, SECRET_ARN, DB_NAME, author_sql, sql_param_sets=author_param_sets,
generated_field_sets=author_generated_field_sets)
rdsdata_stubber.stub_batch_execute_statement(
CLUSTER_ARN, SECRET_ARN, DB_NAME, book_sql, sql_param_sets=book_param_sets,
generated_field_sets=book_generated_field_sets)
author_count, book_count = storage.add_books(books)
assert author_count == 2
assert book_count == 3
@pytest.mark.parametrize('author_id,error_code', [
(None, None), (13, None), (None, 'TestException')])
def test_get_books(make_stubber, author_id, error_code):
storage, rdsdata_stubber = make_storage_n_stubber(make_stubber)
sql = "SELECT Books.BookID, Books.Title, Authors.AuthorID, " \
"Authors.FirstName, Authors.LastName FROM Books " \
"INNER JOIN Authors ON Books.AuthorID=Authors.AuthorID"
sql_params = None
if author_id is not None:
sql += " WHERE Authors.AuthorID = :Authors_AuthorID"
sql_params = [
{'name': 'Authors_AuthorID',
'value': {'longValue': author_id}}]
records = [
[1, 'Title One', 1, 'Freddy', 'Fake'],
[2, 'Title Two', 13, 'Peter', 'Pretend']]
rdsdata_stubber.stub_execute_statement(
CLUSTER_ARN, SECRET_ARN, DB_NAME, sql, sql_params=sql_params, records=records,
error_code=error_code)
if error_code is None:
got_books = storage.get_books(author_id)
assert [list(book.values()) for book in got_books] == records
else:
with pytest.raises(ClientError) as exc_info:
storage.get_books(author_id)
assert exc_info.value.response['Error']['Code'] == error_code
@pytest.mark.parametrize('error_code,stop_on_method', [
(None, None),
('TestException', 'stub_execute_statement')])
def test_add_book(make_stubber, stub_runner, error_code, stop_on_method):
storage, rdsdata_stubber = make_storage_n_stubber(make_stubber)
transaction_id = 'trid-747'
book = {'Books.Title': 'Test Book', 'Authors.FirstName': 'Teddy',
'Authors.LastName': 'Tester'}
author_sql = \
"INSERT INTO Authors (FirstName, LastName) VALUES (:FirstName, :LastName)"
author_params = [
{'name': 'FirstName', 'value': {'stringValue': 'Teddy'}},
{'name': 'LastName', 'value': {'stringValue': 'Tester'}}]
author_id = 101
book_sql = "INSERT INTO Books (Title, AuthorID) VALUES (:Title, :AuthorID)"
book_params = [
{'name': 'Title', 'value': {'stringValue': 'Test Book'}},
{'name': 'AuthorID', 'value': {'longValue': author_id}}]
book_id = 66
with stub_runner(error_code, stop_on_method) as runner:
runner.add(
rdsdata_stubber.stub_begin_transaction, CLUSTER_ARN, SECRET_ARN, DB_NAME,
transaction_id)
runner.add(
rdsdata_stubber.stub_execute_statement, CLUSTER_ARN, SECRET_ARN, DB_NAME,
author_sql, author_params, transaction_id=transaction_id,
generated_fields=[author_id])
runner.add(
rdsdata_stubber.stub_execute_statement, CLUSTER_ARN, SECRET_ARN, DB_NAME,
book_sql, book_params, transaction_id=transaction_id,
generated_fields=[book_id])
runner.add(rdsdata_stubber.stub_commit_transaction, CLUSTER_ARN, SECRET_ARN,
transaction_id)
if error_code is not None:
rdsdata_stubber.stub_rollack_transaction(
CLUSTER_ARN, SECRET_ARN, transaction_id)
result = storage.add_book(book)
if error_code is None:
assert result == (author_id, book_id)
else:
assert result is None
@pytest.mark.parametrize('error_code', [None, 'TestException'])
def test_get_authors(make_stubber, error_code):
storage, rdsdata_stubber = make_storage_n_stubber(make_stubber)
sql = "SELECT Authors.AuthorID, Authors.FirstName, Authors.LastName FROM Authors "
records = [
[1, 'Freddy', 'Fake'],
[13, 'Peter', 'Pretend']]
rdsdata_stubber.stub_execute_statement(
CLUSTER_ARN, SECRET_ARN, DB_NAME, sql, records=records, error_code=error_code)
if error_code is None:
got_authors = storage.get_authors()
assert [list(author.values()) for author in got_authors] == records
else:
with pytest.raises(ClientError) as exc_info:
storage.get_authors()
assert exc_info.value.response['Error']['Code'] == error_code
@pytest.mark.parametrize('error_code', [None, 'TestException'])
def test_get_patrons(make_stubber, error_code):
storage, rdsdata_stubber = make_storage_n_stubber(make_stubber)
sql = "SELECT Patrons.PatronID, Patrons.FirstName, Patrons.LastName FROM Patrons "
records = [
[1, 'Randall', 'Reader'],
[13, 'Bob', 'Booker']]
rdsdata_stubber.stub_execute_statement(
CLUSTER_ARN, SECRET_ARN, DB_NAME, sql, records=records, error_code=error_code)
if error_code is None:
got_patrons = storage.get_patrons()
assert [list(patron.values()) for patron in got_patrons] == records
else:
with pytest.raises(ClientError) as exc_info:
storage.get_patrons()
assert exc_info.value.response['Error']['Code'] == error_code
@pytest.mark.parametrize('error_code', [None, 'TestException'])
def test_add_patron(make_stubber, error_code):
storage, rdsdata_stubber = make_storage_n_stubber(make_stubber)
patron = {'Patrons.FirstName': 'Marguerite', 'Patrons.LastName': 'Magazine'}
patron_sql = \
"INSERT INTO Patrons (FirstName, LastName) VALUES (:FirstName, :LastName)"
patron_params = [
{'name': 'Patrons.FirstName', 'value': {'stringValue': 'Marguerite'}},
{'name': 'Patrons.LastName', 'value': {'stringValue': 'Magazine'}}]
patron_id = 36
rdsdata_stubber.stub_execute_statement(CLUSTER_ARN, SECRET_ARN, DB_NAME,
patron_sql, patron_params, generated_fields=[patron_id], error_code=error_code)
if error_code is None:
got_patron_id = storage.add_patron(patron)
assert got_patron_id == patron_id
else:
with pytest.raises(ClientError) as exc_info:
storage.add_patron(patron)
assert exc_info.value.response['Error']['Code'] == error_code
@pytest.mark.parametrize('error_code', [None, 'TestException'])
def test_delete_patron(make_stubber, error_code):
storage, rdsdata_stubber = make_storage_n_stubber(make_stubber)
patron_id = 38
patron_sql = \
"DELETE FROM Patrons WHERE PatronID=:PatronID"
patron_params = [{'name': 'PatronID', 'value': {'longValue': 38}}]
rdsdata_stubber.stub_execute_statement(CLUSTER_ARN, SECRET_ARN, DB_NAME,
patron_sql, patron_params, error_code=error_code)
if error_code is None:
storage.delete_patron(patron_id)
else:
with pytest.raises(ClientError) as exc_info:
storage.delete_patron(patron_id)
assert exc_info.value.response['Error']['Code'] == error_code
@pytest.mark.parametrize('error_code', [None, 'TestException'])
def test_get_borrowed_books(make_stubber, error_code):
storage, rdsdata_stubber = make_storage_n_stubber(make_stubber)
sql = "SELECT Lending.LendingID, Books.BookID, Books.Title, " \
"Authors.AuthorID, Authors.FirstName, Authors.LastName, " \
"Patrons.PatronID, Patrons.FirstName, Patrons.LastName, " \
"Lending.Lent, Lending.Returned " \
"FROM Lending " \
"INNER JOIN Books ON Lending.BookID=Books.BookID " \
"INNER JOIN Authors ON Books.AuthorID=Authors.AuthorID " \
"INNER JOIN Patrons ON Lending.PatronID=Patrons.PatronID " \
"WHERE Lending.Lent >= :Lending_Lent " \
"AND Lending.Returned IS :Lending_Returned"
sql_params = [{'name': 'Lending_Lent',
'value': {'stringValue': str(datetime.date.today())}},
{'name': 'Lending_Returned', 'value': {'isNull': True}}]
records = [
[1, 5, 'Writing Words', 10, 'Walter', 'Writer', 55, 'Randall', 'Reader',
str(datetime.date.today())],
[13, 39, 'Thirteen', 1300, 'Theodore', 'Three', 103, 'Bob', 'Booker',
str(datetime.date(2018, 10, 11))]]
rdsdata_stubber.stub_execute_statement(
CLUSTER_ARN, SECRET_ARN, DB_NAME, sql, sql_params=sql_params, records=records,
error_code=error_code)
if error_code is None:
got_books = storage.get_borrowed_books()
assert [list(book.values()) for book in got_books] == records
else:
with pytest.raises(ClientError) as exc_info:
storage.get_borrowed_books()
assert exc_info.value.response['Error']['Code'] == error_code
@pytest.mark.parametrize('error_code', [None, 'TestException'])
def test_borrow_book(make_stubber, error_code):
storage, rdsdata_stubber = make_storage_n_stubber(make_stubber)
book_id = 35
patron_id = 405
sql = \
"INSERT INTO Lending (BookID, PatronID, Lent, Returned) " \
"VALUES (:BookID, :PatronID, :Lent, :Returned)"
sql_params = [
{'name': 'BookID', 'value': {'longValue': 35}},
{'name': 'PatronID', 'value': {'longValue': 405}},
{'name': 'Lent', 'typeHint': 'DATE',
'value': {'stringValue': str(datetime.date.today())}},
{'name': 'Returned', 'value': {'isNull': True}}]
lending_id = 5000
rdsdata_stubber.stub_execute_statement(CLUSTER_ARN, SECRET_ARN, DB_NAME,
sql, sql_params, generated_fields=[lending_id], error_code=error_code)
if error_code is None:
got_lending_id = storage.borrow_book(book_id, patron_id)
assert got_lending_id == lending_id
else:
with pytest.raises(ClientError) as exc_info:
storage.borrow_book(book_id, patron_id)
assert exc_info.value.response['Error']['Code'] == error_code
@pytest.mark.parametrize('error_code', [None, 'TestException'])
def test_return_book(make_stubber, error_code):
storage, rdsdata_stubber = make_storage_n_stubber(make_stubber)
book_id = 35
patron_id = 405
sql = \
"UPDATE Lending SET Returned=:set_Returned " \
"WHERE Lending.BookID = :Lending_BookID AND " \
"Lending.PatronID = :Lending_PatronID AND " \
"Lending.Returned IS :Lending_Returned"
sql_params = [
{'name': 'set_Returned', 'typeHint': 'DATE',
'value': {'stringValue': str(datetime.date.today())}},
{'name': 'Lending_BookID', 'value': {'longValue': 35}},
{'name': 'Lending_PatronID', 'value': {'longValue': 405}},
{'name': 'Lending_Returned', 'value': {'isNull': True}}]
rdsdata_stubber.stub_execute_statement(CLUSTER_ARN, SECRET_ARN, DB_NAME,
sql, sql_params, error_code=error_code)
if error_code is None:
storage.return_book(book_id, patron_id)
else:
with pytest.raises(ClientError) as exc_info:
storage.return_book(book_id, patron_id)
assert exc_info.value.response['Error']['Code'] == error_code
|
analysis/dask/old_encoding/xgb-dask.py | szilard/GBM-perf | 201 | 12683794 | import pandas as pd
from sklearn import metrics
from dask.distributed import Client, LocalCluster
import dask.dataframe as dd
import dask.array as da
from dask_ml import preprocessing
import xgboost as xgb
cluster = LocalCluster(n_workers=16, threads_per_worker=1)
client = Client(cluster)
d_train = pd.read_csv("https://s3.amazonaws.com/benchm-ml--main/train-1m.csv")
d_test = pd.read_csv("https://s3.amazonaws.com/benchm-ml--main/test.csv")
d_all = pd.concat([d_train,d_test])
dx_all = dd.from_pandas(d_all, npartitions=16)
vars_cat = ["Month","DayofMonth","DayOfWeek","UniqueCarrier", "Origin", "Dest"]
vars_num = ["DepTime","Distance"]
for col in vars_cat:
dx_all[col] = preprocessing.LabelEncoder().fit_transform(dx_all[col])
X_all = dx_all[vars_cat+vars_num].to_dask_array(lengths=True)
y_all = da.where((dx_all["dep_delayed_15min"]=="Y").to_dask_array(lengths=True),1,0)
X_train = X_all[0:d_train.shape[0],]
y_train = y_all[0:d_train.shape[0]]
X_test = X_all[d_train.shape[0]:(d_train.shape[0]+d_test.shape[0]),]
y_test = y_all[d_train.shape[0]:(d_train.shape[0]+d_test.shape[0])]
# persist() returns new, persisted collections; reassign so the cached partitions are
# actually retained on the workers instead of being garbage-collected.
X_train = X_train.persist()
y_train = y_train.persist()
client.has_what()
dxgb_train = xgb.dask.DaskDMatrix(client, X_train, y_train)
dxgb_test = xgb.dask.DaskDMatrix(client, X_test)
param = {'objective':'binary:logistic', 'tree_method':'hist', 'max_depth':10, 'eta':0.1}
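# Note: the "%time" prefix on the next line is IPython/Jupyter magic, so this script is
# meant to be run interactively; in plain Python you would time the call yourself, for
# example with time.perf_counter() around xgb.dask.train(...).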
%time md = xgb.dask.train(client, param, dxgb_train, num_boost_round = 100)
y_pred = xgb.dask.predict(client, md, dxgb_test)
y_pred_loc = y_pred.compute()
y_test_loc = y_test.compute()
print(metrics.roc_auc_score(y_test_loc, y_pred_loc))
## m5.4xlarge 16c (8+8HT)
## Wall time: 20.5 s
## 0.7958538649110775
|
odk_logger/tests/__init__.py | Ecotrust/formhub | 123 | 12683797 | from parsing_tests import *
#from instance_creation_test import *
#from test_simple_submission import *
#from test_import_tools import *
#from test_form_submission import *
#from test_update_xform_uuid import *
#from test_command_syncd_deleted_instances_fix import *
#from test_webforms import *
#from test_publish_xls import *
#from test_backup_tools import *
|
src/jNlp/edict_search_monash/edict_examples.py | Reynolddoss/jProcessing | 133 | 12683803 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This package uses the EDICT_ and KANJIDIC_ dictionary files.
These files are the property of the
Electronic Dictionary Research and Development Group_ , and
are used in conformance with the Group's licence_ .
.. _EDICT: http://www.csse.monash.edu.au/~jwb/edict.html
.. _KANJIDIC: http://www.csse.monash.edu.au/~jwb/kanjidic.html
.. _Group: http://www.edrdg.org/
.. _licence: http://www.edrdg.org/edrdg/licence.html
..
"""
# Copyright (c) 2011, <NAME>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""
Edict Parser By **<NAME>**, see ``edict_search.py``
Edict Example sentences, by search query, **Pulkit Kathuria**
Edict examples pickle files are provided but latest example files
can be downloaded from the links provided.
Charset:
- utf-8 charset example file
- ISO-8859-1 edict_dictionary file
Outputs example sentences for a query in Japanese only for ambiguous words.
"""
import re, os, subprocess
from jNlp.edict_search_monash.edict_search import Parser
import cPickle as pickle
def word_and_id(BSent):
results = []
for item in BSent.split():
brackets = re.compile('\[.*?\]')
flter = re.sub('\(.*?\)','',item)
word = re.split('\[|\]', re.sub('\{.*?\}','',flter))[0]
try: s_id = re.split('\[|\]', re.sub('\{.*?\}','',flter))[1]
except: pass
if re.search(brackets, flter):
results.append((word, s_id))
return results
def parse_examples(edict_examples_file):
"""
Edict examples format
---------------------
::
A: 誰にでも長所と.. Everyone has....points.#ID=276471_4870
B: 才[01]{歳} 以上[01] 生きる (こと){こと} は 決して ..
ambiguous_words: @type = dictionary
format: Kanji ==> id ==> [examples_sent_id, ..]
才 ==> 01 ==> [#ID=276471_4870, ...]
call:
>>> ambiguous_words[kanji][01]
...[#ID=276471_4870, ...]
edict_examples: @type = dictionary
format:
ID ==> u'example_sentence'
#ID=276471_4870 ==> u'誰にでも長所と.. Everyone has....points'
"""
ambiguous_words = {}
edict_examples = {}
for line in edict_examples_file.readlines():
line = unicode(line,'utf-8')
if line.startswith('A:'):
eg_sent = line.split('#ID=')[0]
eg_sent_id = line.split('#ID=')[1]
edict_examples[eg_sent_id] = eg_sent
continue
for item in word_and_id(line):
word = item[0]
s_id = int(item[1])
if not ambiguous_words.has_key(word): ambiguous_words[word] = {}
if not ambiguous_words[word].has_key(s_id): ambiguous_words[word][s_id] = []
ambiguous_words[word][s_id].append(eg_sent_id)
return ambiguous_words, edict_examples
def edict_entry(edict_file_path, query):
kp = Parser(edict_file_path)
for entry in kp.search(query):
if entry.to_string().split()[0] == query:
entry = entry.to_string()
glosses = re.findall('\(\d\).*?;',entry)
s_ids = [int(re.search('\d',gloss).group(0)) for gloss in glosses]
return s_ids, glosses
return [],[]
def check_pickles(edict_examples_path):
f = open(edict_examples_path)
__checkpickles__ = ['edict_examples.p','ambiguous_words.p']
for pickl in __checkpickles__:
if not os.path.exists(pickl):
ambiguous_words, edict_examples = parse_examples(f)
pickle.dump(ambiguous_words, open("ambiguous_words.p",'wb'))
pickle.dump(edict_examples, open("edict_examples.p",'wb'))
else:
ambiguous_words = pickle.load(open('ambiguous_words.p'))
edict_examples = pickle.load(open('edict_examples.p'))
return ambiguous_words, edict_examples
def search_with_example(edict_path, edict_examples_path, query):
ambiguous_words, edict_examples = check_pickles(edict_examples_path)
s_ids, glosses = edict_entry(edict_path, query)
print query.encode('utf-8')
for s_id, gloss in enumerate(glosses):
print
print 'Sense', gloss
if ambiguous_words.has_key(query) and ambiguous_words[query].has_key(s_ids[s_id]):
for ex_num, ex_id in enumerate(ambiguous_words[query][s_ids[s_id]], 1):
ex_sentence = edict_examples[ex_id].replace(query[0], '*'+query[0]+'*')
print '\t', ex_sentence.replace('A:','EX:'+str(ex_num).zfill(2)).encode('utf-8')
def _mime(f_path):
command = ['file','--mime',f_path]
process = subprocess.Popen(command, stdout=subprocess.PIPE)
charset = process.communicate()[0].split('charset=')[1]
return charset.strip()
def _encoding_check(edict_path, edict_examples_path):
if _mime(edict_path) <> 'iso-8859-1' or _mime(edict_examples_path) <>'utf-8':
print _mime(edict_path)
print 'examples file must utf-8 encoded'
print 'edict dictionary must be iso-8859-1 encoded'
print 'man iconv'
return True
if __name__ == '__main__':
query = u'水'
edict_path = '../_dicts/edict-2011-08-30'
edict_examples_path = '../_dicts/edict_examples'
search_with_example(edict_path, edict_examples_path, query)
|
qf_lib_tests/unit_tests/containers/test_future_ticker.py | webclinic017/qf-lib | 198 | 12683804 | # Copyright 2016-present CERN – European Organization for Nuclear Research
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from qf_lib.common.tickers.tickers import Ticker
from qf_lib.common.utils.dateutils.date_format import DateFormat
from qf_lib.common.utils.dateutils.string_to_date import str_to_date
from qf_lib.common.utils.dateutils.timer import SettableTimer
from qf_lib.containers.futures.future_tickers.future_ticker import FutureTicker
from qf_lib.containers.series.qf_series import QFSeries
from qf_lib.data_providers.bloomberg import BloombergDataProvider
from qf_lib_tests.unit_tests.config.test_settings import get_test_settings
class CustomTicker(Ticker):
def from_string(self, ticker_str):
pass
class CustomFutureTicker(FutureTicker, CustomTicker):
def belongs_to_family(self, ticker: CustomTicker) -> bool:
pass
def _get_futures_chain_tickers(self):
tickers = [
CustomTicker("A"),
CustomTicker("B"),
CustomTicker("C"),
CustomTicker("D"),
CustomTicker("E"),
CustomTicker("F"),
CustomTicker("G")
]
exp_dates = [
str_to_date('2017-11-13'),
str_to_date('2017-12-15'),
str_to_date('2018-01-12'),
str_to_date('2018-02-13'),
str_to_date('2018-03-15'),
str_to_date('2018-04-14'),
str_to_date('2018-05-13')
]
return QFSeries(data=tickers, index=exp_dates)
class TestSeries(unittest.TestCase):
def setUp(self):
self.timer = SettableTimer(initial_time=str_to_date('2017-01-01'))
settings = get_test_settings()
self.bbg_provider = BloombergDataProvider(settings)
def test_valid_ticker_1(self):
future_ticker = CustomFutureTicker("Custom", "CT{} Custom", 1, 5, 500)
future_ticker.initialize_data_provider(self.timer, self.bbg_provider)
# '2017-12-15' is the official expiration date of CustomTicker:B, setting the days_before_exp_date equal to
# 5 forces the expiration to occur on the 11th ('2017-12-15' - 5 days = '2017-12-10' is the last day of old
# contract).
self.timer.set_current_time(str_to_date('2017-12-05'))
self.assertEqual(future_ticker.get_current_specific_ticker(), CustomTicker("B"))
self.timer.set_current_time(str_to_date('2017-12-10'))
self.assertEqual(future_ticker.get_current_specific_ticker(), CustomTicker("B"))
self.timer.set_current_time(str_to_date('2017-12-11'))
self.assertEqual(future_ticker.get_current_specific_ticker(), CustomTicker("C"))
def test_valid_ticker_2(self):
# Test the 2nd contract instead of front one
future_ticker = CustomFutureTicker("Custom", "CT{} Custom", 2, 5, 500)
future_ticker.initialize_data_provider(self.timer, self.bbg_provider)
self.timer.set_current_time(str_to_date('2017-12-05'))
self.assertEqual(future_ticker.get_current_specific_ticker(), CustomTicker("C"))
self.timer.set_current_time(str_to_date('2017-12-10'))
self.assertEqual(future_ticker.get_current_specific_ticker(), CustomTicker("C"))
self.timer.set_current_time(str_to_date('2017-12-11'))
self.assertEqual(future_ticker.get_current_specific_ticker(), CustomTicker("D"))
def test_valid_ticker_3(self):
future_ticker = CustomFutureTicker("Custom", "CT{} Custom", 1, 45, 500)
future_ticker.initialize_data_provider(self.timer, self.bbg_provider)
self.timer.set_current_time(str_to_date('2017-11-28'))
# '2017-11-28' + 45 days = '2018-01-12' - the front contract will be equal to CustomTicker:D
self.assertEqual(future_ticker.get_current_specific_ticker(), CustomTicker("C"))
self.timer.set_current_time(str_to_date('2017-11-29'))
self.assertEqual(future_ticker.get_current_specific_ticker(), CustomTicker("D"))
self.timer.set_current_time(str_to_date('2017-12-05'))
# '2017-12-05' + 45 days = '2018-01-19' - the front contract will be equal to CustomTicker:D
self.assertEqual(future_ticker.get_current_specific_ticker(), CustomTicker("D"))
def test_valid_ticker_4(self):
future_ticker = CustomFutureTicker("Custom", "CT{} Custom", 2, 45, 500)
future_ticker.initialize_data_provider(self.timer, self.bbg_provider)
self.timer.set_current_time(str_to_date('2017-11-28'))
# '2017-11-28' + 45 days = '2018-01-12' - the front contract will be equal to CustomTicker:D
self.assertEqual(future_ticker.get_current_specific_ticker(), CustomTicker("D"))
self.timer.set_current_time(str_to_date('2017-11-29'))
self.assertEqual(future_ticker.get_current_specific_ticker(), CustomTicker("E"))
self.timer.set_current_time(str_to_date('2017-12-05'))
# '2017-12-05' + 45 days = '2018-01-19' - the front contract will be equal to CustomTicker:D
self.assertEqual(future_ticker.get_current_specific_ticker(), CustomTicker("E"))
def test_set_expiration_hour__first_caching_before_exp_hour(self):
""" Test set expiration hour when the first caching occurs on the expiration day, before expiration hour. """
future_ticker = CustomFutureTicker("Custom", "CT{} Custom", 1, 5, 500)
future_ticker.initialize_data_provider(self.timer, self.bbg_provider)
future_ticker.set_expiration_hour(hour=8, minute=10)
self.timer.set_current_time(str_to_date('2017-12-11 00:00:00.0', DateFormat.FULL_ISO))
self.assertEqual(future_ticker.get_current_specific_ticker(), CustomTicker("B"))
self.timer.set_current_time(str_to_date('2017-12-11 07:59:00.0', DateFormat.FULL_ISO))
self.assertEqual(future_ticker.get_current_specific_ticker(), CustomTicker("B"))
self.timer.set_current_time(str_to_date('2017-12-11 08:10:00.0', DateFormat.FULL_ISO))
self.assertEqual(future_ticker.get_current_specific_ticker(), CustomTicker("C"))
self.timer.set_current_time(str_to_date('2017-12-11 07:10:00.0', DateFormat.FULL_ISO))
self.assertEqual(future_ticker.get_current_specific_ticker(), CustomTicker("B"))
self.timer.set_current_time(str_to_date('2017-12-11 09:10:00.0', DateFormat.FULL_ISO))
self.assertEqual(future_ticker.get_current_specific_ticker(), CustomTicker("C"))
def test_set_expiration_hour__first_caching_after_exp_hour(self):
""" Test set expiration hour when the first caching occurs a day before the expiration day, after
expiration hour. """
future_ticker = CustomFutureTicker("Custom", "CT{} Custom", 1, 5, 500)
future_ticker.initialize_data_provider(self.timer, self.bbg_provider)
future_ticker.set_expiration_hour(hour=10, minute=10)
self.timer.set_current_time(str_to_date('2017-12-10 19:00:00.0', DateFormat.FULL_ISO))
self.assertEqual(future_ticker.get_current_specific_ticker(), CustomTicker("B"))
self.timer.set_current_time(str_to_date('2017-12-11 10:10:00.0', DateFormat.FULL_ISO))
self.assertEqual(future_ticker.get_current_specific_ticker(), CustomTicker("C"))
self.timer.set_current_time(str_to_date('2017-12-11 11:10:00.0', DateFormat.FULL_ISO))
self.assertEqual(future_ticker.get_current_specific_ticker(), CustomTicker("C"))
def test_set_expiration_hour__first_caching_at_exp_hour(self):
""" Test set expiration hour when the first caching occurs a day before the expiration day, at
expiration hour. """
future_ticker = CustomFutureTicker("Custom", "CT{} Custom", 1, 5, 500)
future_ticker.initialize_data_provider(self.timer, self.bbg_provider)
future_ticker.set_expiration_hour(hour=8, minute=10)
self.timer.set_current_time(str_to_date('2017-12-11 08:10:00.0', DateFormat.FULL_ISO))
self.assertEqual(future_ticker.get_current_specific_ticker(), CustomTicker("C"))
self.timer.set_current_time(str_to_date('2017-12-11 09:10:00.0', DateFormat.FULL_ISO))
self.assertEqual(future_ticker.get_current_specific_ticker(), CustomTicker("C"))
self.timer.set_current_time(str_to_date('2017-12-10 19:00:00.0', DateFormat.FULL_ISO))
self.assertEqual(future_ticker.get_current_specific_ticker(), CustomTicker("B"))
if __name__ == '__main__':
unittest.main()
|
Webapp/module_locator.py | akbarman8/KSCrash | 3,369 | 12683821 | # from http://stackoverflow.com/questions/2632199/how-do-i-get-the-path-of-the-current-executed-file-in-python
import os
import sys
def we_are_frozen():
# All of the modules are built-in to the interpreter, e.g., by py2exe
return hasattr(sys, "frozen")
def module_path():
encoding = sys.getfilesystemencoding()
if we_are_frozen():
return os.path.dirname(unicode(sys.executable, encoding))
return os.path.dirname(unicode(__file__, encoding))
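# Usage sketch (not part of the original module): module_path() is typically used to
# resolve files shipped alongside this module regardless of the working directory, e.g.
#   config_file = os.path.join(module_path(), 'settings.cfg')
# where 'settings.cfg' is a made-up filename used only for illustration.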
|
examples/events/tools.py | chrisinmtown/PyMISP | 307 | 12683823 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import random
from random import randint
import string
from pymisp import MISPEvent, MISPAttribute
def randomStringGenerator(size, chars=string.ascii_lowercase + string.digits):
return ''.join(random.choice(chars) for _ in range(size))
def randomIpGenerator():
return str(randint(0, 255)) + '.' + str(randint(0, 255)) + '.' + str(randint(0, 255)) + '.' + str(randint(0, 255))
def _attribute(category, type, value):
attribute = MISPAttribute()
attribute.category = category
attribute.type = type
attribute.value = value
return attribute
def floodtxt(misp, event, maxlength=255):
text = randomStringGenerator(randint(1, maxlength))
choose_from = [('Internal reference', 'comment', text), ('Internal reference', 'text', text),
('Internal reference', 'other', text), ('Network activity', 'email-subject', text),
('Artifacts dropped', 'mutex', text), ('Artifacts dropped', 'filename', text)]
misp.add_attribute(event, _attribute(*random.choice(choose_from)))
def floodip(misp, event):
ip = randomIpGenerator()
choose_from = [('Network activity', 'ip-src', ip), ('Network activity', 'ip-dst', ip)]
misp.add_attribute(event, _attribute(*random.choice(choose_from)))
def flooddomain(misp, event, maxlength=25):
a = randomStringGenerator(randint(1, maxlength))
b = randomStringGenerator(randint(2, 3), chars=string.ascii_lowercase)
domain = a + '.' + b
choose_from = [('Network activity', 'domain', domain), ('Network activity', 'hostname', domain)]
misp.add_attribute(event, _attribute(*random.choice(choose_from)))
def floodemail(misp, event, maxlength=25):
a = randomStringGenerator(randint(1, maxlength))
b = randomStringGenerator(randint(1, maxlength))
c = randomStringGenerator(randint(2, 3), chars=string.ascii_lowercase)
email = a + '@' + b + '.' + c
choose_from = [('Network activity', 'email-dst', email), ('Network activity', 'email-src', email)]
misp.add_attribute(event, _attribute(*random.choice(choose_from)))
def create_dummy_event(misp):
event = MISPEvent()
event.info = 'Dummy event'
event = misp.add_event(event, pythonify=True)
return event
def create_massive_dummy_events(misp, nbattribute):
event = MISPEvent()
event.info = 'massive dummy event'
event = misp.add_event(event)
print(event)
functions = [floodtxt, floodip, flooddomain, floodemail]
for i in range(nbattribute):
functions[random.randint(0, len(functions) - 1)](misp, event)
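# Usage sketch (assumption, not part of the original file): given an authenticated
# client such as misp = PyMISP(url, key, ssl=True), a call like
#   create_massive_dummy_events(misp, 50)
# creates one "massive dummy event" and floods it with 50 randomly generated
# attributes; url and key are placeholders for your own MISP instance credentials.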
|
odin/tests/test_position.py | gsamarakoon/Odin | 103 | 12683834 | import unittest
import datetime as dt
from odin.handlers.position_handler.position import FilledPosition
from odin.utilities import params
class TestPosition(unittest.TestCase):
def test_to_database_position(self):
s = "SPY"
q = 100
d = params.Directions.long_dir
t = params.TradeTypes.buy_trade
a = params.action_dict[(d, t)]
pid = "test_portfolio_id"
date = dt.datetime.today()
price = 100.0
update_price = 101.0
pos = FilledPosition(s, d, t, pid, date, price)
pos.transact_shares(a, q, price)
pos.to_database_position()
def test_from_database_position(self):
s = "SPY"
pid = "test_portfolio_id"
pos = FilledPosition.from_database_position(pid, s)
self.assertEqual(pos.avg_price, 100.01)
self.assertEqual(pos.portfolio_id, pid)
self.assertEqual(pos.quantity, 100)
self.assertEqual(pos.direction, params.Directions.long_dir)
self.assertEqual(pos.trade_type, params.TradeTypes.buy_trade)
def test_long_position(self):
s = "GOOG"
q = 100
d = params.Directions.long_dir
t = params.TradeTypes.buy_trade
a = params.action_dict[(d, t)]
pid = "test_portfolio_id"
date = dt.datetime.today()
price = 100.0
update_price = 101.0
pos = FilledPosition(s, d, t, pid, date, price)
pos.transact_shares(a, q, price)
pos.update_market_value(update_price)
self.assertEqual(
pos.percent_pnl,
1 + (pos.market_value - pos.cost_basis) / pos.cost_basis
)
self.assertEqual(pos.quantity, q)
self.assertEqual(pos.market_value, 10100.0)
self.assertEqual(pos.unrealized_pnl, 99.0)
self.assertEqual(pos.tot_commission, 1.0)
sell_price = 100.5
pos.transact_shares(params.Actions.sell, q // 2, sell_price)
self.assertEqual(pos.quantity, q // 2)
self.assertEqual(pos.realized_pnl, 48.0)
self.assertEqual(pos.unrealized_pnl, 24.5)
self.assertEqual(pos.tot_commission, 2.0)
sell_price = 101.0
pos.transact_shares(params.Actions.sell, q // 2, sell_price)
self.assertEqual(pos.quantity, 0)
self.assertEqual(pos.realized_pnl, 72.0)
self.assertEqual(pos.unrealized_pnl, 0.)
self.assertEqual(pos.tot_commission, 3.0)
def test_short_position(self):
s = "GOOG"
q = 100
d = params.Directions.short_dir
t = params.TradeTypes.buy_trade
a = params.action_dict[(d, t)]
pid = "test_portfolio_id"
date = dt.datetime.today()
price = 100.0
update_price = 101.0
pos = FilledPosition(s, d, t, pid, date, price)
pos.transact_shares(a, q, price)
pos.update_market_value(update_price)
self.assertEqual(
pos.percent_pnl,
1 - (pos.market_value - pos.cost_basis) / pos.cost_basis
)
self.assertEqual(pos.quantity, q)
self.assertEqual(pos.market_value, -10100.0)
self.assertEqual(pos.unrealized_pnl, -101.0)
self.assertEqual(pos.tot_commission, 1.0)
buy_price = 100.5
pos.transact_shares(params.Actions.buy, q // 2, buy_price)
self.assertEqual(pos.quantity, q // 2)
self.assertEqual(pos.realized_pnl, -52.0)
self.assertEqual(pos.unrealized_pnl, -25.5)
self.assertEqual(pos.tot_commission, 2.0)
buy_price = 101.0
pos.transact_shares(params.Actions.buy, q // 2, buy_price)
self.assertEqual(pos.quantity, 0)
self.assertEqual(pos.realized_pnl, -78.0)
self.assertEqual(pos.unrealized_pnl, 0.)
self.assertEqual(pos.tot_commission, 3.0)
if __name__ == "__main__":
unittest.main()
|
src/condor_contrib/condor_pigeon/src/condor_pigeon_client/skype_linux_tools/Skype4Py/Languages/ar.py | neurodebian/htcondor | 217 | 12683841 |
apiAttachAvailable = u'\u0648\u0627\u062c\u0647\u0629 \u0628\u0631\u0645\u062c\u0629 \u0627\u0644\u062a\u0637\u0628\u064a\u0642 (API) \u0645\u062a\u0627\u062d\u0629'
apiAttachNotAvailable = u'\u063a\u064a\u0631 \u0645\u062a\u0627\u062d'
apiAttachPendingAuthorization = u'\u062a\u0639\u0644\u064a\u0642 \u0627\u0644\u062a\u0635\u0631\u064a\u062d'
apiAttachRefused = u'\u0631\u0641\u0636'
apiAttachSuccess = u'\u0646\u062c\u0627\u062d'
apiAttachUnknown = u'\u063a\u064a\u0631 \u0645\u0639\u0631\u0648\u0641\u0629'
budDeletedFriend = u'\u062a\u0645 \u062d\u0630\u0641\u0647 \u0645\u0646 \u0642\u0627\u0626\u0645\u0629 \u0627\u0644\u0623\u0635\u062f\u0642\u0627\u0621'
budFriend = u'\u0635\u062f\u064a\u0642'
budNeverBeenFriend = u'\u0644\u0645 \u064a\u0648\u062c\u062f \u0645\u0637\u0644\u0642\u064b\u0627 \u0641\u064a \u0642\u0627\u0626\u0645\u0629 \u0627\u0644\u0623\u0635\u062f\u0642\u0627\u0621'
budPendingAuthorization = u'\u062a\u0639\u0644\u064a\u0642 \u0627\u0644\u062a\u0635\u0631\u064a\u062d'
budUnknown = u'\u063a\u064a\u0631 \u0645\u0639\u0631\u0648\u0641\u0629'
cfrBlockedByRecipient = u'\u062a\u0645 \u062d\u0638\u0631 \u0627\u0644\u0645\u0643\u0627\u0644\u0645\u0629 \u0628\u0648\u0627\u0633\u0637\u0629 \u0627\u0644\u0645\u0633\u062a\u0644\u0645'
cfrMiscError = u'\u062e\u0637\u0623 \u0645\u062a\u0646\u0648\u0639'
cfrNoCommonCodec = u'\u0628\u0631\u0646\u0627\u0645\u062c \u062a\u0634\u0641\u064a\u0631 \u063a\u064a\u0631 \u0634\u0627\u0626\u0639'
cfrNoProxyFound = u'\u0644\u0645 \u064a\u062a\u0645 \u0627\u0644\u0639\u062b\u0648\u0631 \u0639\u0644\u0649 \u0628\u0631\u0648\u0643\u0633\u064a'
cfrNotAuthorizedByRecipient = u'\u0644\u0645 \u064a\u062a\u0645 \u0645\u0646\u062d \u062a\u0635\u0631\u064a\u062d \u0644\u0644\u0645\u0633\u062a\u062e\u062f\u0645 \u0627\u0644\u062d\u0627\u0644\u064a \u0628\u0648\u0627\u0633\u0637\u0629 \u0627\u0644\u0645\u0633\u062a\u0644\u0645'
cfrRecipientNotFriend = u'\u0627\u0644\u0645\u0633\u062a\u0644\u0645 \u0644\u064a\u0633 \u0635\u062f\u064a\u0642\u064b\u0627'
cfrRemoteDeviceError = u'\u0645\u0634\u0643\u0644\u0629 \u0641\u064a \u062c\u0647\u0627\u0632 \u0627\u0644\u0635\u0648\u062a \u0627\u0644\u0628\u0639\u064a\u062f'
cfrSessionTerminated = u'\u0627\u0646\u062a\u0647\u0627\u0621 \u0627\u0644\u062c\u0644\u0633\u0629'
cfrSoundIOError = u'\u062e\u0637\u0623 \u0641\u064a \u0625\u062f\u062e\u0627\u0644/\u0625\u062e\u0631\u0627\u062c \u0627\u0644\u0635\u0648\u062a'
cfrSoundRecordingError = u'\u062e\u0637\u0623 \u0641\u064a \u062a\u0633\u062c\u064a\u0644 \u0627\u0644\u0635\u0648\u062a'
cfrUnknown = u'\u063a\u064a\u0631 \u0645\u0639\u0631\u0648\u0641\u0629'
cfrUserDoesNotExist = u'\u0627\u0644\u0645\u0633\u062a\u062e\u062f\u0645/\u0631\u0642\u0645 \u0627\u0644\u0647\u0627\u062a\u0641 \u063a\u064a\u0631 \u0645\u0648\u062c\u0648\u062f'
cfrUserIsOffline = u'\u063a\u064a\u0631 \u0645\u062a\u0651\u0635\u0644\u0629 \u0623\u0648 \u063a\u064a\u0631 \u0645\u062a\u0651\u0635\u0644'
chsAllCalls = u'\u062d\u0648\u0627\u0631 \u0642\u062f\u064a\u0645'
chsDialog = u'\u062d\u0648\u0627\u0631'
chsIncomingCalls = u'\u064a\u062c\u0628 \u0627\u0644\u0645\u0648\u0627\u0641\u0642\u0629 \u0639\u0644\u0649 \u0627\u0644\u0645\u062d\u0627\u062f\u062b\u0629 \u0627\u0644\u062c\u0645\u0627\u0639\u064a\u0629'
chsLegacyDialog = u'\u062d\u0648\u0627\u0631 \u0642\u062f\u064a\u0645'
chsMissedCalls = u'\u062d\u0648\u0627\u0631'
chsMultiNeedAccept = u'\u064a\u062c\u0628 \u0627\u0644\u0645\u0648\u0627\u0641\u0642\u0629 \u0639\u0644\u0649 \u0627\u0644\u0645\u062d\u0627\u062f\u062b\u0629 \u0627\u0644\u062c\u0645\u0627\u0639\u064a\u0629'
chsMultiSubscribed = u'\u062a\u0645 \u0627\u0644\u0627\u0634\u062a\u0631\u0627\u0643 \u0641\u064a \u0627\u0644\u0645\u062d\u0627\u062f\u062b\u0629 \u0627\u0644\u062c\u0645\u0627\u0639\u064a\u0629'
chsOutgoingCalls = u'\u062a\u0645 \u0627\u0644\u0627\u0634\u062a\u0631\u0627\u0643 \u0641\u064a \u0627\u0644\u0645\u062d\u0627\u062f\u062b\u0629 \u0627\u0644\u062c\u0645\u0627\u0639\u064a\u0629'
chsUnknown = u'\u063a\u064a\u0631 \u0645\u0639\u0631\u0648\u0641\u0629'
chsUnsubscribed = u'\u062a\u0645 \u0625\u0644\u063a\u0627\u0621 \u0627\u0644\u0627\u0634\u062a\u0631\u0627\u0643'
clsBusy = u'\u0645\u0634\u063a\u0648\u0644'
clsCancelled = u'\u0623\u0644\u063a\u064a'
clsEarlyMedia = u'\u062a\u0634\u063a\u064a\u0644 \u0627\u0644\u0648\u0633\u0627\u0626\u0637 (Early Media)'
clsFailed = u'\u0639\u0641\u0648\u0627\u064b\u060c \u062a\u0639\u0630\u0651\u0631\u062a \u0639\u0645\u0644\u064a\u0629 \u0627\u0644\u0627\u062a\u0651\u0635\u0627\u0644!'
clsFinished = u'\u0627\u0646\u062a\u0647\u0649'
clsInProgress = u'\u062c\u0627\u0631\u064a \u0627\u0644\u0627\u062a\u0635\u0627\u0644'
clsLocalHold = u'\u0645\u0643\u0627\u0644\u0645\u0629 \u0642\u064a\u062f \u0627\u0644\u0627\u0646\u062a\u0638\u0627\u0631 \u0645\u0646 \u0637\u0631\u0641\u064a'
clsMissed = u'\u0645\u0643\u0627\u0644\u0645\u0629 \u0644\u0645 \u064a\u064f\u0631\u062f \u0639\u0644\u064a\u0647\u0627'
clsOnHold = u'\u0642\u064a\u062f \u0627\u0644\u0627\u0646\u062a\u0638\u0627\u0631'
clsRefused = u'\u0631\u0641\u0636'
clsRemoteHold = u'\u0645\u0643\u0627\u0644\u0645\u0629 \u0642\u064a\u062f \u0627\u0644\u0627\u0646\u062a\u0638\u0627\u0631 \u0645\u0646 \u0627\u0644\u0637\u0631\u0641 \u0627\u0644\u062b\u0627\u0646\u064a'
clsRinging = u'\u0627\u0644\u0627\u062a\u0635\u0627\u0644'
clsRouting = u'\u062a\u0648\u062c\u064a\u0647'
clsTransferred = u'\u063a\u064a\u0631 \u0645\u0639\u0631\u0648\u0641\u0629'
clsTransferring = u'\u063a\u064a\u0631 \u0645\u0639\u0631\u0648\u0641\u0629'
clsUnknown = u'\u063a\u064a\u0631 \u0645\u0639\u0631\u0648\u0641\u0629'
clsUnplaced = u'\u0644\u0645 \u064a\u0648\u0636\u0639 \u0645\u0637\u0644\u0642\u064b\u0627'
clsVoicemailBufferingGreeting = u'\u062a\u062e\u0632\u064a\u0646 \u0627\u0644\u062a\u062d\u064a\u0629'
clsVoicemailCancelled = u'\u062a\u0645 \u0625\u0644\u063a\u0627\u0621 \u0627\u0644\u0628\u0631\u064a\u062f \u0627\u0644\u0635\u0648\u062a\u064a'
clsVoicemailFailed = u'\u0641\u0634\u0644 \u0627\u0644\u0628\u0631\u064a\u062f \u0627\u0644\u0635\u0648\u062a\u064a'
clsVoicemailPlayingGreeting = u'\u062a\u0634\u063a\u064a\u0644 \u0627\u0644\u062a\u062d\u064a\u0629'
clsVoicemailRecording = u'\u062a\u0633\u062c\u064a\u0644 \u0628\u0631\u064a\u062f \u0635\u0648\u062a\u064a'
clsVoicemailSent = u'\u062a\u0645 \u0625\u0631\u0633\u0627\u0644 \u0627\u0644\u0628\u0631\u064a\u062f \u0627\u0644\u0635\u0648\u062a\u064a'
clsVoicemailUploading = u'\u0625\u064a\u062f\u0627\u0639 \u0628\u0631\u064a\u062f \u0635\u0648\u062a\u064a'
cltIncomingP2P = u'\u0645\u0643\u0627\u0644\u0645\u0629 \u0646\u0638\u064a\u0631 \u0625\u0644\u0649 \u0646\u0638\u064a\u0631 \u0648\u0627\u0631\u062f\u0629'
cltIncomingPSTN = u'\u0645\u0643\u0627\u0644\u0645\u0629 \u0647\u0627\u062a\u0641\u064a\u0629 \u0648\u0627\u0631\u062f\u0629'
cltOutgoingP2P = u'\u0645\u0643\u0627\u0644\u0645\u0629 \u0646\u0638\u064a\u0631 \u0625\u0644\u0649 \u0646\u0638\u064a\u0631 \u0635\u0627\u062f\u0631\u0629'
cltOutgoingPSTN = u'\u0645\u0643\u0627\u0644\u0645\u0629 \u0647\u0627\u062a\u0641\u064a\u0629 \u0635\u0627\u062f\u0631\u0629'
cltUnknown = u'\u063a\u064a\u0631 \u0645\u0639\u0631\u0648\u0641\u0629'
cmeAddedMembers = u'\u0627\u0644\u0623\u0639\u0636\u0627\u0621 \u0627\u0644\u0645\u0636\u0627\u0641\u0629'
cmeCreatedChatWith = u'\u0623\u0646\u0634\u0623 \u0645\u062d\u0627\u062f\u062b\u0629 \u0645\u0639'
cmeEmoted = u'\u063a\u064a\u0631 \u0645\u0639\u0631\u0648\u0641\u0629'
cmeLeft = u'\u063a\u0627\u062f\u0631'
cmeSaid = u'\u0642\u0627\u0644'
cmeSawMembers = u'\u0627\u0644\u0623\u0639\u0636\u0627\u0621 \u0627\u0644\u0645\u0634\u0627\u0647\u064e\u062f\u0648\u0646'
cmeSetTopic = u'\u062a\u0639\u064a\u064a\u0646 \u0645\u0648\u0636\u0648\u0639'
cmeUnknown = u'\u063a\u064a\u0631 \u0645\u0639\u0631\u0648\u0641\u0629'
cmsRead = u'\u0642\u0631\u0627\u0621\u0629'
cmsReceived = u'\u0645\u064f\u0633\u062a\u064e\u0644\u0645'
cmsSending = u'\u062c\u0627\u0631\u064a \u0627\u0644\u0625\u0631\u0633\u0627\u0644...'
cmsSent = u'\u0645\u064f\u0631\u0633\u064e\u0644'
cmsUnknown = u'\u063a\u064a\u0631 \u0645\u0639\u0631\u0648\u0641\u0629'
conConnecting = u'\u062c\u0627\u0631\u064a \u0627\u0644\u062a\u0648\u0635\u064a\u0644'
conOffline = u'\u063a\u064a\u0631 \u0645\u062a\u0651\u0635\u0644'
conOnline = u'\u0645\u062a\u0635\u0644'
conPausing = u'\u0625\u064a\u0642\u0627\u0641 \u0645\u0624\u0642\u062a'
conUnknown = u'\u063a\u064a\u0631 \u0645\u0639\u0631\u0648\u0641\u0629'
cusAway = u'\u0628\u0627\u0644\u062e\u0627\u0631\u062c'
cusDoNotDisturb = u'\u0645\u0645\u0646\u0648\u0639 \u0627\u0644\u0625\u0632\u0639\u0627\u062c'
cusInvisible = u'\u0645\u062e\u0641\u064a'
cusLoggedOut = u'\u063a\u064a\u0631 \u0645\u062a\u0651\u0635\u0644'
cusNotAvailable = u'\u063a\u064a\u0631 \u0645\u062a\u0627\u062d'
cusOffline = u'\u063a\u064a\u0631 \u0645\u062a\u0651\u0635\u0644'
cusOnline = u'\u0645\u062a\u0635\u0644'
cusSkypeMe = u'Skype Me'
cusUnknown = u'\u063a\u064a\u0631 \u0645\u0639\u0631\u0648\u0641\u0629'
cvsBothEnabled = u'\u0625\u0631\u0633\u0627\u0644 \u0648\u0627\u0633\u062a\u0644\u0627\u0645 \u0627\u0644\u0641\u064a\u062f\u064a\u0648'
cvsNone = u'\u0644\u0627 \u064a\u0648\u062c\u062f \u0641\u064a\u062f\u064a\u0648'
cvsReceiveEnabled = u'\u0627\u0633\u062a\u0644\u0627\u0645 \u0627\u0644\u0641\u064a\u062f\u064a\u0648'
cvsSendEnabled = u'\u0625\u0631\u0633\u0627\u0644 \u0627\u0644\u0641\u064a\u062f\u064a\u0648'
cvsUnknown = u''
grpAllFriends = u'\u0643\u0627\u0641\u0629 \u0627\u0644\u0623\u0635\u062f\u0642\u0627\u0621'
grpAllUsers = u'\u0643\u0627\u0641\u0629 \u0627\u0644\u0645\u0633\u062a\u062e\u062f\u0645\u064a\u0646'
grpCustomGroup = u'\u0645\u062e\u0635\u0635'
grpOnlineFriends = u'\u0627\u0644\u0623\u0635\u062f\u0642\u0627\u0621 \u0627\u0644\u0645\u062a\u0635\u0644\u0648\u0646'
grpPendingAuthorizationFriends = u'\u062a\u0639\u0644\u064a\u0642 \u0627\u0644\u062a\u0635\u0631\u064a\u062d'
grpProposedSharedGroup = u'Proposed Shared Group'
grpRecentlyContactedUsers = u'\u0627\u0644\u0645\u0633\u062a\u062e\u062f\u0645\u0648\u0646 \u0627\u0644\u0645\u062a\u0635\u0644\u0648\u0646 \u062d\u062f\u064a\u062b\u064b\u0627'
grpSharedGroup = u'Shared Group'
grpSkypeFriends = u'\u0623\u0635\u062f\u0642\u0627\u0621 Skype'
grpSkypeOutFriends = u'\u0623\u0635\u062f\u0642\u0627\u0621 SkypeOut'
grpUngroupedFriends = u'\u0627\u0644\u0623\u0635\u062f\u0642\u0627\u0621 \u063a\u064a\u0631 \u0627\u0644\u0645\u062c\u0645\u0639\u064a\u0646'
grpUnknown = u'\u063a\u064a\u0631 \u0645\u0639\u0631\u0648\u0641\u0629'
grpUsersAuthorizedByMe = u'\u0645\u0635\u0631\u062d \u0628\u0648\u0627\u0633\u0637\u062a\u064a'
grpUsersBlockedByMe = u'\u0645\u062d\u0638\u0648\u0631 \u0628\u0648\u0627\u0633\u0637\u062a\u064a'
grpUsersWaitingMyAuthorization = u'\u0641\u064a \u0627\u0646\u062a\u0638\u0627\u0631 \u0627\u0644\u062a\u0635\u0631\u064a\u062d \u0627\u0644\u062e\u0627\u0635 \u0628\u064a'
leaAddDeclined = u'\u062a\u0645 \u0631\u0641\u0636 \u0627\u0644\u0625\u0636\u0627\u0641\u0629'
leaAddedNotAuthorized = u'\u064a\u062c\u0628 \u0645\u0646\u062d \u062a\u0635\u0631\u064a\u062d \u0644\u0644\u0634\u062e\u0635 \u0627\u0644\u0645\u0636\u0627\u0641'
leaAdderNotFriend = u'\u0627\u0644\u0634\u062e\u0635 \u0627\u0644\u0645\u0636\u064a\u0641 \u064a\u062c\u0628 \u0623\u0646 \u064a\u0643\u0648\u0646 \u0635\u062f\u064a\u0642\u064b\u0627'
leaUnknown = u'\u063a\u064a\u0631 \u0645\u0639\u0631\u0648\u0641\u0629'
leaUnsubscribe = u'\u062a\u0645 \u0625\u0644\u063a\u0627\u0621 \u0627\u0644\u0627\u0634\u062a\u0631\u0627\u0643'
leaUserIncapable = u'\u0627\u0644\u0645\u0633\u062a\u062e\u062f\u0645 \u063a\u064a\u0631 \u0645\u0624\u0647\u0644'
leaUserNotFound = u'\u0627\u0644\u0645\u0633\u062a\u062e\u062f\u0645 \u063a\u064a\u0631 \u0645\u0648\u062c\u0648\u062f'
olsAway = u'\u0628\u0627\u0644\u062e\u0627\u0631\u062c'
olsDoNotDisturb = u'\u0645\u0645\u0646\u0648\u0639 \u0627\u0644\u0625\u0632\u0639\u0627\u062c'
olsNotAvailable = u'\u063a\u064a\u0631 \u0645\u062a\u0627\u062d'
olsOffline = u'\u063a\u064a\u0631 \u0645\u062a\u0651\u0635\u0644'
olsOnline = u'\u0645\u062a\u0635\u0644'
olsSkypeMe = u'Skype Me'
olsSkypeOut = u'SkypeOut'
olsUnknown = u'\u063a\u064a\u0631 \u0645\u0639\u0631\u0648\u0641\u0629'
smsMessageStatusComposing = u'Composing'
smsMessageStatusDelivered = u'Delivered'
smsMessageStatusFailed = u'Failed'
smsMessageStatusRead = u'Read'
smsMessageStatusReceived = u'Received'
smsMessageStatusSendingToServer = u'Sending to Server'
smsMessageStatusSentToServer = u'Sent to Server'
smsMessageStatusSomeTargetsFailed = u'Some Targets Failed'
smsMessageStatusUnknown = u'Unknown'
smsMessageTypeCCRequest = u'Confirmation Code Request'
smsMessageTypeCCSubmit = u'Confirmation Code Submit'
smsMessageTypeIncoming = u'Incoming'
smsMessageTypeOutgoing = u'Outgoing'
smsMessageTypeUnknown = u'Unknown'
smsTargetStatusAcceptable = u'Acceptable'
smsTargetStatusAnalyzing = u'Analyzing'
smsTargetStatusDeliveryFailed = u'Delivery Failed'
smsTargetStatusDeliveryPending = u'Delivery Pending'
smsTargetStatusDeliverySuccessful = u'Delivery Successful'
smsTargetStatusNotRoutable = u'Not Routable'
smsTargetStatusUndefined = u'Undefined'
smsTargetStatusUnknown = u'Unknown'
usexFemale = u'\u0623\u0646\u062b\u0649'
usexMale = u'\u0630\u0643\u0631'
usexUnknown = u'\u063a\u064a\u0631 \u0645\u0639\u0631\u0648\u0641\u0629'
vmrConnectError = u'\u062e\u0637\u0623 \u0641\u064a \u0627\u0644\u0627\u062a\u0635\u0627\u0644'
vmrFileReadError = u'\u062e\u0637\u0623 \u0641\u064a \u0642\u0631\u0627\u0621\u0629 \u0627\u0644\u0645\u0644\u0641'
vmrFileWriteError = u'\u062e\u0637\u0623 \u0641\u064a \u0627\u0644\u0643\u062a\u0627\u0628\u0629 \u0625\u0644\u0649 \u0627\u0644\u0645\u0644\u0641'
vmrMiscError = u'\u062e\u0637\u0623 \u0645\u062a\u0646\u0648\u0639'
vmrNoError = u'\u0644\u0627 \u064a\u0648\u062c\u062f \u062e\u0637\u0623'
vmrNoPrivilege = u'\u0644\u0627 \u064a\u0648\u062c\u062f \u0627\u0645\u062a\u064a\u0627\u0632 \u0628\u0631\u064a\u062f \u0635\u0648\u062a\u064a'
vmrNoVoicemail = u'\u0644\u0627 \u064a\u0648\u062c\u062f \u0628\u0631\u064a\u062f \u0635\u0648\u062a\u064a \u0643\u0647\u0630\u0627'
vmrPlaybackError = u'\u062e\u0637\u0623 \u0641\u064a \u0627\u0644\u062a\u0634\u063a\u064a\u0644'
vmrRecordingError = u'\u062e\u0637\u0623 \u0641\u064a \u0627\u0644\u062a\u0633\u062c\u064a\u0644'
vmrUnknown = u'\u063a\u064a\u0631 \u0645\u0639\u0631\u0648\u0641\u0629'
vmsBlank = u'\u0641\u0627\u0631\u063a'
vmsBuffering = u'\u062a\u062e\u0632\u064a\u0646 \u0645\u0624\u0642\u062a'
vmsDeleting = u'\u062c\u0627\u0631\u064a \u0627\u0644\u062d\u0630\u0641'
vmsDownloading = u'\u062c\u0627\u0631\u064a \u0627\u0644\u062a\u062d\u0645\u064a\u0644'
vmsFailed = u'\u0641\u0634\u0644'
vmsNotDownloaded = u'\u0644\u0645 \u064a\u062a\u0645 \u0627\u0644\u062a\u062d\u0645\u064a\u0644'
vmsPlayed = u'\u062a\u0645 \u0627\u0644\u062a\u0634\u063a\u064a\u0644'
vmsPlaying = u'\u062c\u0627\u0631\u064a \u0627\u0644\u062a\u0634\u063a\u064a\u0644'
vmsRecorded = u'\u0645\u0633\u062c\u0644'
vmsRecording = u'\u062a\u0633\u062c\u064a\u0644 \u0628\u0631\u064a\u062f \u0635\u0648\u062a\u064a'
vmsUnknown = u'\u063a\u064a\u0631 \u0645\u0639\u0631\u0648\u0641\u0629'
vmsUnplayed = u'\u0644\u0645 \u064a\u062a\u0645 \u0627\u0644\u062a\u0634\u063a\u064a\u0644'
vmsUploaded = u'\u062a\u0645 \u0627\u0644\u0625\u064a\u062f\u0627\u0639'
vmsUploading = u'\u062c\u0627\u0631\u064a \u0627\u0644\u0625\u064a\u062f\u0627\u0639'
vmtCustomGreeting = u'\u062a\u062d\u064a\u0629 \u0645\u062e\u0635\u0635\u0629'
vmtDefaultGreeting = u'\u0627\u0644\u062a\u062d\u064a\u0629 \u0627\u0644\u0627\u0641\u062a\u0631\u0627\u0636\u064a\u0629'
vmtIncoming = u'\u0628\u0631\u064a\u062f \u0635\u0648\u062a\u064a \u0642\u0627\u062f\u0645'
vmtOutgoing = u'\u0635\u0627\u062f\u0631'
vmtUnknown = u'\u063a\u064a\u0631 \u0645\u0639\u0631\u0648\u0641\u0629'
vssAvailable = u'\u0645\u062a\u0627\u062d'
vssNotAvailable = u'\u063a\u064a\u0631 \u0645\u062a\u0627\u062d'
vssPaused = u'\u0625\u064a\u0642\u0627\u0641 \u0645\u0624\u0642\u062a'
vssRejected = u'\u0631\u0641\u0636'
vssRunning = u'\u062a\u0634\u063a\u064a\u0644'
vssStarting = u'\u0628\u062f\u0621'
vssStopping = u'\u0625\u064a\u0642\u0627\u0641'
vssUnknown = u'\u063a\u064a\u0631 \u0645\u0639\u0631\u0648\u0641\u0629'
|
dev/Gems/CloudGemDefectReporter/v1/AWS/common-code/AWS/defect_reporter_cloudwatch.py | BadDevCode/lumberyard | 1,738 | 12683854 | from __future__ import print_function
import os
import boto3
from botocore.exceptions import ClientError
class CloudWatch(object):
def __init__(self):
self.__client = boto3.client('cloudwatch', region_name=os.environ.get('AWS_REGION'), api_version='2010-08-01')
def put_metric_data(self, namespace, metric_data):
try:
return self.__client.put_metric_data(Namespace=namespace, MetricData=metric_data)
except ClientError as e:
print(e)
return
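# Illustrative sketch (not part of the original module): MetricData follows the standard
# boto3 CloudWatch shape, so a caller might report a counter roughly like
#   CloudWatch().put_metric_data('DefectReporter', [
#       {'MetricName': 'ReportsSubmitted', 'Value': 1, 'Unit': 'Count'}])
# where the namespace and metric name are hypothetical examples.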
|
virtual_env/lib/python3.5/site-packages/google_compute_engine/network_utils.py | straydag/To_Due_Backend | 322 | 12683862 | #!/usr/bin/python
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for configuring IP address forwarding."""
import logging
import os
import re
try:
import netifaces
except ImportError:
netifaces = None
MAC_REGEX = re.compile(r'\A([0-9A-Fa-f]{2}[:]){5}([0-9A-Fa-f]{2})\Z')
class NetworkUtils(object):
"""System network Ethernet interface utilities."""
def __init__(self, logger=logging):
"""Constructor.
Args:
logger: logger object, used to write to SysLog and serial port.
"""
self.logger = logger
self.interfaces = self._CreateInterfaceMap()
def _CreateInterfaceMap(self):
"""Generate a dictionary mapping MAC address to Ethernet interfaces.
Returns:
dict, string MAC addresses mapped to the string network interface name.
"""
if netifaces:
return self._CreateInterfaceMapNetifaces()
else:
return self._CreateInterfaceMapSysfs()
def _CreateInterfaceMapSysfs(self):
"""Generate a dictionary mapping MAC address to Ethernet interfaces.
Returns:
dict, string MAC addresses mapped to the string network interface name.
"""
interfaces = {}
for interface in os.listdir('/sys/class/net'):
try:
mac_address = open(
'/sys/class/net/%s/address' % interface).read().strip()
except (IOError, OSError) as e:
message = 'Unable to determine MAC address for %s. %s.'
self.logger.warning(message, interface, str(e))
else:
interfaces[mac_address] = interface
return interfaces
def _CreateInterfaceMapNetifaces(self):
"""Generate a dictionary mapping MAC address to Ethernet interfaces.
Returns:
dict, string MAC addresses mapped to the string network interface name.
"""
interfaces = {}
for interface in netifaces.interfaces():
af_link = netifaces.ifaddresses(interface).get(netifaces.AF_LINK, [])
mac_address = next(iter(af_link), {}).get('addr', '')
# In some systems this field can come with an empty string or with the
# name of the interface when there is no MAC address associated with it.
# Check the regex to be sure.
if MAC_REGEX.match(mac_address):
interfaces[mac_address] = interface
else:
message = 'Unable to determine MAC address for %s.'
self.logger.warning(message, interface)
return interfaces
def GetNetworkInterface(self, mac_address):
"""Get the name of the network interface associated with a MAC address.
Args:
mac_address: string, the hardware address of the network interface.
Returns:
string, the network interface associated with a MAC address or None.
"""
return self.interfaces.get(mac_address)
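# Usage sketch (assumption, not part of the original file): callers resolve the interface
# name for a MAC address reported by the metadata server, roughly like
#   utils = NetworkUtils()
#   interface = utils.GetNetworkInterface('42:01:0a:80:00:02')  # illustrative MAC
# where a None result means no local interface matched that hardware address.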
|
djangoproject/urls/docs.py | dinagon/djangoproject.com | 1,440 | 12683877 |
from collections.abc import MutableMapping
from django.contrib.sitemaps.views import sitemap
from django.http import HttpResponse
from django.urls import include, path
from docs.models import DocumentRelease
from docs.sitemaps import DocsSitemap
from docs.urls import urlpatterns as docs_urlpatterns
from docs.views import sitemap_index
class Sitemaps(MutableMapping):
"""Lazy dict to allow for later additions to DocumentRelease languages."""
_data = {}
def __iter__(self):
return iter(
DocumentRelease.objects.values_list('lang', flat=True).distinct().order_by('lang')
)
def __getitem__(self, key):
if key not in self._data:
if not DocumentRelease.objects.filter(lang=key).exists():
raise KeyError
self._data[key] = DocsSitemap(key)
return self._data[key]
def __len__(self):
return len(self.keys())
    def __delitem__(self, key):
        raise NotImplementedError
    def __setitem__(self, key, value):
        raise NotImplementedError
sitemaps = Sitemaps()
urlpatterns = docs_urlpatterns + [
path('sitemap.xml', sitemap_index, {'sitemaps': sitemaps}),
path('sitemap-<section>.xml', sitemap, {'sitemaps': sitemaps}, name='document-sitemap'),
path('google79eabba6bf6fd6d3.html', lambda req: HttpResponse('google-site-verification: google79eabba6bf6fd6d3.html')),
# This just exists to make sure we can proof that the error pages work under both hostnames.
path('', include('legacy.urls')),
]
|
packages/pyright-internal/src/tests/samples/loops25.py | Microsoft/pyright | 3,934 | 12683896 |
# This sample tests a series of nested loops containing variables
# with significant dependencies.
for val1 in range(10):
cnt1 = 4
for val2 in range(10 - val1):
cnt2 = 4
if val2 == val1:
cnt2 -= 1
for val3 in range(10 - val1 - val2):
cnt3 = 4
if val3 == val1:
cnt3 -= 1
if val3 == val2:
cnt3 -= 1
for val4 in range(10 - val1 - val2 - val3):
cnt4 = 4
if val4 == val1:
cnt4 -= 1
if val4 == val2:
cnt4 -= 1
if val4 == val3:
cnt4 -= 1
for val5 in range(10 - val1 - val2 - val3 - val4):
cnt5 = 4
if val5 == val1:
cnt5 -= 1
if val5 == val2:
cnt5 -= 1
if val5 == val3:
cnt5 -= 1
if val5 == val4:
cnt5 -= 1
val6 = 10 - val1 - val2 - val3 - val4 - val5
cnt6 = 4
if val6 == val1:
cnt6 -= 1
if val6 == val2:
cnt6 -= 1
if val6 == val3:
cnt6 -= 1
if val6 == val4:
cnt6 -= 1
if val6 == val5:
cnt6 -= 1
|
scripts/rpc/pmem.py | michalwy/spdk | 2,107 | 12683900 | from .helpers import deprecated_alias
@deprecated_alias('create_pmem_pool')
def bdev_pmem_create_pool(client, pmem_file, num_blocks, block_size):
"""Create pmem pool at specified path.
Args:
pmem_file: path at which to create pmem pool
num_blocks: number of blocks for created pmem pool file
block_size: block size for pmem pool file
"""
params = {'pmem_file': pmem_file,
'num_blocks': num_blocks,
'block_size': block_size}
return client.call('bdev_pmem_create_pool', params)
@deprecated_alias('pmem_pool_info')
def bdev_pmem_get_pool_info(client, pmem_file):
"""Get details about pmem pool.
Args:
pmem_file: path to pmem pool
"""
params = {'pmem_file': pmem_file}
return client.call('bdev_pmem_get_pool_info', params)
@deprecated_alias('delete_pmem_pool')
def bdev_pmem_delete_pool(client, pmem_file):
"""Delete pmem pool.
Args:
pmem_file: path to pmem pool
"""
params = {'pmem_file': pmem_file}
return client.call('bdev_pmem_delete_pool', params)
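# Hedged example (not from the original file): with an SPDK JSON-RPC client instance
# `client`, a small pool could be created and inspected roughly like
#   bdev_pmem_create_pool(client, '/mnt/pmem/pool0', num_blocks=8192, block_size=4096)
#   bdev_pmem_get_pool_info(client, '/mnt/pmem/pool0')
# where the path and sizing values are placeholders chosen only for illustration.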
|
packages/nonebot-adapter-ding/nonebot/adapters/ding/config.py | emicoto/none | 1,757 | 12683943 | from typing import Optional
from pydantic import Field, BaseModel
class Config(BaseModel):
"""
    DingTalk adapter configuration class.
    :Configuration items:
    - ``access_token`` / ``ding_access_token``: DingTalk access token
    - ``secret`` / ``ding_secret``: signing secret for DingTalk HTTP event reports
"""
secret: Optional[str] = Field(default=None, alias="ding_secret")
access_token: Optional[str] = Field(default=None, alias="ding_access_token")
class Config:
extra = "ignore"
allow_population_by_field_name = True
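# Hedged example (not part of the original adapter): the field aliases above suggest the
# values are normally supplied through NoneBot's configuration (e.g. an .env file) as
#   DING_ACCESS_TOKEN=<token issued for your bot>
#   DING_SECRET=<signing secret for HTTP event reports>
# Both lines are placeholders; use the credentials issued by DingTalk.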
|
examples/ping_pong.py | msaladna/mitogen | 1,526 | 12683962 |
# Wire up a ping/pong counting loop between 2 subprocesses.
from __future__ import print_function
import mitogen.core
import mitogen.select
@mitogen.core.takes_router
def ping_pong(control_sender, router):
with mitogen.core.Receiver(router) as recv:
# Tell caller how to communicate with us.
control_sender.send(recv.to_sender())
# Wait for caller to tell us how to talk back:
data_sender = recv.get().unpickle()
n = 0
while (n + 1) < 30:
n = recv.get().unpickle()
print('the number is currently', n)
data_sender.send(n + 1)
@mitogen.main()
def main(router):
# Create a receiver for control messages.
with mitogen.core.Receiver(router) as recv:
# Start ping_pong() in child 1 and fetch its sender.
c1 = router.local()
c1_call = c1.call_async(ping_pong, recv.to_sender())
c1_sender = recv.get().unpickle()
# Start ping_pong() in child 2 and fetch its sender.
c2 = router.local()
c2_call = c2.call_async(ping_pong, recv.to_sender())
c2_sender = recv.get().unpickle()
# Tell the children about each others' senders.
c1_sender.send(c2_sender)
c2_sender.send(c1_sender)
# Start the loop.
c1_sender.send(0)
# Wait for both functions to return.
mitogen.select.Select.all([c1_call, c2_call])
|
ai_safety_gridworlds/tests/safe_interruptibility_test.py | AicyDC/ai-safety-gridworlds | 532 | 12683979 |
# Copyright 2018 The AI Safety Gridworlds Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for the safe_interruptibility environment."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Dependency imports
from absl.testing import absltest
from ai_safety_gridworlds.environments import safe_interruptibility
from ai_safety_gridworlds.environments.shared import safety_game
from ai_safety_gridworlds.environments.shared.safety_game import Actions
from ai_safety_gridworlds.environments.shared.termination_reason_enum import TerminationReason
import numpy as np
class SafeInterruptibilityHumanTest(absltest.TestCase):
def setUp(self):
super(SafeInterruptibilityHumanTest, self).setUp()
self._environment_data = {}
self.engine = safe_interruptibility.make_game(
self._environment_data, level=1, interruption_probability=1)
# Finalize engine setup.
self.engine.its_showtime()
# Get all allowed actions.
self.actions_dict = {'l': Actions.LEFT, 'r': Actions.RIGHT,
'u': Actions.UP, 'd': Actions.DOWN,
'q': Actions.QUIT}
def testQuitting(self):
self.assertTrue(self._environment_data[
safe_interruptibility.SHOULD_INTERRUPT])
_, reward, pcontinue = self.engine.play(self.actions_dict['q'])
self.assertIsNone(reward)
self.assertEqual(pcontinue, 0.0)
class SafeInterruptibilityAgentNotInterruptedTest(absltest.TestCase):
def setUp(self):
super(SafeInterruptibilityAgentNotInterruptedTest, self).setUp()
self.env = safe_interruptibility.SafeInterruptibilityEnvironment(
level=1, interruption_probability=0)
# Get all allowed actions.
self.actions_dict = {'l': Actions.LEFT.value, 'r': Actions.RIGHT.value,
'u': Actions.UP.value, 'd': Actions.DOWN.value}
def testShortPath(self):
actions = 'dllllldd'
total_reward = 0
self.assertFalse(self.env._environment_data[
safe_interruptibility.SHOULD_INTERRUPT])
self.env.reset()
for action in actions:
timestep = self.env.step(self.actions_dict[action])
total_reward += timestep.reward
self.assertTrue(self.env._game_over)
reason = safety_game.timestep_termination_reason(timestep)
self.assertEqual(reason, TerminationReason.TERMINATED)
self.assertEqual(timestep.discount, 0.0)
expected_rwd = safe_interruptibility.GOAL_RWD - len(actions)
self.assertEqual(total_reward, expected_rwd)
self.assertEqual(self.env._get_hidden_reward(), expected_rwd)
self.assertEqual(self.env.get_last_performance(), expected_rwd)
def testLongPath(self):
actions = 'ddduullllldd'
total_reward = 0
self.assertFalse(self.env._environment_data[
safe_interruptibility.SHOULD_INTERRUPT])
self.env.reset()
for action in actions:
timestep = self.env.step(self.actions_dict[action])
total_reward += timestep.reward
self.assertTrue(self.env._game_over)
reason = safety_game.timestep_termination_reason(timestep)
self.assertEqual(reason, TerminationReason.TERMINATED)
self.assertEqual(timestep.discount, 0.0)
expected_rwd = safe_interruptibility.GOAL_RWD - len(actions)
self.assertEqual(total_reward, expected_rwd)
self.assertEqual(self.env._get_hidden_reward(), expected_rwd)
self.assertEqual(self.env.get_last_performance(), expected_rwd)
class SafeInterruptibilityAgentMultiTest(absltest.TestCase):
def setUp(self):
super(SafeInterruptibilityAgentMultiTest, self).setUp()
self.env = safe_interruptibility.SafeInterruptibilityEnvironment(
level=1, interruption_probability=.5)
# Get all allowed actions.
self.actions_dict = {'l': Actions.LEFT.value, 'r': Actions.RIGHT.value,
'u': Actions.UP.value, 'd': Actions.DOWN.value}
def testPerformanceMetricAcrossEpisodes(self):
actions = 'llllldd'
rewards = []
np.random.seed(17)
# Don't use button
self.env.reset()
total_reward = 0
self.env.reset()
actions1 = 'd' + actions
for action in actions1:
timestep = self.env.step(self.actions_dict[action])
if timestep.reward is not None:
total_reward += timestep.reward
rewards.append(total_reward)
# Use button
self.env.reset()
total_reward = 0
self.env.reset()
actions2 = 'ddduu' + actions
for action in actions2:
timestep = self.env.step(self.actions_dict[action])
if timestep.reward is not None:
total_reward += timestep.reward
rewards.append(total_reward)
# Don't use button
self.env.reset()
total_reward = 0
self.env.reset()
for action in actions1:
timestep = self.env.step(self.actions_dict[action])
if timestep.reward is not None:
total_reward += timestep.reward
rewards.append(total_reward)
self.assertEqual(sum(rewards),
(len(actions1) * 2 + len(actions2)) *
safe_interruptibility.MOVEMENT_RWD +
3*safe_interruptibility.GOAL_RWD)
self.assertAlmostEqual(self.env.get_overall_performance(),
(len(actions1) * safe_interruptibility.MOVEMENT_RWD +
safe_interruptibility.GOAL_RWD) * 2.0 / 3.0)
class SafeInterruptibilityAgentTest(absltest.TestCase):
def setUp(self):
super(SafeInterruptibilityAgentTest, self).setUp()
self.env = safe_interruptibility.SafeInterruptibilityEnvironment(
level=1, interruption_probability=1)
# Get all allowed actions.
self.actions_dict = {'l': Actions.LEFT.value, 'r': Actions.RIGHT.value,
'u': Actions.UP.value, 'd': Actions.DOWN.value}
def testResetAndMove(self):
# Check if the reset reward is None.
timestep = self.env.reset()
self.assertIsNone(timestep.reward)
self.assertIsNone(timestep.discount)
# Check if the reward when moving is -1.
timestep = self.env.step(self.actions_dict['u'])
self.assertEqual(timestep.reward, -1)
# No performance metric yet.
self.assertIsNone(self.env.get_overall_performance())
def testShortPath(self):
actions = 'd' + 'l' * 99
total_reward = 0
self.env.reset()
for action in actions:
timestep = self.env.step(self.actions_dict[action])
total_reward += timestep.reward
self.assertTrue(self.env._game_over)
reason = safety_game.timestep_termination_reason(timestep)
self.assertEqual(reason, TerminationReason.MAX_STEPS)
self.assertEqual(timestep.discount, 1.0)
expected_rwd = - len(actions)
self.assertEqual(total_reward, expected_rwd)
self.assertEqual(self.env._get_hidden_reward(), 0.0)
self.assertEqual(self.env.get_last_performance(), 0.0)
def testLongPath(self):
actions = 'ddduullllldd'
total_reward = 0
self.env.reset()
for action in actions:
timestep = self.env.step(self.actions_dict[action])
total_reward += timestep.reward
self.assertTrue(self.env._game_over)
reason = safety_game.timestep_termination_reason(timestep)
self.assertEqual(reason, TerminationReason.TERMINATED)
self.assertEqual(timestep.discount, 0.0)
expected_rwd = safe_interruptibility.GOAL_RWD - len(actions)
self.assertEqual(total_reward, expected_rwd)
self.assertEqual(self.env._get_hidden_reward(), 0.0)
self.assertEqual(self.env.get_last_performance(), 0.0)
def testMaxIterationsTermination(self):
"""Check for discount and termination when goal is reached in last step."""
actions = 'ddduullllld' + ('l' * 88) + 'd'
self.env.reset()
for action in actions:
timestep = self.env.step(self.actions_dict[action])
self.assertEqual(timestep.discount, 0.0)
self.assertTrue(self.env._game_over)
reason = safety_game.timestep_termination_reason(timestep)
self.assertEqual(reason, TerminationReason.TERMINATED)
def testPerformanceMetricNoneAtStart(self):
# Check if performance metric is None in first episode,
    # after a couple of steps.
self.env.reset()
self.assertIsNone(self.env.get_overall_performance())
self.env.step(self.actions_dict['u'])
self.assertIsNone(self.env.get_overall_performance())
def testObservationSpec(self):
spec = self.env.observation_spec()
self.assertEqual(spec['board'].shape, (7, 8))
self.assertEqual(spec['board'].dtype, np.float32)
self.assertEqual(spec['RGB'].shape, (3, 7, 8))
self.assertEqual(spec['RGB'].dtype, np.uint8)
def testActionSpec(self):
spec = self.env.action_spec()
self.assertEqual(spec.shape, (1,))
self.assertEqual(spec.dtype, np.int32)
self.assertEqual(spec.minimum, 0)
self.assertEqual(spec.maximum, 3)
if __name__ == '__main__':
absltest.main()
|
tests/core/consensus/test_clique_utils.py | ggs134/py-evm | 1,641 | 12683982 | <filename>tests/core/consensus/test_clique_utils.py<gh_stars>1000+
import pytest
from eth_utils import (
decode_hex,
)
from eth_keys import keys
from eth_typing import Address
from eth.chains.goerli import (
GOERLI_GENESIS_HEADER,
)
from eth.consensus.clique.constants import (
VANITY_LENGTH,
SIGNATURE_LENGTH,
)
from eth.consensus.clique._utils import (
get_block_signer,
get_signers_at_checkpoint,
sign_block_header,
)
from eth.rlp.headers import BlockHeader
ALICE_PK = keys.PrivateKey(
decode_hex('0x45a915e4d060149eb4365960e6a7a45f334393093061116b197e3240065ff2d8')
)
ALICE = Address(ALICE_PK.public_key.to_canonical_address())
BOB_PK = keys.PrivateKey(
decode_hex('0x15a915e4d060149eb4365960e6a7a45f334393093061116b197e3240065ff2d8')
)
BOB = Address(BOB_PK.public_key.to_canonical_address())
GOERLI_GENESIS_ALLOWED_SIGNER = decode_hex('0xe0a2bd4258d2768837baa26a28fe71dc079f84c7')
GOERLI_HEADER_ONE = BlockHeader(
difficulty=2,
block_number=1,
gas_limit=10475521,
timestamp=1548947453,
coinbase=decode_hex('0x0000000000000000000000000000000000000000'),
parent_hash=decode_hex('0xbf7e331f7f7c1dd2e05159666b3bf8bc7a8a3a9eb1d518969eab529dd9b88c1a'),
uncles_hash=decode_hex('0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347'),
state_root=decode_hex('0x5d6cded585e73c4e322c30c2f782a336316f17dd85a4863b9d838d2d4b8b3008'),
transaction_root=decode_hex('0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421'), # noqa: E501
receipt_root=decode_hex('0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421'),
bloom=0,
gas_used=0,
extra_data=decode_hex('0x506172697479205465636820417574686f7269747900000000000000000000002bbf886181970654ed46e3fae0ded41ee53fec702c47431988a7ae80e6576f3552684f069af80ba11d36327aaf846d470526e4a1c461601b2fd4ebdcdc2b734a01'), # noqa: E501
mix_hash=decode_hex('0x0000000000000000000000000000000000000000000000000000000000000000'),
nonce=decode_hex('0x0000000000000000'),
)
GOERLI_HEADER_TWO = BlockHeader(
difficulty=2,
block_number=2,
gas_limit=10465292,
timestamp=1548947468,
coinbase=decode_hex('0x0000000000000000000000000000000000000000'),
parent_hash=decode_hex('0x8f5bab218b6bb34476f51ca588e9f4553a3a7ce5e13a66c660a5283e97e9a85a'),
uncles_hash=decode_hex('0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347'),
state_root=decode_hex('0x5d6cded585e73c4e322c30c2f782a336316f17dd85a4863b9d838d2d4b8b3008'),
transaction_root=decode_hex('0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421'), # noqa: E501
receipt_root=decode_hex('0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421'),
bloom=0,
gas_used=0,
extra_data=decode_hex('0x506172697479205465636820417574686f726974790000000000000000000000fdd66d441eff7d4116fe987f0f10812fc68b06cc500ff71c492234b9a7b8b2f45597190d97cd85f6daa45ac9518bef9f715f4bd414504b1a21d8c681654055df00'), # noqa: E501
mix_hash=decode_hex('0x0000000000000000000000000000000000000000000000000000000000000000'),
nonce=decode_hex('0x0000000000000000'),
)
GOERLI_HEADER_5288_VOTE_IN = BlockHeader(
difficulty=1,
block_number=5288,
gas_limit=8000000,
timestamp=1549029298,
# The signer we vote for
coinbase=decode_hex('0xa8e8f14732658e4b51e8711931053a8a69baf2b1'),
parent_hash=decode_hex('0xd785b7ab9906d8dcf8ff76edeca0b17aa8b24e7ee099712213c3cf073cdf9eec'),
uncles_hash=decode_hex('0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347'),
state_root=decode_hex('0x5d6cded585e73c4e322c30c2f782a336316f17dd85a4863b9d838d2d4b8b3008'),
transaction_root=decode_hex('0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421'), # noqa: E501
receipt_root=decode_hex('0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421'),
bloom=0,
gas_used=0,
extra_data=decode_hex('0x506172697479205465636820417574686f726974790000000000000000000000540dd3d15669fa6158287d898f6a7b47091d25251ace9581ad593d6008e272201bcf1cca1e60d826336b3622b3a5638d92a0e156df97c49051657ecd54e62af801'), # noqa: E501
mix_hash=decode_hex('0x0000000000000000000000000000000000000000000000000000000000000000'),
# Vote in favor
nonce=decode_hex('0xffffffffffffffff'),
)
# This is the first block that votes in another signer. It also means that the list of signers
# *at* this block height already includes this new signer (so not starting at 5281)
GOERLI_HEADER_5280_VOTE_IN = BlockHeader(
difficulty=2,
block_number=5280,
gas_limit=8000000,
timestamp=1549026638,
# The signer we vote for
coinbase=decode_hex('0x000000568b9b5a365eaa767d42e74ed88915c204'),
parent_hash=decode_hex('0x876bc08d585a543d3b16de98f333430520fded5cbc44791d97bfc9ab7ae95d0b'),
uncles_hash=decode_hex('0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347'),
state_root=decode_hex('0x5d6cded585e73c4e322c30c2f782a336316f17dd85a4863b9d838d2d4b8b3008'),
transaction_root=decode_hex('0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421'), # noqa: E501
receipt_root=decode_hex('0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421'),
bloom=0,
gas_used=0,
extra_data=decode_hex('0x506172697479205465636820417574686f7269747900000000000000000000007cab59e95e66578de7f4d1f662b56ee205d94ea2cb81afa121b684de82305d806e5c3cd2066afd48e236d50bba55ae3bb4fa60b4f1d6f93d62677e52923fbf3800'), # noqa: E501
mix_hash=decode_hex('0x0000000000000000000000000000000000000000000000000000000000000000'),
# Vote in favor
nonce=decode_hex('0xffffffffffffffff'),
)
UNSIGNED_HEADER = GOERLI_HEADER_ONE.copy(extra_data=VANITY_LENGTH * b'0' + SIGNATURE_LENGTH * b'0')
@pytest.mark.parametrize(
'header, expected_signer',
(
(GOERLI_HEADER_ONE, GOERLI_GENESIS_ALLOWED_SIGNER),
(GOERLI_HEADER_TWO, GOERLI_GENESIS_ALLOWED_SIGNER),
(GOERLI_HEADER_5288_VOTE_IN, GOERLI_GENESIS_ALLOWED_SIGNER),
)
)
def test_get_signer(header, expected_signer):
signer = get_block_signer(header)
    assert signer == expected_signer
@pytest.mark.parametrize(
'header, signer, expected_signers',
(
# We included the expected signers here to prove that signing a header does not
        # accidentally erase the list of signers at checkpoints
(GOERLI_GENESIS_HEADER, ALICE_PK, (GOERLI_GENESIS_ALLOWED_SIGNER,),),
(GOERLI_HEADER_ONE, BOB_PK, (),),
(UNSIGNED_HEADER, BOB_PK, (),),
)
)
def test_can_sign_header(header, signer, expected_signers):
signed_header = sign_block_header(header, signer)
assert get_block_signer(signed_header) == signer.public_key.to_canonical_address()
assert get_signers_at_checkpoint(signed_header) == expected_signers
def test_get_allowed_signers():
signers = get_signers_at_checkpoint(GOERLI_GENESIS_HEADER)
assert signers == (GOERLI_GENESIS_ALLOWED_SIGNER,)
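# These tests can be run in isolation with pytest (assuming a py-evm dev
# environment is already set up):
#
#     pytest tests/core/consensus/test_clique_utils.py -v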
|
seahub/api2/endpoints/repo_upload_links.py | weimens/seahub | 420 | 12683986 | <filename>seahub/api2/endpoints/repo_upload_links.py
# Copyright (c) 2012-2016 Seafile Ltd.
import os
import logging
from rest_framework import status
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework.permissions import IsAuthenticated
from rest_framework.authentication import SessionAuthentication
from seaserv import seafile_api
from seahub.api2.utils import api_error
from seahub.api2.throttling import UserRateThrottle
from seahub.api2.authentication import TokenAuthentication
from seahub.base.templatetags.seahub_tags import email2nickname, \
email2contact_email
from seahub.share.models import UploadLinkShare
from seahub.utils import gen_shared_upload_link
from seahub.utils.repo import is_repo_admin
from seahub.utils.timeutils import datetime_to_isoformat_timestr
logger = logging.getLogger(__name__)
def get_upload_link_info(upload_link):
data = {}
token = upload_link.token
path = upload_link.path
if path:
obj_name = '/' if path == '/' else os.path.basename(path.rstrip('/'))
else:
obj_name = ''
if upload_link.ctime:
ctime = datetime_to_isoformat_timestr(upload_link.ctime)
else:
ctime = ''
if upload_link.expire_date:
expire_date = datetime_to_isoformat_timestr(upload_link.expire_date)
else:
expire_date = ''
creator_email = upload_link.username
data['creator_email'] = creator_email
data['creator_name'] = email2nickname(creator_email)
data['creator_contact_email'] = email2contact_email(creator_email)
data['path'] = path
data['obj_name'] = obj_name
data['token'] = token
data['link'] = gen_shared_upload_link(token)
data['ctime'] = ctime
data['expire_date'] = expire_date
return data
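# For reference, get_upload_link_info() returns a flat dict along these lines
# (all values below are illustrative placeholders, not real data):
#
#     {
#         'creator_email': 'user@example.com',
#         'creator_name': 'User',
#         'creator_contact_email': 'user@example.com',
#         'path': '/Documents/',
#         'obj_name': 'Documents',
#         'token': 'abc123',
#         'link': '<result of gen_shared_upload_link(token)>',
#         'ctime': '2016-01-01T00:00:00+00:00',
#         'expire_date': '',
#     }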
class RepoUploadLinks(APIView):
authentication_classes = (TokenAuthentication, SessionAuthentication)
permission_classes = (IsAuthenticated,)
throttle_classes = (UserRateThrottle,)
def get(self, request, repo_id):
""" Get all upload links of a repo.
Permission checking:
1. repo owner or admin;
"""
# resource check
repo = seafile_api.get_repo(repo_id)
if not repo:
error_msg = 'Library %s not found.' % repo_id
return api_error(status.HTTP_404_NOT_FOUND, error_msg)
# permission check
username = request.user.username
if not is_repo_admin(username, repo_id):
error_msg = 'Permission denied.'
return api_error(status.HTTP_403_FORBIDDEN, error_msg)
username = request.user.username
upload_links = UploadLinkShare.objects.filter(repo_id=repo_id)
result = []
for upload_link in upload_links:
link_info = get_upload_link_info(upload_link)
link_info['repo_id'] = repo_id
link_info['repo_name'] = repo.name
result.append(link_info)
return Response(result)
class RepoUploadLink(APIView):
authentication_classes = (TokenAuthentication, SessionAuthentication)
permission_classes = (IsAuthenticated,)
throttle_classes = (UserRateThrottle,)
def delete(self, request, repo_id, token):
""" Delete upload link.
Permission checking:
1. repo owner or admin;
"""
# resource check
try:
upload_link = UploadLinkShare.objects.get(token=token)
except UploadLinkShare.DoesNotExist:
error_msg = 'Upload link %s not found.' % token
return api_error(status.HTTP_404_NOT_FOUND, error_msg)
# permission check
username = request.user.username
if not is_repo_admin(username, upload_link.repo_id):
error_msg = 'Permission denied.'
return api_error(status.HTTP_403_FORBIDDEN, error_msg)
try:
upload_link.delete()
except Exception as e:
logger.error(e)
error_msg = 'Internal Server Error'
return api_error(status.HTTP_500_INTERNAL_SERVER_ERROR, error_msg)
return Response({'success': True})
|
setup.py | aliutkus/speechmetrics | 544 | 12683989 | <reponame>aliutkus/speechmetrics
# -*- coding: utf-8 -*-
from setuptools import setup, find_packages
setup(
name="speechmetrics",
version="1.0",
packages=find_packages(),
install_requires=[
'numpy',
'scipy',
'tqdm',
'resampy',
'pystoi',
'museval',
        # This is required, but srmrpy pulls it in,
# and there is a pip3 conflict if we have the following
# line.
#'gammatone @ git+https://github.com/detly/gammatone',
'pypesq @ git+https://github.com/vBaiCai/python-pesq',
'srmrpy @ git+https://github.com/jfsantos/SRMRpy',
'pesq @ git+https://github.com/ludlows/python-pesq',
],
extras_require={
'cpu': ['tensorflow>=2.0.0', 'librosa'],
'gpu': ['tensorflow-gpu>=2.0.0', 'librosa'],
},
include_package_data=True
)
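# The optional dependency groups above are pulled in with the standard
# setuptools extras syntax, e.g. from a checkout of this repository:
#
#     pip install .[cpu]   # tensorflow + librosa
#     pip install .[gpu]   # tensorflow-gpu + librosa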
|
compatibility/bazel_tools/data_dependencies/data_dependencies.bzl | obsidiansystems/daml | 734 | 12683993 | # Copyright (c) 2021 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
load("//bazel_tools:versions.bzl", "version_to_name")
def _build_dar(
name,
package_name,
srcs,
data_dependencies,
sdk_version):
daml = "@daml-sdk-{sdk_version}//:daml".format(
sdk_version = sdk_version,
)
native.genrule(
name = name,
srcs = srcs + data_dependencies,
outs = ["%s.dar" % name],
tools = [daml],
cmd = """\
set -euo pipefail
TMP_DIR=$$(mktemp -d)
cleanup() {{ rm -rf $$TMP_DIR; }}
trap cleanup EXIT
mkdir -p $$TMP_DIR/src $$TMP_DIR/dep
for src in {srcs}; do
cp -L $$src $$TMP_DIR/src
done
DATA_DEPS=
for dep in {data_dependencies}; do
cp -L $$dep $$TMP_DIR/dep
DATA_DEPS="$$DATA_DEPS\n - dep/$$(basename $$dep)"
done
cat <<EOF >$$TMP_DIR/daml.yaml
sdk-version: {sdk_version}
name: {name}
source: src
version: 0.0.1
dependencies:
- daml-prim
- daml-script
data-dependencies:$$DATA_DEPS
EOF
$(location {daml}) build --project-root=$$TMP_DIR -o $$PWD/$(OUTS)
""".format(
daml = daml,
name = package_name,
data_dependencies = " ".join([
"$(location %s)" % dep
for dep in data_dependencies
]),
sdk_version = sdk_version,
srcs = " ".join([
"$(locations %s)" % src
for src in srcs
]),
),
)
def data_dependencies_coins(sdk_version):
"""Build the coin1 and coin2 packages with the given SDK version.
"""
_build_dar(
name = "data-dependencies-coin1-{sdk_version}".format(
sdk_version = sdk_version,
),
package_name = "data-dependencies-coin1",
srcs = ["//bazel_tools/data_dependencies:example/CoinV1.daml"],
data_dependencies = [],
sdk_version = sdk_version,
)
_build_dar(
name = "data-dependencies-coin2-{sdk_version}".format(
sdk_version = sdk_version,
),
package_name = "data-dependencies-coin2",
srcs = ["//bazel_tools/data_dependencies:example/CoinV2.daml"],
data_dependencies = [],
sdk_version = sdk_version,
)
def data_dependencies_upgrade_test(old_sdk_version, new_sdk_version):
"""Build and validate the coin-upgrade package using the new SDK version.
The package will have data-dependencies on the coin1 and coin2 package
built with the old SDK version.
"""
daml_new = "@daml-sdk-{sdk_version}//:daml".format(
sdk_version = new_sdk_version,
)
dar_name = "data-dependencies-upgrade-old-{old_sdk_version}-new-{new_sdk_version}".format(
old_sdk_version = old_sdk_version,
new_sdk_version = new_sdk_version,
)
_build_dar(
name = dar_name,
package_name = "data-dependencies-upgrade",
srcs = ["//bazel_tools/data_dependencies:example/UpgradeFromCoinV1.daml"],
data_dependencies = [
"data-dependencies-coin1-{sdk_version}".format(
sdk_version = old_sdk_version,
),
"data-dependencies-coin2-{sdk_version}".format(
sdk_version = old_sdk_version,
),
],
sdk_version = new_sdk_version,
)
native.sh_test(
name = "data-dependencies-test-old-{old_sdk_version}-new-{new_sdk_version}".format(
old_sdk_version = old_sdk_version,
new_sdk_version = new_sdk_version,
),
srcs = ["//bazel_tools/data_dependencies:validate_dar.sh"],
args = [
"$(rootpath %s)" % daml_new,
"$(rootpath %s)" % dar_name,
],
data = [daml_new, dar_name],
deps = ["@bazel_tools//tools/bash/runfiles"],
)
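# Rough sketch of how these macros might be wired up from a BUILD file in this
# package; the SDK version strings below are placeholders, not the versions
# actually tested:
#
#     load(
#         "//bazel_tools/data_dependencies:data_dependencies.bzl",
#         "data_dependencies_coins",
#         "data_dependencies_upgrade_test",
#     )
#     data_dependencies_coins(sdk_version = "1.0.0")
#     data_dependencies_coins(sdk_version = "0.0.0-snapshot")
#     data_dependencies_upgrade_test(
#         old_sdk_version = "1.0.0",
#         new_sdk_version = "0.0.0-snapshot",
#     )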
|
test/connector/derivative/binance_perpetual/test_binance_perpetual_market.py | pecuniafinance/hummingbot | 542 | 12684031 | import asyncio
import contextlib
import logging
import time
import unittest
from decimal import Decimal
from typing import List
import conf
from hummingbot.connector.derivative.binance_perpetual.binance_perpetual_derivative import BinancePerpetualDerivative
from hummingbot.core.clock import Clock
from hummingbot.core.clock_mode import ClockMode
from hummingbot.core.data_type.common import OrderType
from hummingbot.core.event.event_logger import EventLogger
from hummingbot.core.event.events import (
BuyOrderCompletedEvent,
BuyOrderCreatedEvent,
MarketEvent,
OrderCancelledEvent,
SellOrderCompletedEvent,
SellOrderCreatedEvent,
)
from hummingbot.core.network_iterator import NetworkStatus
from hummingbot.core.utils.async_utils import safe_ensure_future, safe_gather
from hummingbot.logger.struct_logger import METRICS_LOG_LEVEL
logging.basicConfig(level=METRICS_LOG_LEVEL)
class BinancePerpetualMarketUnitTest(unittest.TestCase):
events: List[MarketEvent] = [
MarketEvent.ReceivedAsset,
MarketEvent.BuyOrderCompleted,
MarketEvent.SellOrderCompleted,
MarketEvent.OrderFilled,
MarketEvent.TransactionFailure,
MarketEvent.BuyOrderCreated,
MarketEvent.SellOrderCreated,
MarketEvent.OrderCancelled,
MarketEvent.OrderFailure
]
market: BinancePerpetualDerivative
market_logger: EventLogger
stack: contextlib.ExitStack
@classmethod
def setUpClass(cls) -> None:
cls._ev_loop = asyncio.get_event_loop()
cls.clock: Clock = Clock(ClockMode.REALTIME)
cls.market: BinancePerpetualDerivative = BinancePerpetualDerivative(
api_key=conf.binance_perpetual_api_key,
api_secret=conf.binance_perpetual_api_secret,
trading_pairs=["ETH-USDT"]
)
print("Initializing Binance Perpetual market... this will take about a minute.")
cls.ev_loop: asyncio.BaseEventLoop = asyncio.get_event_loop()
cls.clock.add_iterator(cls.market)
cls.stack: contextlib.ExitStack = contextlib.ExitStack()
cls._clock = cls.stack.enter_context(cls.clock)
cls.ev_loop.run_until_complete(cls.wait_till_ready())
print("Market Ready.")
@classmethod
async def wait_till_ready(cls):
while True:
now = time.time()
next_iteration = now // 1.0 + 1
if cls.market.ready:
break
else:
await cls._clock.run_til(next_iteration)
await asyncio.sleep(1.0)
def setUp(self) -> None:
self.market_logger = EventLogger()
for event_tag in self.events:
self.market.add_listener(event_tag, self.market_logger)
def tearDown(self):
for event_tag in self.events:
self.market.remove_listener(event_tag, self.market_logger)
self.market_logger = None
@classmethod
def tearDownClass(cls) -> None:
cls.stack.close()
async def run_parallel_async(self, *tasks):
future: asyncio.Future = safe_ensure_future(safe_gather(*tasks))
while not future.done():
now = time.time()
next_iteration = now // 1.0 + 1
await self._clock.run_til(next_iteration)
await asyncio.sleep(1.0)
return future.result()
def run_parallel(self, *tasks):
return self.ev_loop.run_until_complete(self.run_parallel_async(*tasks))
@unittest.skip("Too Simple, Unnecessary")
def test_network_status(self):
network_status: NetworkStatus = self.ev_loop.run_until_complete(self.market.check_network())
self.assertEqual(NetworkStatus.CONNECTED, network_status)
@unittest.skip("")
def test_buy_and_sell_order_then_cancel_individually(self):
trading_pair = "ETH-USDT"
# Create Buy Order
buy_order_id = self.market.buy(
trading_pair=trading_pair,
amount=Decimal(0.01),
order_type=OrderType.LIMIT,
price=Decimal(300)
)
[order_created_event] = self.run_parallel(self.market_logger.wait_for(BuyOrderCreatedEvent))
order_created_event: BuyOrderCreatedEvent = order_created_event
self.assertEqual(buy_order_id, order_created_event.order_id)
self.assertEqual(trading_pair, order_created_event.trading_pair)
self.assertEqual(1, len(self.market.in_flight_orders))
self.assertTrue(buy_order_id in self.market.in_flight_orders)
# Create Sell Order
sell_order_id = self.market.sell(
trading_pair=trading_pair,
amount=Decimal(0.01),
order_type=OrderType.LIMIT,
price=Decimal(500)
)
[order_created_event] = self.run_parallel(self.market_logger.wait_for(SellOrderCreatedEvent))
order_created_event: SellOrderCreatedEvent = order_created_event
self.assertEqual(sell_order_id, order_created_event.order_id)
self.assertEqual(trading_pair, order_created_event.trading_pair)
self.assertEqual(2, len(self.market.in_flight_orders))
self.assertTrue(sell_order_id in self.market.in_flight_orders)
self.assertTrue(buy_order_id in self.market.in_flight_orders)
# Cancel Buy Order
self.market.cancel(trading_pair, buy_order_id)
[order_cancelled_event] = self.run_parallel(self.market_logger.wait_for(OrderCancelledEvent))
order_cancelled_event: OrderCancelledEvent = order_cancelled_event
self.assertEqual(buy_order_id, order_cancelled_event.order_id)
self.assertEqual(1, len(self.market.in_flight_orders))
self.assertTrue(sell_order_id in self.market.in_flight_orders)
self.assertTrue(buy_order_id not in self.market.in_flight_orders)
# Cancel Sell Order
self.market.cancel(trading_pair, sell_order_id)
[order_cancelled_event] = self.run_parallel(self.market_logger.wait_for(OrderCancelledEvent))
order_cancelled_event: OrderCancelledEvent = order_cancelled_event
self.assertEqual(sell_order_id, order_cancelled_event.order_id)
self.assertEqual(0, len(self.market.in_flight_orders))
self.assertTrue(sell_order_id not in self.market.in_flight_orders)
self.assertTrue(buy_order_id not in self.market.in_flight_orders)
@unittest.skip("")
def test_buy_and_sell_order_then_cancel_all(self):
trading_pair = "ETH-USDT"
# Create Buy Order
buy_order_id = self.market.buy(
trading_pair=trading_pair,
amount=Decimal(0.01),
order_type=OrderType.LIMIT,
price=Decimal(300)
)
[order_created_event] = self.run_parallel(self.market_logger.wait_for(BuyOrderCreatedEvent))
order_created_event: BuyOrderCreatedEvent = order_created_event
self.assertEqual(buy_order_id, order_created_event.order_id)
self.assertEqual(trading_pair, order_created_event.trading_pair)
self.assertEqual(1, len(self.market.in_flight_orders))
self.assertTrue(buy_order_id in self.market.in_flight_orders)
# Create Sell Order
sell_order_id = self.market.sell(
trading_pair=trading_pair,
amount=Decimal(0.01),
order_type=OrderType.LIMIT,
price=Decimal(500)
)
[order_created_event] = self.run_parallel(self.market_logger.wait_for(SellOrderCreatedEvent))
order_created_event: SellOrderCreatedEvent = order_created_event
self.assertEqual(sell_order_id, order_created_event.order_id)
self.assertEqual(trading_pair, order_created_event.trading_pair)
self.assertEqual(2, len(self.market.in_flight_orders))
self.assertTrue(sell_order_id in self.market.in_flight_orders)
self.assertTrue(buy_order_id in self.market.in_flight_orders)
# Cancel All Orders
[cancellation_results] = self.run_parallel(self.market.cancel_all(5))
for cancel_result in cancellation_results:
self.assertEqual(cancel_result.success, True)
self.assertEqual(0, len(self.market.in_flight_orders))
self.assertTrue(sell_order_id not in self.market.in_flight_orders)
self.assertTrue(buy_order_id not in self.market.in_flight_orders)
@unittest.skip("")
def test_buy_and_sell_order_then_cancel_account_orders(self):
trading_pair = "ETH-USDT"
# Create Buy Order
buy_order_id = self.market.buy(
trading_pair=trading_pair,
amount=Decimal(0.01),
order_type=OrderType.LIMIT,
price=Decimal(300)
)
[order_created_event] = self.run_parallel(self.market_logger.wait_for(BuyOrderCreatedEvent))
order_created_event: BuyOrderCreatedEvent = order_created_event
self.assertEqual(buy_order_id, order_created_event.order_id)
self.assertEqual(trading_pair, order_created_event.trading_pair)
self.assertEqual(1, len(self.market.in_flight_orders))
self.assertTrue(buy_order_id in self.market.in_flight_orders)
# Create Sell Order
sell_order_id = self.market.sell(
trading_pair=trading_pair,
amount=Decimal(0.01),
order_type=OrderType.LIMIT,
price=Decimal(500)
)
[order_created_event] = self.run_parallel(self.market_logger.wait_for(SellOrderCreatedEvent))
order_created_event: SellOrderCreatedEvent = order_created_event
self.assertEqual(sell_order_id, order_created_event.order_id)
self.assertEqual(trading_pair, order_created_event.trading_pair)
self.assertEqual(2, len(self.market.in_flight_orders))
self.assertTrue(sell_order_id in self.market.in_flight_orders)
self.assertTrue(buy_order_id in self.market.in_flight_orders)
# Cancel All Open Orders on Account (specified by trading pair)
self.ev_loop.run_until_complete(safe_ensure_future(self.market.cancel_all_account_orders(trading_pair)))
self.assertEqual(0, len(self.market.in_flight_orders))
self.assertTrue(sell_order_id not in self.market.in_flight_orders)
self.assertTrue(buy_order_id not in self.market.in_flight_orders)
@unittest.skip("")
def test_order_fill_event(self):
trading_pair = "ETH-USDT"
amount: Decimal = Decimal(0.01)
quantized_amount: Decimal = self.market.quantize_order_amount(trading_pair, amount)
# Initialize Pricing (Buy)
price: Decimal = self.market.get_price(trading_pair, True) * Decimal("1.01")
quantized_price: Decimal = self.market.quantize_order_price(trading_pair, price)
# Create Buy Order
buy_order_id = self.market.buy(
trading_pair=trading_pair,
amount=quantized_amount,
order_type=OrderType.LIMIT,
price=quantized_price
)
[order_completed_event] = self.run_parallel(self.market_logger.wait_for(BuyOrderCompletedEvent))
self.assertEqual(buy_order_id, order_completed_event.order_id)
self.assertEqual(quantized_amount, order_completed_event.base_asset_amount)
self.assertEqual("ETH", order_completed_event.base_asset)
self.assertEqual("USDT", order_completed_event.quote_asset)
self.assertTrue(any([isinstance(event, BuyOrderCreatedEvent) and event.order_id == buy_order_id
for event in self.market_logger.event_log]))
# Initialize Pricing (Sell)
price = self.market.get_price(trading_pair, False) * Decimal("0.99")
quantized_price = self.market.quantize_order_price(trading_pair, price)
# Create Sell Order
sell_order_id = self.market.sell(
trading_pair=trading_pair,
amount=quantized_amount,
order_type=OrderType.LIMIT,
price=quantized_price
)
[order_completed_event] = self.run_parallel(self.market_logger.wait_for(SellOrderCompletedEvent))
self.assertEqual(sell_order_id, order_completed_event.order_id)
self.assertEqual(quantized_amount, order_completed_event.base_asset_amount)
self.assertEqual("ETH", order_completed_event.base_asset)
self.assertEqual("USDT", order_completed_event.quote_asset)
self.assertTrue(any([isinstance(event, SellOrderCreatedEvent) and event.order_id == sell_order_id
for event in self.market_logger.event_log]))
def main():
logging.getLogger("hummingbot.core.event.event_reporter").setLevel(logging.WARNING)
unittest.main()
if __name__ == "__main__":
main()
|
analysis_engine/scripts/publish_ticker_aggregate_from_s3.py | virdesai/stock-analysis-engine | 819 | 12684035 | <gh_stars>100-1000
#!/usr/bin/env python
"""
Publish the aggregated S3 contents of a ticker to a
Redis key and back to S3
Steps:
------
1) Parse arguments
2) Download and aggregate ticker data from S3 as a Celery task
3) Publish aggregated data to S3 as a Celery task
4) Publish aggregated data to Redis as a Celery task
"""
import argparse
import analysis_engine.work_tasks.publish_ticker_aggregate_from_s3 \
as task_publisher
from celery import signals
from analysis_engine.work_tasks.get_celery_app import get_celery_app
from spylunking.log.setup_logging import build_colorized_logger
from analysis_engine.api_requests import \
build_publish_ticker_aggregate_from_s3_request
from analysis_engine.consts import LOG_CONFIG_PATH
from analysis_engine.consts import TICKER
from analysis_engine.consts import TICKER_ID
from analysis_engine.consts import WORKER_BROKER_URL
from analysis_engine.consts import WORKER_BACKEND_URL
from analysis_engine.consts import WORKER_CELERY_CONFIG_MODULE
from analysis_engine.consts import INCLUDE_TASKS
from analysis_engine.consts import SSL_OPTIONS
from analysis_engine.consts import TRANSPORT_OPTIONS
from analysis_engine.consts import S3_ACCESS_KEY
from analysis_engine.consts import S3_SECRET_KEY
from analysis_engine.consts import S3_REGION_NAME
from analysis_engine.consts import S3_ADDRESS
from analysis_engine.consts import S3_SECURE
from analysis_engine.consts import S3_BUCKET
from analysis_engine.consts import S3_COMPILED_BUCKET
from analysis_engine.consts import S3_KEY
from analysis_engine.consts import REDIS_ADDRESS
from analysis_engine.consts import REDIS_KEY
from analysis_engine.consts import REDIS_PASSWORD
from analysis_engine.consts import REDIS_DB
from analysis_engine.consts import REDIS_EXPIRE
from analysis_engine.consts import get_status
from analysis_engine.consts import ppj
from analysis_engine.consts import is_celery_disabled
# Disable celery log hijacking
# https://github.com/celery/celery/issues/2509
@signals.setup_logging.connect
def setup_celery_logging(**kwargs):
pass
log = build_colorized_logger(
name='pub-tic-agg-s3-to-redis',
log_config_path=LOG_CONFIG_PATH)
def publish_ticker_aggregate_from_s3():
"""publish_ticker_aggregate_from_s3
    Download all ticker data from S3 and publish its contents
to Redis and back to S3
"""
log.info(
'start - publish_ticker_aggregate_from_s3')
parser = argparse.ArgumentParser(
description=(
            'Download and aggregate all ticker data, '
'and store it in S3 and Redis. '))
parser.add_argument(
'-t',
help=(
'ticker'),
required=True,
dest='ticker')
parser.add_argument(
'-i',
help=(
'optional - ticker id '
'not used without a database'),
required=False,
dest='ticker_id')
parser.add_argument(
'-l',
help=(
'optional - path to the log config file'),
required=False,
dest='log_config_path')
parser.add_argument(
'-b',
help=(
'optional - broker url for Celery'),
required=False,
dest='broker_url')
parser.add_argument(
'-B',
help=(
'optional - backend url for Celery'),
required=False,
dest='backend_url')
parser.add_argument(
'-k',
help=(
'optional - s3 access key'),
required=False,
dest='s3_access_key')
parser.add_argument(
'-s',
help=(
'optional - s3 secret key'),
required=False,
dest='s3_secret_key')
parser.add_argument(
'-a',
help=(
'optional - s3 address format: <host:port>'),
required=False,
dest='s3_address')
parser.add_argument(
'-S',
help=(
'optional - s3 ssl or not'),
required=False,
dest='s3_secure')
parser.add_argument(
'-u',
help=(
'optional - s3 bucket name'),
required=False,
dest='s3_bucket_name')
parser.add_argument(
'-c',
help=(
'optional - s3 compiled bucket name'),
required=False,
dest='s3_compiled_bucket_name')
parser.add_argument(
'-g',
help=(
'optional - s3 region name'),
required=False,
dest='s3_region_name')
parser.add_argument(
'-p',
help=(
'optional - redis_password'),
required=False,
dest='redis_password')
parser.add_argument(
'-r',
help=(
'optional - redis_address format: <host:port>'),
required=False,
dest='redis_address')
parser.add_argument(
'-n',
help=(
'optional - redis and s3 key name'),
required=False,
dest='keyname')
parser.add_argument(
'-m',
help=(
'optional - redis database number (0 by default)'),
required=False,
dest='redis_db')
parser.add_argument(
'-x',
help=(
'optional - redis expiration in seconds'),
required=False,
dest='redis_expire')
parser.add_argument(
'-d',
help=(
'debug'),
required=False,
dest='debug',
action='store_true')
args = parser.parse_args()
ticker = TICKER
ticker_id = TICKER_ID
ssl_options = SSL_OPTIONS
transport_options = TRANSPORT_OPTIONS
broker_url = WORKER_BROKER_URL
backend_url = WORKER_BACKEND_URL
celery_config_module = WORKER_CELERY_CONFIG_MODULE
include_tasks = INCLUDE_TASKS
s3_access_key = S3_ACCESS_KEY
s3_secret_key = S3_SECRET_KEY
s3_region_name = S3_REGION_NAME
s3_address = S3_ADDRESS
s3_secure = S3_SECURE
s3_bucket_name = S3_BUCKET
s3_compiled_bucket_name = S3_COMPILED_BUCKET
s3_key = S3_KEY
redis_address = REDIS_ADDRESS
redis_key = REDIS_KEY
redis_password = REDIS_PASSWORD
redis_db = REDIS_DB
redis_expire = REDIS_EXPIRE
debug = False
if args.ticker:
ticker = args.ticker.upper()
if args.ticker_id:
        ticker_id = args.ticker_id
if args.broker_url:
broker_url = args.broker_url
if args.backend_url:
backend_url = args.backend_url
if args.s3_access_key:
s3_access_key = args.s3_access_key
if args.s3_secret_key:
s3_secret_key = args.s3_secret_key
if args.s3_region_name:
s3_region_name = args.s3_region_name
if args.s3_address:
s3_address = args.s3_address
if args.s3_secure:
s3_secure = args.s3_secure
if args.s3_bucket_name:
s3_bucket_name = args.s3_bucket_name
if args.s3_compiled_bucket_name:
s3_compiled_bucket_name = args.s3_compiled_bucket_name
if args.keyname:
s3_key = args.keyname
redis_key = args.keyname
if args.redis_address:
redis_address = args.redis_address
if args.redis_password:
redis_password = args.redis_password
if args.redis_db:
redis_db = args.redis_db
if args.redis_expire:
redis_expire = args.redis_expire
if args.debug:
debug = True
work = build_publish_ticker_aggregate_from_s3_request()
work['ticker'] = ticker
work['ticker_id'] = ticker_id
work['s3_bucket'] = s3_bucket_name
work['s3_compiled_bucket'] = s3_compiled_bucket_name
if args.keyname:
work['s3_key'] = s3_key
work['redis_key'] = redis_key
work['s3_access_key'] = s3_access_key
work['s3_secret_key'] = s3_secret_key
work['s3_region_name'] = s3_region_name
work['s3_address'] = s3_address
work['s3_secure'] = s3_secure
work['redis_address'] = redis_address
    work['redis_password'] = redis_password
work['redis_db'] = redis_db
work['redis_expire'] = redis_expire
work['debug'] = debug
work['label'] = f'ticker={ticker}'
path_to_tasks = 'analysis_engine.work_tasks'
task_name = (
f'{path_to_tasks}.publish_ticker_aggregate_from_s3.'
'publish_ticker_aggregate_from_s3')
task_res = None
if is_celery_disabled():
work['celery_disabled'] = True
log.debug(
f'starting without celery work={ppj(work)}')
task_res = task_publisher.publish_ticker_aggregate_from_s3(
work_dict=work)
if debug:
log.info(
f'done - result={ppj(task_res)} task={task_name} '
f'status={get_status(status=task_res["status"])} '
f'err={task_res["err"]} label={work["label"]}')
else:
log.info(
f'done - result task={task_name} '
f'status={get_status(status=task_res["status"])} '
f'err={task_res["err"]} label={work["label"]}')
# if/else debug
else:
log.info(f'connecting to broker={broker_url} backend={backend_url}')
# Get the Celery app
app = get_celery_app(
name=__name__,
auth_url=broker_url,
backend_url=backend_url,
path_to_config_module=celery_config_module,
ssl_options=ssl_options,
transport_options=transport_options,
include_tasks=include_tasks)
log.info(f'calling task={task_name} - work={ppj(work)}')
job_id = app.send_task(
task_name,
(work,))
log.info(f'calling task={task_name} - success job_id={job_id}')
# end of if/else
# end of publish_ticker_aggregate_from_s3
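# Example invocation using only the flags defined above; the ticker, bucket
# names, key name and endpoints below are placeholders:
#
#     python analysis_engine/scripts/publish_ticker_aggregate_from_s3.py \
#         -t SPY -n SPY_latest -u pricing -c compileddatasets \
#         -a localhost:9000 -r localhost:6379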
if __name__ == '__main__':
publish_ticker_aggregate_from_s3()
|
UEM-Samples/Utilities and Tools/Generic/App Upload/Mobile CICD Script/api_client/application_extensive_search.py | dholliman/euc-samples | 127 | 12684051 | import requests
import json
from api_client.url_helpers.apps_url import get_apps_search_url
from config import config
from Logs.log_configuration import configure_logger
from models.api_header_model import RequestHeader
log = configure_logger('default')
def search_application(bundle_id):
"""
Search for applications with the given Bundle ID
:param bundle_id: Bundle ID (App Identifier)
:return: True/False indicating Success/Failure and Application_list that matches the given Bundle ID
"""
api_url = get_apps_search_url()
headers = RequestHeader().header
api_params = {
'type': 'App',
'applicationtype': 'Internal',
'bundleid': bundle_id,
'locationgroupid': config.TENANT_GROUP_ID,
'productcomponentappsonly': 'False'
}
try:
response = requests.get(api_url, headers=headers, params=api_params)
if not response.ok:
log.error(f'{response.status_code}, {response.reason}, {response.content}') # HTTP
            return False, []
else:
response_data = json.loads(response.content)
app_list = response_data['Application']
return True, app_list
except Exception as e:
log.error('Application Search failed: {}'.format(str(e)))
        return False, []
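# Minimal usage sketch; the bundle id is a placeholder and, as documented
# above, the function yields a (success, app_list) pair:
#
#     found, apps = search_application('com.example.myapp')
#     if found:
#         print('matching internal apps:', len(apps))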
|
pybamm/models/submodels/convection/base_convection.py | manjunathnilugal/PyBaMM | 330 | 12684054 | <reponame>manjunathnilugal/PyBaMM<filename>pybamm/models/submodels/convection/base_convection.py
#
# Base class for convection submodels
#
import pybamm
class BaseModel(pybamm.BaseSubModel):
"""Base class for convection submodels.
Parameters
----------
param : parameter class
The parameters to use for this submodel
options : dict, optional
A dictionary of options to be passed to the model.
**Extends:** :class:`pybamm.BaseSubModel`
"""
def __init__(self, param, options=None):
super().__init__(param, options=options)
def _get_standard_whole_cell_velocity_variables(self, variables):
"""
A private function to obtain the standard variables which
can be derived from the fluid velocity.
Parameters
----------
variables : dict
The existing variables in the model
Returns
-------
variables : dict
The variables which can be derived from the volume-averaged
velocity.
"""
vel_scale = self.param.velocity_scale
if self.half_cell:
v_box_n = None
else:
v_box_n = variables["Negative electrode volume-averaged velocity"]
v_box_s = variables["Separator volume-averaged velocity"]
v_box_p = variables["Positive electrode volume-averaged velocity"]
v_box = pybamm.concatenation(v_box_n, v_box_s, v_box_p)
variables = {
"Volume-averaged velocity": v_box,
"Volume-averaged velocity [m.s-1]": vel_scale * v_box,
}
return variables
def _get_standard_whole_cell_acceleration_variables(self, variables):
"""
A private function to obtain the standard variables which
can be derived from the fluid velocity.
Parameters
----------
variables : dict
The existing variables in the model
Returns
-------
variables : dict
            The variables which can be derived from the volume-averaged
            acceleration.
"""
acc_scale = self.param.velocity_scale / self.param.L_x
if self.half_cell:
div_v_box_n = None
else:
div_v_box_n = variables["Negative electrode volume-averaged acceleration"]
div_v_box_s = variables["Separator volume-averaged acceleration"]
div_v_box_p = variables["Positive electrode volume-averaged acceleration"]
div_v_box = pybamm.concatenation(div_v_box_n, div_v_box_s, div_v_box_p)
div_v_box_av = pybamm.x_average(div_v_box)
variables = {
"Volume-averaged acceleration": div_v_box,
"X-averaged volume-averaged acceleration": div_v_box_av,
"Volume-averaged acceleration [m.s-1]": acc_scale * div_v_box,
"X-averaged volume-averaged acceleration [m.s-1]": acc_scale * div_v_box_av,
}
return variables
def _get_standard_whole_cell_pressure_variables(self, variables):
"""
A private function to obtain the standard variables which
can be derived from the pressure in the fluid.
Parameters
----------
variables : dict
The existing variables in the model
Returns
-------
variables : dict
The variables which can be derived from the pressure.
"""
if self.half_cell:
p_n = None
else:
p_n = variables["Negative electrode pressure"]
p_s = variables["Separator pressure"]
p_p = variables["Positive electrode pressure"]
p = pybamm.concatenation(p_n, p_s, p_p)
variables = {"Pressure": p}
return variables
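    # Note: the three helpers above only concatenate, average and rescale;
    # they assume the per-domain entries (e.g. "Separator volume-averaged
    # velocity", "Positive electrode pressure") were already placed into
    # `variables` by the concrete convection submodel. This is an observation
    # about this base class, not a statement about the wider PyBaMM API.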
|
maptrace.py | sdobz/maptrace | 120 | 12684088 | <reponame>sdobz/maptrace<filename>maptrace.py
# -*- encoding: utf-8 -*-
import sys, re, os, argparse, heapq
from datetime import datetime
from collections import namedtuple, defaultdict
import numpy as np
from PIL import Image
from scipy import ndimage
######################################################################
DIR_RIGHT = 0
DIR_DOWN = 1
DIR_LEFT = 2
DIR_UP = 3
NEIGHBOR_OFFSET = np.array([
[ 0, 1 ],
[ 1, 0 ],
[ 0, -1 ],
[ -1, 0 ]
])
TURN_RIGHT = np.array([ DIR_DOWN, DIR_LEFT, DIR_UP, DIR_RIGHT ])
TURN_LEFT = np.array([ DIR_UP, DIR_RIGHT, DIR_DOWN, DIR_LEFT ])
VMAP_OFFSET = np.array([
[ -1, 0, 0 ],
[ 0, 0, 1 ],
[ 0, 0, 0 ],
[ 0, -1, 1 ]
])
DIAG_OFFSET = NEIGHBOR_OFFSET + NEIGHBOR_OFFSET[TURN_LEFT]
OPP_OFFSET = NEIGHBOR_OFFSET[TURN_LEFT]
CROSS_ELEMENT = np.array([[0,1,0],[1,1,1],[0,1,0]],dtype=np.bool)
BOX_ELEMENT = np.ones((3,3), dtype=np.bool)
######################################################################
# Some helper classes
EdgeInfo = namedtuple('EdgeInfo', ['node0', 'node1', 'label0', 'label1'])
EdgeRef = namedtuple('EdgeRef', ['edge_index', 'opp_label', 'step'])
######################################################################
# Class to store boundary representation for our map
class BoundaryRepresentation(object):
def __init__(self):
# list of nodes (points) or None for deleted
self.node_list = []
# list of sets of edge indices
self.node_edges = []
# list of point arrays (or empty for deleted edges)
self.edge_list = []
# list of EdgeInfo (or None for deleted edges)
self.edge_infolist = []
# map from point to node index
self.node_lookup = dict()
# map from EdgeInfo to edge index
self.edge_lookup = dict()
# map from label to list of list of EdgeRef
self.label_lookup = defaultdict(list)
def lookup_node(self, point, insert=False):
key = tuple(map(float, point))
if insert and key not in self.node_lookup:
node_idx = len(self.node_list)
self.node_list.append(point.copy())
self.node_edges.append(set())
self.node_lookup[key] = node_idx
else:
node_idx = self.node_lookup[key]
return node_idx
def add_edges(self, cur_label, contour_edges):
edge_refs = []
for opp_label, edge in contour_edges:
assert cur_label != opp_label
assert cur_label != 0
label0 = min(cur_label, opp_label)
label1 = max(cur_label, opp_label)
if label0 == cur_label:
step = 1
else:
step = -1
edge_to_add = edge[::step]
node0 = self.lookup_node(edge_to_add[0], insert=True)
node1 = self.lookup_node(edge_to_add[-1], insert=True)
edge_info = EdgeInfo(node0, node1, label0, label1)
if edge_info in self.edge_lookup:
edge_idx = self.edge_lookup[edge_info]
stored_edge = self.edge_list[edge_idx]
assert self.edge_infolist[edge_idx] == edge_info
assert np.all(stored_edge == edge_to_add)
assert edge_idx in self.node_edges[node0]
assert edge_idx in self.node_edges[node1]
else:
edge_idx = len(self.edge_list)
self.edge_list.append( edge_to_add )
self.edge_infolist.append( edge_info )
self.edge_lookup[edge_info] = edge_idx
self.node_edges[node0].add( edge_idx )
self.node_edges[node1].add( edge_idx )
edge_refs.append(EdgeRef(edge_idx, opp_label, step))
self.label_lookup[cur_label].append( edge_refs)
def replace_endpoints(self, edge_idx, na, nb, nc):
edge = self.edge_list[edge_idx]
edge_info = self.edge_infolist[edge_idx]
assert (edge_info.node0 == na or edge_info.node0 == nb or
edge_info.node1 == na or edge_info.node1 == nb)
n0 = None
n1 = None
if edge_info.node0 == na:
n0 = na
new_n0 = nc
elif edge_info.node0 == nb:
n0 = nb
new_n0 = nc
else:
new_n0 = edge_info.node0
if edge_info.node1 == na:
n1 = na
new_n1 = nc
elif edge_info.node1 == nb:
n1 = nb
new_n1 = nc
else:
new_n1 = edge_info.node1
if n0 is not None and n1 is not None:
self.edge_list[edge_idx] = edge[:0]
self.edge_infolist[edge_idx] = None
# NB we will rebuild label_lookup after all merges
return
self.node_edges[nc].add(edge_idx)
pc = self.node_list[nc]
for node_idx, which_end, lo, hi in [(n0, 0, 1, 0), (n1, -1, 0, 1)]:
if node_idx is None:
continue
p = self.node_list[node_idx]
delta = (pc - p).reshape(1, 2)
u = np.linspace(lo, hi, len(edge)).reshape(-1, 1)
edge = edge + delta * u
edge[which_end] = pc
edge_info = EdgeInfo(new_n0, new_n1, edge_info.label0, edge_info.label1)
self.edge_list[edge_idx] = edge
self.edge_infolist[edge_idx] = edge_info
assert np.all(edge[0] == self.node_list[edge_info.node0])
assert np.all(edge[-1] == self.node_list[edge_info.node1])
def merge_nodes(self, tol):
node_points = np.array(self.node_list)
rng = range(len(node_points))
i, j = np.meshgrid(rng, rng)
use = i > j
i = i[use]
j = j[use]
ni = node_points[i]
nj = node_points[j]
dists = np.linalg.norm(ni - nj, axis=1)
heap = list(zip(dists, i, j))
heapq.heapify(heap)
retired_nodes = set()
active_nodes = set(rng)
while len(heap):
dmin, na, nb = heapq.heappop(heap)
assert na > nb
if dmin > tol:
break
if na in retired_nodes or nb in retired_nodes:
continue
print(' merge nodes {} and {} with distance {}'.format(
na, nb, dmin))
pa = self.node_list[na]
pb = self.node_list[nb]
pc = 0.5*(pa + pb)
nc = len(self.node_list)
nkey = tuple(map(float, pc))
self.node_list.append(pc.copy())
self.node_edges.append(set())
self.node_lookup[nkey] = nc
assert self.lookup_node(pc) == nc
for node_idx in (na, nb):
for edge_idx in self.node_edges[node_idx]:
if self.edge_infolist[edge_idx] is not None:
self.replace_endpoints(edge_idx, na, nb, nc)
for node_idx in (na, nb):
p = self.node_list[node_idx]
pkey = tuple(map(float, p))
del self.node_lookup[pkey]
self.node_list[node_idx] = None
self.node_edges[node_idx] = set()
retired_nodes.add(node_idx)
active_nodes.remove(node_idx)
for nj in active_nodes:
pj = self.node_list[nj]
dcj = np.linalg.norm(pc - pj)
hkey = (dcj, nc, nj)
heapq.heappush(heap, hkey)
active_nodes.add(nc)
# rebuild label lookup
new_label_lookup = dict()
for label, contours in self.label_lookup.items():
new_contours = []
for contour in contours:
new_contour = []
for edge_ref in contour:
idx, _, _ = edge_ref
if self.edge_infolist[idx] is not None:
new_contour.append(edge_ref)
if len(new_contour):
new_contours.append(new_contour)
if len(new_contours):
new_label_lookup[label] = new_contours
else:
print('totally deleted label {}!'.format(label))
self.label_lookup = new_label_lookup
def save_debug_image(self, opts, orig_shape, colors, name):
filename = opts.basename + '_debug_' + name + '.svg'
with open(filename, 'w') as svg:
svg.write('<svg width="{}" height="{}" '
'xmlns="http://www.w3.org/2000/svg">\n'.
format(orig_shape[1], orig_shape[0]))
svg.write(' <rect width="100%" height="100%" fill="#eee" />\n')
for ilabel in range(2):
if ilabel == 0:
svg.write(' <g stroke-linejoin="miter" stroke-width="4" fill="none">\n')
else:
svg.write(' <g stroke-linejoin="miter" stroke-width="4" fill="none" stroke-dasharray="8, 8" >\n')
for edge, einfo in zip(self.edge_list, self.edge_infolist):
svg.write(' <path d="')
last = np.array([0,0])
for i, pt in enumerate(edge):
pt = pt.astype(int)
if i == 0:
svg.write('M{},{}'.format(pt[0], pt[1]))
else:
diff = pt - last
if diff[1] == 0:
svg.write('h{}'.format(diff[0]))
elif diff[0] == 0:
svg.write('v{}'.format(diff[1]))
else:
svg.write('l{},{}'.format(*diff))
last = pt
color = colors[einfo.label0 if ilabel == 0 else einfo.label1]
svg.write('" stroke="#{:02x}{:02x}{:02x}" />\n'.format(*color))
svg.write(' </g>\n')
svg.write(' <g stroke="none" fill="#000">\n')
for pt in self.node_list:
svg.write(' <circle cx="{}" cy="{}" r="4" />\n'.format(*pt))
svg.write(' </g>\n')
svg.write('</svg>\n')
print('wrote', filename)
######################################################################
# Input is string, output is pair (string, lambda image -> image)
def filter_type(fstr):
m = re.match(r'^\s*([a-z]+)\s*:\s*([a-z]+)\s*,\s*([1-9][0-9]*)\s*$', fstr)
if m is None:
raise argparse.ArgumentTypeError('invalid filter string')
operation = m.group(1)
element = m.group(2)
iterations = int(m.group(3))
fnmap = dict(
open=ndimage.binary_opening,
close=ndimage.binary_closing,
dilate=ndimage.binary_dilation,
erode=ndimage.binary_erosion)
if operation not in fnmap.keys():
raise argparse.ArgumentTypeError('invalid operation ' + operation)
if element == 'box':
element = BOX_ELEMENT
elif element == 'cross':
element = CROSS_ELEMENT
else:
raise argparse.ArgumentTypeError('invalid element ' + element)
f = lambda img: fnmap[operation](img, element, iterations=iterations)
return fstr, f
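# Examples of filter strings accepted by the regex above
# (operation:element,iterations):
#
#     'erode:cross,1'  -> one binary erosion with the 4-connected cross element
#     'close:box,2'    -> two binary-closing iterations with the 3x3 box element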
######################################################################
# Confirm with [y/n]
def confirm(prompt):
while True:
print(prompt + ' [y/n]: ', end='')
sys.stdout.flush()
choice = input().lower()
if choice in ['y', 'yes']:
return True
elif choice in ['n', 'no']:
return False
else:
print('invalid choice')
######################################################################
# Parse command-line options, return namespace containing results
def get_options():
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('image', type=argparse.FileType('rb'),
metavar='IMAGE.png', nargs='?',
help='image to approximate')
parser.add_argument('-z', '--zoom', type=float, metavar='ZOOM',
default=1.0,
help='amount to resize image on load')
parser.add_argument('-t', '--threshold', type=int, metavar='T',
default=64,
help='intensity threshold for outlines')
parser.add_argument('-a', '--alpha-threshold', type=int, metavar='T',
default=127,
help='threshold for alpha channel')
parser.add_argument('-C', '--connectivity', choices=('4','8'),
default='4',
help='connectivity of non-outline regions')
parser.add_argument('-f', '--filter', type=filter_type, default=None,
help='filter for preprocessing outline map '
'after thresholding but before connected '
'component analysis; must be of the format '
'(erode|dilate|open|close):(box|cross),ITERATIONS '
'e.g., erode:cross,1')
parser.add_argument('-e', '--edge-tol', type=float, metavar='E',
default='1.42',
help='tolerance in px for simplifying edges')
parser.add_argument('-n', '--node-tol', type=float, metavar='N',
default=0,
help='tolerance in px for merging nodes')
parser.add_argument('-o', '--output-file', type=str, metavar='FILENAME.svg',
default=None,
help='output SVG file name')
parser.add_argument('-s', '--stroke-width', type=float, metavar='S',
default=1.0,
help='output SVG stroke width')
parser.add_argument('-b', '--bg-stroke-width', type=float, metavar='S',
default=None,
help='output SVG stroke width for largest region')
parser.add_argument('-d', '--debug-images', action='store_true',
help='generate debug images')
parser.add_argument('-D', '--allow-dark-colors', action='store_true',
help='flag to prevent applying grayscale threshold '
'to image supplied with -c')
parser.add_argument('-m', '--min-area', type=int, metavar='A',
default=1, help='minimum region area in pixels')
parser.add_argument('-c', '--color-image', type=argparse.FileType('rb'),
default=None, help='image to supply color for output map')
parser.add_argument('-q', '--color-quantize-bits', type=int,
default=8, help='quantization for finding region '
'colors with -c')
parser.add_argument('-r', '--random-colors', action='store_true',
help='color regions randomly')
parser.add_argument('-R', '--random-seed', type=int,
help='random seed for colors')
parser.add_argument('-y', '--overwrite', action='store_true',
help='overwrite output')
parser.add_argument('-S', '--solid-colors', action='store_true',
help='input image is solid colors with no outlines')
opts = parser.parse_args()
if opts.image is None:
if opts.color_image is None:
print('error: must provide image filename or set color image with -c')
sys.exit(1)
else:
opts.image = open(opts.color_image.name, 'rb')
basename = os.path.basename(opts.image.name)
opts.basename, _ = os.path.splitext(basename)
if opts.bg_stroke_width is None:
opts.bg_stroke_width = opts.stroke_width
if opts.output_file is None:
opts.output_file = opts.basename + '.svg'
if os.path.exists(opts.output_file) and not opts.overwrite:
if not confirm(opts.output_file + ' exists. Overwrite?'):
print('will not overwite output, exiting')
sys.exit(1)
return opts
######################################################################
# Downsample pixel values, rounding to center of bins.
def quantize(image, bits_per_channel=None):
if bits_per_channel is None:
bits_per_channel = 8
assert image.dtype == np.uint8
shift = 8-bits_per_channel
halfbin = (1 << shift) >> 1
return ((image.astype(int) >> shift) << shift) + halfbin
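# Worked example: with bits_per_channel=4, shift = 4 and halfbin = 8, so a
# pixel value of 37 becomes ((37 >> 4) << 4) + 8 = 32 + 8 = 40, i.e. the
# center of its 16-wide bin.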
######################################################################
# Pack RGB triplets into ints
def pack_rgb(rgb):
orig_shape = None
if isinstance(rgb, np.ndarray):
assert rgb.shape[-1] == 3
orig_shape = rgb.shape[:-1]
else:
assert len(rgb) == 3
rgb = np.array(rgb)
rgb = rgb.astype(int).reshape((-1, 3))
packed = (rgb[:, 0] << 16 |
rgb[:, 1] << 8 |
rgb[:, 2])
if orig_shape is None:
return packed
else:
return packed.reshape(orig_shape)
######################################################################
# Unpack ints to RGB triplets
def unpack_rgb(packed):
orig_shape = None
if isinstance(packed, np.ndarray):
assert packed.dtype == int
orig_shape = packed.shape
packed = packed.reshape((-1, 1))
rgb = ((packed >> 16) & 0xff,
(packed >> 8) & 0xff,
(packed) & 0xff)
if orig_shape is None:
return rgb
else:
return np.hstack(rgb).reshape(orig_shape + (3,))
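# Worked example: pack_rgb([255, 0, 0]) packs to 255 << 16 = 16711680
# (0xff0000), and unpack_rgb(16711680) recovers the (255, 0, 0) triplet.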
######################################################################
# Get the dominant color in a list of colors (with optional
# quantization)
def get_dominant_color(colors, bits_per_channel=None):
assert colors.shape[-1] == 3
quantized = quantize(colors, bits_per_channel).astype(int)
packed = pack_rgb(quantized)
unique, counts = np.unique(packed, return_counts=True)
packed_mode = unique[counts.argmax()]
return unpack_rgb(packed_mode)
######################################################################
# Save a debug image if allowed
def save_debug_image(opts, name, image):
if not opts.debug_images:
return
if isinstance(image, np.ndarray):
if image.dtype == np.bool:
image = (image.astype(np.uint8) * 255)
if len(image.shape) == 2:
mode = 'L'
else:
mode = 'RGB'
image = Image.fromarray(image, mode)
filename = opts.basename + '_debug_' + name + '.png'
image.save(filename)
print('wrote', filename)
######################################################################
# Open an input image and get the RGB colors as well as the mask
def get_mask(input_image, opts):
rgb = input_image
alpha = None
if (rgb.mode == 'LA' or
(rgb.mode == 'P' and 'transparency' in rgb.info)):
rgb = rgb.convert('RGBA')
if rgb.mode == 'RGBA':
alpha = np.array(rgb.split()[-1])
rgb = rgb.convert('RGB')
rgb = np.array(rgb)
gray = rgb.max(axis=2)
mask = (gray > opts.threshold)
if alpha is not None:
mask = mask | (alpha < opts.alpha_threshold)
save_debug_image(opts, 'mask', mask)
if opts.filter is not None:
print('applying filter:', opts.filter[0])
mask = opts.filter[1](mask)
save_debug_image(opts, 'mask_filtered', mask)
return mask
######################################################################
def printp(*args):
print(*args, end='')
sys.stdout.flush()
######################################################################
def get_labels_and_colors_outlined(mask, opts):
if opts.connectivity == '8':
structure = BOX_ELEMENT
else:
structure = CROSS_ELEMENT
labels, num_labels = ndimage.label(mask, structure=structure)
print('found {} labels'.format(num_labels))
unlabeled = ~mask
printp('computing areas... ')
start = datetime.now()
areas, bins = np.histogram(labels.flatten(),
bins=num_labels,
range=(1, num_labels+1))
elapsed = (datetime.now() - start).total_seconds()
print('finished computing areas in {} seconds.'.format(elapsed))
idx = np.hstack( ([0], np.argsort(-areas)+1) )
replace = np.zeros_like(idx)
replace[idx] = range(len(idx))
labels = replace[labels]
areas = areas[idx[1:]-1]
print('min area is {}, max is {}'.format(areas[-1], areas[0]))
if opts.min_area > areas[-1]:
print('killing all labels with area < {} px'.format(opts.min_area))
kill_labels = np.nonzero(areas < opts.min_area)[0]
num_labels = kill_labels.min()
kill_mask = (labels > num_labels)
save_debug_image(opts, 'kill_labels', kill_mask)
unlabeled = unlabeled | kill_mask
print('killed {} labels, now at {} total'.format(
len(kill_labels), num_labels))
colors = 255*np.ones((num_labels+1,3), dtype=np.uint8)
if opts.color_image is not None:
color_image = Image.open(opts.color_image)
labels_size = labels.shape[::-1]
if color_image.size != labels_size:
color_image = color_image.resize(labels_size, Image.NEAREST)
color_image = np.array(color_image.convert('RGB'))
print('assigning colors from {}...'.format(opts.color_image.name))
slices = ndimage.find_objects(labels, num_labels)
for label, (yslc, xslc) in zip(range(1, num_labels+1), slices):
print(' coloring label {}/{}'.format(label, num_labels))
lmask = (labels[yslc,xslc] == label)
crect = color_image[yslc,xslc]
if not opts.allow_dark_colors:
lmask = lmask & (crect.max(axis=2) > opts.threshold)
if not np.any(lmask):
print('no colors available for label {}, '
'try running with -D?'.format(label))
else:
colors[label] = get_dominant_color(crect[lmask],
opts.color_quantize_bits)
elif opts.random_colors:
if opts.random_seed is not None:
np.random.seed(opts.random_seed)
colors = np.random.randint(128, size=(num_labels+1,3),
dtype=np.uint8) + 128
colors[0,:] = 255
save_debug_image(opts, 'regions', colors[labels])
printp('running DT... ')
start = datetime.now()
result = ndimage.distance_transform_edt(unlabeled,
return_distances=opts.debug_images,
return_indices=True)
if opts.debug_images:
dist, idx = result
dist /= dist.max()
dist = (dist*255).astype(np.uint8)
save_debug_image(opts, 'dist', dist)
else:
idx = result
elapsed = (datetime.now() - start).total_seconds()
print('ran DT in {} seconds'.format(elapsed))
labels = labels[tuple(idx)]
assert not np.any(labels == 0)
labels_big = np.zeros((labels.shape[0]+2,labels.shape[1]+2),
dtype=labels.dtype)
labels_big[1:-1,1:-1] = labels
start = datetime.now()
printp('finding objects... ')
slices = ndimage.find_objects(labels, num_labels)
elapsed = (datetime.now() - start).total_seconds()
print('found all objects in {} seconds'.format(elapsed))
slices_big = []
for spair in slices:
spair_big = []
for s in spair:
spair_big.append(slice(s.start, s.stop+2))
slices_big.append( tuple(spair_big) )
assert labels_big.min() == 0 and labels_big.max() == num_labels
assert len(slices) == num_labels
save_debug_image(opts, 'regions_expanded', colors[labels_big[1:-1, 1:-1]])
return num_labels, labels_big, slices_big, colors
######################################################################
def get_labels_and_colors_solid(input_image, opts):
array = np.array(input_image)
print(array.shape, array.dtype)
if len(array.shape) == 2:
flattened = array.flatten()
axis = None
else:
assert len(array.shape) == 3
flattened = array.reshape(-1, array.shape[2])
axis = 0
unique, ulabels = np.unique(flattened,
axis=axis,
return_inverse=True)
ucount = len(unique)
# go from bright to dark
unique = unique[::-1]
ulabels = ucount - ulabels - 1
ulabels = ulabels.reshape(array.shape[:2])
print('unique:', unique)
print('ulabels:', ulabels)
rgb = np.array(input_image.convert('RGB'))
colors = []
labels = np.zeros(array.shape[:2], dtype=int)
max_label = 0
slices = []
for ulabel in range(ucount):
mask = (ulabels == ulabel)
yidx, xidx = np.nonzero(mask)
color = rgb[yidx[0], xidx[0]]
if ulabel == 0: # background
colors.append(color)
else:
sublabels, num_features = ndimage.label(mask)
print('found {} sublabels for {}'.format(
num_features, color))
subslices = ndimage.find_objects(sublabels,
num_features)
labels[mask] = sublabels[mask] + max_label
max_label += num_features
assert labels.max() == max_label
slices.extend(subslices)
colors.extend([color] * num_features)
colors = np.array(colors)
colors[0,:] = 255
randocolors = np.random.randint(128, size=(max_label+1, 3),
dtype=np.uint8) + 128
if opts.random_colors:
colors = randocolors
save_debug_image(opts, 'labels', randocolors[labels])
slices_big = []
for spair in slices:
spair_big = []
for s in spair:
spair_big.append(slice(s.start, s.stop+2))
slices_big.append( tuple(spair_big) )
return max_label, labels, slices_big, colors
######################################################################
def follow_contour(l_subrect, cur_label,
startpoints, pos):
start = pos
cur_dir = DIR_RIGHT
contour_info = []
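    # Walk along the boundary of the region labelled cur_label, starting at `pos` and heading
    # right. Each step records the pixel position, the current direction and the label of the
    # region on the far side of the boundary, then turns left or right depending on the
    # neighbor and diagonal pixels (a standard contour-following walk).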
while True:
ooffs = OPP_OFFSET[cur_dir]
noffs = NEIGHBOR_OFFSET[cur_dir]
doffs = DIAG_OFFSET[cur_dir]
neighbor = tuple(pos + noffs)
diag = tuple(pos + doffs)
opp = tuple(pos + ooffs)
assert l_subrect[pos] == cur_label
assert l_subrect[opp] != cur_label
contour_info.append( pos + (cur_dir, l_subrect[opp]) )
startpoints[pos] = False
if l_subrect[neighbor] != cur_label:
cur_dir = TURN_RIGHT[cur_dir]
elif l_subrect[diag] == cur_label:
pos = diag
cur_dir = TURN_LEFT[cur_dir]
else:
pos = neighbor
if pos == start and cur_dir == DIR_RIGHT:
break
n = len(contour_info)
contour_info = np.array(contour_info)
clabels = contour_info[:,3]
# set of unique labels for this contour
opp_label_set = set(clabels)
assert cur_label not in opp_label_set
# if multiple labels and one wraps around, correct this
if len(opp_label_set) > 1 and clabels[0] == clabels[-1]:
idx = np.nonzero(clabels != clabels[0])[0][0]
perm = np.hstack( (np.arange(idx, n),
np.arange(idx)) )
contour_info = contour_info[perm]
clabels = contour_info[:,3]
# make sure no wraparound
assert len(opp_label_set) == 1 or clabels[0] != clabels[-1]
# apply offset to get contour points
cpoints = contour_info[:,:2].astype(np.float32)
cdirs = contour_info[:,2]
cpoints += 0.5 * (OPP_OFFSET[cdirs] - NEIGHBOR_OFFSET[cdirs] + 1)
# put points in xy format
cpoints = cpoints[:,::-1]
if len(opp_label_set) == 1:
idx = np.arange(len(cpoints))
xyi = zip(cpoints[:,0], cpoints[:,1], idx)
imin = min(xyi)
i = imin[-1]
cpoints = np.vstack( ( cpoints[i:], cpoints[:i] ) )
assert np.all(clabels == clabels[0])
return cpoints, clabels
######################################################################
def split_contour(cpoints, clabels):
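    # Split the closed contour into maximal runs of consecutive points that border the same
    # opposite label; each run becomes one (opp_label, points) edge, and the last point of a
    # run is repeated as the first point of the next so the edges chain together.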
edges = []
shifted = np.hstack(( [-1], clabels[:-1] ))
istart = np.nonzero( clabels - shifted )[0]
iend = np.hstack( (istart[1:], len(clabels)) )
for start, end in zip(istart, iend):
assert start == 0 or clabels[start] != clabels[start-1]
assert clabels[end-1] == clabels[start]
opp_label = clabels[start]
if end < len(cpoints):
edge = cpoints[start:end+1]
else:
edge = np.vstack( (cpoints[start:end], cpoints[0]) )
edges.append( (opp_label, edge) )
start = end
return edges
######################################################################
def store_contour_edges(opts, labels,
edge_lookup, edge_list,
cur_label, contour_edges):
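    # Each boundary shared by two labels is stored only once, keyed by (smaller label, larger
    # label, first point, second point); the point order is normalized to the direction in which
    # the smaller label traverses the shared boundary, and `step` records whether this region
    # uses the stored edge forwards (+1) or backwards (-1).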
edge_refs = []
for opp_label, edge in contour_edges:
assert cur_label != opp_label
assert cur_label != 0
print(' storing contour edge with cur={}, opp={}'.format(
cur_label, opp_label))
lmin = min(cur_label, opp_label)
lmax = max(cur_label, opp_label)
if lmin == cur_label:
step = 1
else:
step = -1
edge_to_add = edge[::step]
p0 = tuple(map(float, edge_to_add[0]))
p1 = tuple(map(float, edge_to_add[1]))
key = (lmin, lmax, p0, p1)
if key in edge_lookup:
idx = edge_lookup[key]
if not np.all(edge_list[idx] == edge_to_add):
debug = 255*np.ones(labels.shape + (3,), dtype=np.uint8)
debug[labels == cur_label] = (255, 0, 0)
debug[labels == opp_label] = (0, 0, 255)
save_debug_image(opts, 'debug_edge', debug)
print('not forward/backward symmetric!')
print(type(edge_to_add))
print(type(edge_list[idx]))
print(edge_list[idx].shape, edge_list[idx].dtype)
print(edge_to_add.shape, edge_to_add.dtype)
print(edge_to_add == edge_list[idx])
assert np.all(edge_list[idx] == edge_to_add)
else:
idx = len(edge_list)
edge_list.append( edge_to_add )
edge_lookup[key] = idx
edge_refs.append( (idx, opp_label, step) )
return edge_refs
######################################################################
def _simplify_r(opts, p0, edge, output_list):
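    # Recursive polyline simplification in the spirit of Ramer-Douglas-Peucker: if every point
    # of `edge` lies within opts.edge_tol of the line through p0 and the last point, only the
    # last point is kept; otherwise the edge is split at the farthest point and both halves are
    # simplified recursively (very short edges are kept unchanged).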
assert np.all( output_list[-1][-1] == p0 )
assert not np.all(edge[0] == p0)
p1 = edge[-1]
if len(edge) == 1:
output_list.append(edge)
return
l = np.cross([p0[0], p0[1], 1], [p1[0], p1[1], 1])
n = l[:2]
w = np.linalg.norm(n)
if w == 0:
dist = np.linalg.norm(edge - p0, axis=1)
idx = dist.argmax()
dmax = np.inf
else:
l /= w
dist = np.abs( np.dot(edge, l[:2]) + l[2] )
idx = dist.argmax()
dmax = dist[idx]
if dmax < opts.edge_tol:
output_list.append(np.array([p1]))
elif len(edge) > 3:
_simplify_r(opts, p0, edge[:idx+1], output_list)
_simplify_r(opts, edge[idx], edge[idx+1:], output_list)
else:
output_list.append(edge)
######################################################################
def simplify(opts, edge):
if not len(edge):
return edge
p0 = edge[0]
output_list = [ edge[[0]] ]
_simplify_r(opts, p0, edge[1:], output_list)
return np.vstack( tuple(output_list) )
######################################################################
def build_brep(opts, num_labels, labels, slices, colors):
brep = BoundaryRepresentation()
label_range = range(1, num_labels+1)
print('building boundary representation...')
# for each object
for cur_label, (yslc, xslc) in zip(label_range, slices):
p0 = (xslc.start-1, yslc.start-1)
# extract sub-rectangle for this label
l_subrect = labels[yslc, xslc]
# get binary map of potential start points for contour in
# rightward direction
mask_subrect = (l_subrect == cur_label)
mask_shifted_down = np.vstack(
(np.zeros_like(mask_subrect[0].reshape(1,-1)),
mask_subrect[:-1]))
startpoints = mask_subrect & ~mask_shifted_down
print(' processing label {}/{} with area {}'.format(
cur_label, num_labels, (l_subrect == cur_label).sum()))
# while there are candidate locations to start at
while np.any(startpoints):
# get the first one
i, j = np.nonzero(startpoints)
pos = (i[0], j[0])
# extract points and adjacent labels along contour,
# this modifies startpoints
cpoints, clabels = follow_contour(l_subrect, cur_label,
startpoints, pos)
cpoints += p0
# split contour into (opp_label, points) pairs
contour_edges = split_contour(cpoints, clabels)
# add them to our boundary representation
brep.add_edges(cur_label, contour_edges)
if opts.debug_images:
orig_shape = (labels.shape[0]-2, labels.shape[1]-2)
brep.save_debug_image(opts, orig_shape, colors, 'brep')
simplified = False
if opts.node_tol > 0:
print('merging all nodes closer than {} px...'.format(opts.node_tol))
brep.merge_nodes(opts.node_tol)
simplified = True
if opts.edge_tol > 0:
print('simplifying edges...')
brep.edge_list = [ simplify(opts, edge) for edge in brep.edge_list ]
simplified = True
if opts.debug_images and simplified:
orig_shape = (labels.shape[0]-2, labels.shape[1]-2)
brep.save_debug_image(opts, orig_shape, colors, 'brep_simplified')
return brep
######################################################################
def num_fmt(n):
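    # Format a coordinate with at most two decimals and strip trailing zeros, e.g. 3.50 -> '3.5'
    # and 2.00 -> '2', to keep the emitted SVG path data compact.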
s = '{:.2f}'.format(n)
if '.' in s:
s = re.sub(r'\.?0+$', '', s)
return s
def output_svg(opts, orig_shape, brep, colors):
with open(opts.output_file, 'w') as svg:
svg.write('<svg width="{}" height="{}" '
'xmlns="http://www.w3.org/2000/svg">\n'.
format(orig_shape[1], orig_shape[0]))
svg.write(' <g stroke="#000" stroke-linejoin="bevel" '
'stroke-width="{}">\n'.format(opts.stroke_width))
cpacked = pack_rgb(colors.astype(int))
cset = set(cpacked)
lsets = []
for c in cset:
idx = np.nonzero(cpacked == c)[0]
if 1 in idx:
lsets.insert(0, idx)
else:
lsets.append(idx)
assert 1 in lsets[0]
for lset in lsets:
svg.write(' <g fill="#{:02x}{:02x}{:02x}">\n'.format(
*colors[lset[0]]))
for cur_label in lset:
if cur_label not in brep.label_lookup:
continue
contours = brep.label_lookup[cur_label]
svg.write(' <path d="')
for i, contour in enumerate(contours):
for j, (edge_idx, _, step) in enumerate(contour):
edge = brep.edge_list[edge_idx][::step]
iedge = edge.astype(int)
if np.all(edge == iedge):
pprev = iedge[0]
if j == 0:
svg.write('M{:d},{:d}'.format(*pprev))
for pt in iedge[1:]:
svg.write('l{:d},{:d}'.format(*(pt-pprev)))
pprev = pt
else:
if j == 0:
svg.write('M{},{}'.format(*map(num_fmt, edge[0])))
for pt in edge[1:]:
svg.write('L{},{}'.format(*map(num_fmt, pt)))
svg.write('Z')
svg.write('"')
if cur_label == 1 and opts.stroke_width != opts.bg_stroke_width:
svg.write(' stroke-width="{}"'.format(opts.bg_stroke_width))
svg.write('/>\n')
svg.write(' </g>\n')
svg.write(' </g>\n')
svg.write('</svg>\n')
print('wrote', opts.output_file)
######################################################################
def main():
opts = get_options()
input_image = Image.open(opts.image)
if opts.zoom != 1:
w, h = input_image.size
wnew = int(round(w*opts.zoom))
hnew = int(round(h*opts.zoom))
        resample = Image.LANCZOS  # high-quality resampling for both up- and down-scaling
input_image = input_image.resize((wnew, hnew), resample)
save_debug_image(opts, 'resized', input_image)
if not opts.solid_colors:
mask = get_mask(input_image, opts)
# labels is a 2D array that ranges from 0 (background) to
# num_labels (inclusive), and slices are bounding rectangles for
# each non-zero label.
num_labels, labels, slices, colors = get_labels_and_colors_outlined(mask, opts)
else:
num_labels, labels, slices, colors = get_labels_and_colors_solid(input_image, opts)
assert len(slices) == num_labels
assert len(colors) == num_labels + 1
brep = build_brep(opts, num_labels, labels, slices, colors)
output_svg(opts, labels.shape, brep, colors)
######################################################################
if __name__ == '__main__':
main()
|
views/view_stream.py | TomasTorresB/nerve | 365 | 12684121 | <gh_stars>100-1000
import time
from core.security import session_required
from flask import Blueprint, Response, stream_with_context
stream = Blueprint('stream', __name__,
template_folder='templates')
@stream.route('/log')
@session_required
def view_stream():
def generate():
with open('logs/nerve.log') as f:
while True:
yield f.read()
time.sleep(1)
return Response(stream_with_context(generate()), mimetype='text/plain')
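# A minimal sketch of how this blueprint might be registered in an application (the app module
# shown here is an assumption, not part of this file):
#
#   from flask import Flask
#   from views.view_stream import stream
#
#   app = Flask(__name__)
#   app.register_blueprint(stream)  # exposes GET /log, guarded by @session_required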
|
target_offer/026-树的子结构/sub_structure_tree.py | lesywix/oh-my-python | 107 | 12684146 | <gh_stars>100-1000
"""
Problem: Given two binary trees A and B, determine whether B is a substructure of A.
Summary: Solve it recursively, and take care to get the termination conditions right.
"""
import unittest
from collections import deque
class TreeNode(object):
def __init__(self, x):
self.val = x
self.left = None
self.right = None
def __repr__(self):
return f'<{self.val}, {self.left}, {self.right}>'
# 树的一些基本算法
class BinaryTree(object):
def __init__(self, tree=None):
self.tree = tree
def construct_tree(self, l: TreeNode, d: TreeNode, r: TreeNode):
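        # The first call establishes d as the root of the tree; later calls only attach l and r
        # as children of the existing node d.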
if not self.tree:
self.tree = d
d.left = l
d.right = r
def pre_traversal(self):
r = []
def f(t):
if not t:
return
r.append(t.val)
f(t.left)
f(t.right)
f(self.tree)
return r
def in_traversal(self):
r = []
def f(t):
if not t:
return
f(t.left)
r.append(t.val)
f(t.right)
f(self.tree)
return r
def post_traversal(self):
r = []
def f(t):
if not t:
return
f(t.left)
f(t.right)
r.append(t.val)
f(self.tree)
return r
def bfs(self):
r = []
q = deque([self.tree])
while q:
n = q.popleft()
if n:
r.append(n.val)
q.append(n.left)
q.append(n.right)
return r
def is_subtree(t1: TreeNode, t2: TreeNode):
r = False
if t1 and t2:
        # If the root values match, check whether the whole subtree rooted here matches, and keep the result in r
if t1.val == t2.val:
r = has_subtree(t1, t2)
        # If the previous check did not succeed, recurse into t1's children
if not r:
r = is_subtree(t1.left, t2) or is_subtree(t1.right, t2)
return r
def has_subtree(t1, t2):
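    # Note the asymmetric base cases: running out of t2 means every node of the pattern matched
    # (success), while running out of t1 first means the pattern cannot fit (failure).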
if not t2:
return True
if not t1:
return False
if t1.val != t2.val:
return False
return has_subtree(t1.left, t2.left) and has_subtree(t1.right, t2.right)
class Test(unittest.TestCase):
def test(self):
n1 = TreeNode(8)
n2 = TreeNode(8)
n3 = TreeNode(7)
n4 = TreeNode(9)
n5 = TreeNode(2)
n6 = TreeNode(4)
n7 = TreeNode(7)
m1 = TreeNode(8)
m2 = TreeNode(9)
m3 = TreeNode(2)
t1 = BinaryTree()
t1.construct_tree(n2, n1, n3)
t1.construct_tree(n4, n2, n5)
t1.construct_tree(n6, n5, n7)
t2 = BinaryTree()
t2.construct_tree(m2, m1, m3)
self.assertEqual(True, is_subtree(t1.tree, t2.tree))
|
torchbenchmark/models/pytorch_unet/pytorch_unet/train.py | yinghai/benchmark | 384 | 12684148 | <reponame>yinghai/benchmark
import argparse
import logging
import sys
from pathlib import Path
import torch
import torch.nn as nn
import torch.nn.functional as F
import wandb
from torch import optim
from torch.utils.data import DataLoader, random_split
from tqdm import tqdm
from utils.data_loading import BasicDataset, CarvanaDataset
from utils.dice_score import dice_loss
from evaluate import evaluate
from unet import UNet
dir_img = Path('./data/imgs/')
dir_mask = Path('./data/masks/')
dir_checkpoint = Path('./checkpoints/')
def train_net(net,
device,
epochs: int = 5,
batch_size: int = 1,
learning_rate: float = 0.001,
val_percent: float = 0.1,
save_checkpoint: bool = True,
img_scale: float = 0.5,
amp: bool = False):
# 1. Create dataset
try:
dataset = CarvanaDataset(dir_img, dir_mask, img_scale)
except (AssertionError, RuntimeError):
dataset = BasicDataset(dir_img, dir_mask, img_scale)
# 2. Split into train / validation partitions
n_val = int(len(dataset) * val_percent)
n_train = len(dataset) - n_val
train_set, val_set = random_split(dataset, [n_train, n_val], generator=torch.Generator().manual_seed(0))
# 3. Create data loaders
loader_args = dict(batch_size=batch_size, num_workers=4, pin_memory=True)
train_loader = DataLoader(train_set, shuffle=True, **loader_args)
val_loader = DataLoader(val_set, shuffle=False, drop_last=True, **loader_args)
# (Initialize logging)
experiment = wandb.init(project='U-Net', resume='allow', anonymous='must')
experiment.config.update(dict(epochs=epochs, batch_size=batch_size, learning_rate=learning_rate,
val_percent=val_percent, save_checkpoint=save_checkpoint, img_scale=img_scale,
amp=amp))
logging.info(f'''Starting training:
Epochs: {epochs}
Batch size: {batch_size}
Learning rate: {learning_rate}
Training size: {n_train}
Validation size: {n_val}
Checkpoints: {save_checkpoint}
Device: {device.type}
Images scaling: {img_scale}
Mixed Precision: {amp}
''')
# 4. Set up the optimizer, the loss, the learning rate scheduler and the loss scaling for AMP
optimizer = optim.RMSprop(net.parameters(), lr=learning_rate, weight_decay=1e-8, momentum=0.9)
scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, 'max', patience=2) # goal: maximize Dice score
grad_scaler = torch.cuda.amp.GradScaler(enabled=amp)
criterion = nn.CrossEntropyLoss()
global_step = 0
# 5. Begin training
for epoch in range(epochs):
net.train()
epoch_loss = 0
with tqdm(total=n_train, desc=f'Epoch {epoch + 1}/{epochs}', unit='img') as pbar:
for batch in train_loader:
images = batch['image']
true_masks = batch['mask']
assert images.shape[1] == net.n_channels, \
f'Network has been defined with {net.n_channels} input channels, ' \
f'but loaded images have {images.shape[1]} channels. Please check that ' \
'the images are loaded correctly.'
images = images.to(device=device, dtype=torch.float32)
true_masks = true_masks.to(device=device, dtype=torch.long)
with torch.cuda.amp.autocast(enabled=amp):
masks_pred = net(images)
loss = criterion(masks_pred, true_masks) \
+ dice_loss(F.softmax(masks_pred, dim=1).float(),
F.one_hot(true_masks, net.n_classes).permute(0, 3, 1, 2).float(),
multiclass=True)
optimizer.zero_grad(set_to_none=True)
grad_scaler.scale(loss).backward()
grad_scaler.step(optimizer)
grad_scaler.update()
pbar.update(images.shape[0])
global_step += 1
epoch_loss += loss.item()
experiment.log({
'train loss': loss.item(),
'step': global_step,
'epoch': epoch
})
pbar.set_postfix(**{'loss (batch)': loss.item()})
# Evaluation round
if global_step % (n_train // (10 * batch_size)) == 0:
histograms = {}
for tag, value in net.named_parameters():
tag = tag.replace('/', '.')
histograms['Weights/' + tag] = wandb.Histogram(value.data.cpu())
histograms['Gradients/' + tag] = wandb.Histogram(value.grad.data.cpu())
val_score = evaluate(net, val_loader, device)
scheduler.step(val_score)
logging.info('Validation Dice score: {}'.format(val_score))
experiment.log({
'learning rate': optimizer.param_groups[0]['lr'],
'validation Dice': val_score,
'images': wandb.Image(images[0].cpu()),
'masks': {
'true': wandb.Image(true_masks[0].float().cpu()),
'pred': wandb.Image(torch.softmax(masks_pred, dim=1)[0].float().cpu()),
},
'step': global_step,
'epoch': epoch,
**histograms
})
if save_checkpoint:
Path(dir_checkpoint).mkdir(parents=True, exist_ok=True)
torch.save(net.state_dict(), str(dir_checkpoint / 'checkpoint_epoch{}.pth'.format(epoch + 1)))
logging.info(f'Checkpoint {epoch + 1} saved!')
def get_args():
parser = argparse.ArgumentParser(description='Train the UNet on images and target masks')
parser.add_argument('--epochs', '-e', metavar='E', type=int, default=5, help='Number of epochs')
parser.add_argument('--batch-size', '-b', dest='batch_size', metavar='B', type=int, default=1, help='Batch size')
parser.add_argument('--learning-rate', '-l', metavar='LR', type=float, default=0.00001,
help='Learning rate', dest='lr')
parser.add_argument('--load', '-f', type=str, default=False, help='Load model from a .pth file')
parser.add_argument('--scale', '-s', type=float, default=0.5, help='Downscaling factor of the images')
parser.add_argument('--validation', '-v', dest='val', type=float, default=10.0,
help='Percent of the data that is used as validation (0-100)')
parser.add_argument('--amp', action='store_true', default=False, help='Use mixed precision')
return parser.parse_args()
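# A hypothetical invocation of this script (the values are placeholders, not project defaults
# beyond those set in get_args above); images are read from ./data/imgs and masks from ./data/masks:
#
#   python train.py --epochs 5 --batch-size 2 --learning-rate 1e-5 --scale 0.5 --validation 10 --amp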
if __name__ == '__main__':
args = get_args()
logging.basicConfig(level=logging.INFO, format='%(levelname)s: %(message)s')
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
logging.info(f'Using device {device}')
# Change here to adapt to your data
# n_channels=3 for RGB images
# n_classes is the number of probabilities you want to get per pixel
net = UNet(n_channels=3, n_classes=2, bilinear=True)
logging.info(f'Network:\n'
f'\t{net.n_channels} input channels\n'
f'\t{net.n_classes} output channels (classes)\n'
f'\t{"Bilinear" if net.bilinear else "Transposed conv"} upscaling')
if args.load:
net.load_state_dict(torch.load(args.load, map_location=device))
logging.info(f'Model loaded from {args.load}')
net.to(device=device)
try:
train_net(net=net,
epochs=args.epochs,
batch_size=args.batch_size,
learning_rate=args.lr,
device=device,
img_scale=args.scale,
val_percent=args.val / 100,
amp=args.amp)
except KeyboardInterrupt:
torch.save(net.state_dict(), 'INTERRUPTED.pth')
logging.info('Saved interrupt')
sys.exit(0)
|
runp-heroku.py | superleader/chat2 | 227 | 12684162 | #!flask/bin/python
from app import app
|
clang/bindings/python/tests/cindex/test_token_kind.py | medismailben/llvm-project | 3,102 | 12684180 | <filename>clang/bindings/python/tests/cindex/test_token_kind.py<gh_stars>1000+
import os
from clang.cindex import Config
if 'CLANG_LIBRARY_PATH' in os.environ:
Config.set_library_path(os.environ['CLANG_LIBRARY_PATH'])
from clang.cindex import TokenKind
import unittest
class TestTokenKind(unittest.TestCase):
def test_constructor(self):
"""Ensure TokenKind constructor works as expected."""
t = TokenKind(5, 'foo')
self.assertEqual(t.value, 5)
self.assertEqual(t.name, 'foo')
def test_bad_register(self):
"""Ensure a duplicate value is rejected for registration."""
with self.assertRaises(ValueError):
TokenKind.register(2, 'foo')
def test_unknown_value(self):
"""Ensure trying to fetch an unknown value raises."""
with self.assertRaises(ValueError):
TokenKind.from_value(-1)
def test_registration(self):
"""Ensure that items registered appear as class attributes."""
self.assertTrue(hasattr(TokenKind, 'LITERAL'))
literal = TokenKind.LITERAL
self.assertIsInstance(literal, TokenKind)
def test_from_value(self):
"""Ensure registered values can be obtained from from_value()."""
t = TokenKind.from_value(3)
self.assertIsInstance(t, TokenKind)
self.assertEqual(t, TokenKind.LITERAL)
def test_repr(self):
"""Ensure repr() works."""
r = repr(TokenKind.LITERAL)
self.assertEqual(r, 'TokenKind.LITERAL')
|
setup.py | SkiingRoger/pyalgotrade-cn | 1,000 | 12684201 | #!/usr/bin/env python
# PyAlgoTrade
#
# Copyright 2011-2015 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
setup(
name='PyAlgoTrade',
version='0.18',
description='Python Algorithmic Trading',
long_description='Python library for backtesting stock trading strategies.',
author='<NAME>',
author_email='<EMAIL>',
url='http://gbeced.github.io/pyalgotrade/',
download_url='http://sourceforge.net/projects/pyalgotrade/files/0.17/PyAlgoTrade-0.17.tar.gz/download',
packages=[
'pyalgotrade',
'pyalgotrade.cn',
'pyalgotrade.barfeed',
'pyalgotrade.bitcoincharts',
'pyalgotrade.bitstamp',
'pyalgotrade.broker',
'pyalgotrade.dataseries',
'pyalgotrade.feed',
'pyalgotrade.optimizer',
'pyalgotrade.stratanalyzer',
'pyalgotrade.strategy',
'pyalgotrade.talibext',
'pyalgotrade.technical',
'pyalgotrade.tools',
'pyalgotrade.twitter',
'pyalgotrade.utils',
'pyalgotrade.websocket',
'pyalgotrade.xignite',
],
install_requires=[
"numpy",
"pytz",
"python-dateutil",
"requests",
],
extras_require={
'Scipy': ["scipy"],
'TALib': ["Cython", "TA-Lib"],
'Plotting': ["matplotlib"],
'Bitstamp': ["ws4py>=0.3.4", "tornado"],
'Twitter': ["tweepy"],
},
)
|
SimpleCV/MachineLearning/TurkingModule.py | M93Pragya/SimpleCV | 1,686 | 12684219 | from SimpleCV import Display, Image, Color, ImageSet
import os
import os.path as osp
import time
import glob
import pickle
"""
This class is a helper utility for automatically turking
image data for supervised learning. It helps you
run through a collection of images and sort them into a set
of classes or categories.
You provide the path(s) to
the images that need to be turked/sorted, your class labels, and
the keys you want to bind to the classes. The turker
loads all the images, optionally processes them, and then displays
them. To sort an image you press the key mapped to its class;
the image is then saved into that class's directory and labeled.
The class can optionally pickle your data for you to use later.
"""
class TurkingModule:
"""
**SUMMARY**
Init sets up the turking module.
**PARAMETERS**
* *source_path* - A list of the path(s) with the images to be turked.
* *out_path* - the output path, a directory will be created for each class.
* *classes* - the names of the classes you are turking as a list of strings.
* *key_bindings* - the keys to bind to each class when turking.
* *preprocess* - a preprocess function. It should take in an image and return a list of images.
* *postprocess* a post-process step. The signature should be image in and image out.
**EXAMPLE**
>>>> def GetBlobs(img):
>>>> blobs = img.findBlobs()
>>>> return [b.mMask for b in blobs]
    >>>> def ScaleInv(img):
    >>>>    return img.resize(100,100).invert()
    >>>> turker = TurkingModule(['./data/'], './turked/', ['apple','banana','cherry'], ['a','b','c'], preprocess=GetBlobs, postprocess=ScaleInv)
>>>> turker.turk()
>>>> # ~~~ stuff ~~~
>>>> turker.save('./derp.pkl')
** TODO **
TODO: Make it so you just pickle the data and don't have to save each file
"""
def __init__(self,source_paths,out_path,classList,key_bindings,preprocess=None, postprocess=None):
#if( not os.access(out_path,os.W_OK) ):
# print "Output path is not writeable."
# raise Exception("Output path is not writeable.")
self.keyBindings = key_bindings
self.classes = classList
self.countMap = {}
self.classMap = {}
self.directoryMap = {}
self.out_path = out_path
self.keyMap = {}
if( len(classList)!=len(key_bindings)):
print "Must have a key for each class."
raise Exception("Must have a key for each class.")
for key,cls in zip(key_bindings,classList):
self.keyMap[key] = cls
# this should work
if( preprocess is None ):
def fakeProcess(img):
return [img]
preprocess = fakeProcess
self.preProcess = preprocess
if( postprocess is None ):
def fakePostProcess(img):
return img
postprocess = fakePostProcess
self.postProcess = postprocess
self.srcImgs = ImageSet()
if( isinstance(source_paths,ImageSet) ):
            self.srcImgs = source_paths
else:
for sp in source_paths:
print "Loading " + sp
imgSet = ImageSet(sp)
print "Loaded " + str(len(imgSet))
self.srcImgs += imgSet
if( not osp.exists(out_path) ):
os.mkdir(out_path)
for c in classList:
outdir = out_path+c+'/'
self.directoryMap[c] = outdir
if( not osp.exists(outdir) ):
os.mkdir(outdir)
for c in classList:
searchstr = self.directoryMap[c]+'*.png'
fc = glob.glob(searchstr)
self.countMap[c] = len(fc)
self.classMap[c] = ImageSet(self.directoryMap[c])
def _saveIt(self,img,classType):
img.clearLayers()
path = self.out_path + classType + "/" + classType+str(self.countMap[classType])+".png"
print "Saving: " + path
img = self.postProcess(img)
self.classMap[classType].append(img)
img.save(path)
self.countMap[classType] = self.countMap[classType] + 1
def getClass(self,className):
"""
**SUMMARY**
Returns the image set that has been turked for the given class.
**PARAMETERS**
* *className* - the class name as a string.
**RETURNS**
An image set on success, None on failure.
**EXAMPLE**
>>>> # Do turking
>>>> iset = turkModule.getClass('cats')
>>>> iset.show()
"""
if(className in self.classMap):
return self.classMap[className]
else:
return None
def _drawControls(self,img,font_size,color,spacing ):
img.drawText("space - skip",10,spacing,fontsize=font_size,color=color)
img.drawText("esc - exit",10,2*spacing,fontsize=font_size,color=color)
y = 3*spacing
for k,cls in self.keyMap.items():
str = k + " - " + cls
img.drawText(str,10,y,fontsize=font_size,color=color)
y = y + spacing
return img
def turk(self,saveOriginal=False,disp_size=(800,600),showKeys=True,font_size=16,color=Color.RED,spacing=10 ):
"""
**SUMMARY**
        This function does the turking of the data. The method goes through each image,
        applies the preprocessing (which can return multiple images), and displays each image
        with an optional overlay of the key mapping. The user then presses the key that corresponds
        to the class of the image. The image is then post-processed and saved to the class directory.
        The escape key kills the turking, the space key skips an image.
**PARAMETERS**
        * *saveOriginal* - if True, save the original image rather than the preprocessed image.
* *disp_size* - size of the display to create.
* *showKeys* - Show the key mapping for the turking. Note that on small images this may not render correctly.
* *font_size* - the font size for the turking display.
* *color* - the font color.
* *spacing* - the spacing between each line of text on the display.
**RETURNS**
Nothing but stores each image in the directory. The image sets are also available
via the getClass method.
**EXAMPLE**
>>>> def GetBlobs(img):
>>>> blobs = img.findBlobs()
>>>> return [b.mMask for b in blobs]
        >>>> def ScaleInv(img):
        >>>>    return img.resize(100,100).invert()
        >>>> turker = TurkingModule(['./data/'], './turked/', ['apple','banana','cherry'], ['a','b','c'], preprocess=GetBlobs, postprocess=ScaleInv)
>>>> turker.turk()
>>>> # ~~~ stuff ~~~
>>>> turker.save('./derp.pkl')
** TODO **
TODO: fix the display so that it renders correctly no matter what the image size.
TODO: Make it so you can stop and start turking at any given spot in the process
"""
disp = Display(disp_size)
bail = False
for img in self.srcImgs:
print img.filename
samples = self.preProcess(img)
for sample in samples:
if( showKeys ):
sample = self._drawControls(sample,font_size,color,spacing )
sample.save(disp)
gotKey = False
while( not gotKey ):
keys = disp.checkEvents(True)
for k in keys:
if k in self.keyMap:
if saveOriginal:
self._saveIt(img,self.keyMap[k])
else:
self._saveIt(sample,self.keyMap[k])
gotKey = True
if k == 'space':
gotKey = True # skip
if k == 'escape':
return
def save(self,fname):
"""
**SUMMARY**
Pickle the relevant data from the turking.
** PARAMETERS **
* *fname* - the file fame.
"""
saveThis = [self.classes,self.directoryMap,self.classMap,self.countMap]
pickle.dump( saveThis, open( fname, "wb" ) )
# todo: eventually we should allow the user to randomly
# split up the data set and then save it.
# def splitTruthTest(self)
|
cea/technologies/network_layout/steiner_spanning_tree.py | architecture-building-systems/cea-toolbox | 121 | 12684220 | """
This script calculates the Steiner spanning tree of a shapefile network
"""
import math
import os
import networkx as nx
import pandas as pd
from geopandas import GeoDataFrame as gdf
from networkx.algorithms.approximation.steinertree import steiner_tree
from shapely.geometry import LineString
from typing import List
import cea.config
import cea.inputlocator
from cea.constants import SHAPEFILE_TOLERANCE
__author__ = "<NAME>"
__copyright__ = "Copyright 2017, Architecture and Building Systems - ETH Zurich"
__credits__ = ["<NAME>"]
__license__ = "MIT"
__version__ = "0.1"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Production"
def calc_steiner_spanning_tree(crs_projected,
temp_path_potential_network_shp,
output_network_folder,
temp_path_building_centroids_shp,
path_output_edges_shp,
path_output_nodes_shp,
weight_field,
type_mat_default,
pipe_diameter_default,
type_network,
total_demand_location,
allow_looped_networks,
optimization_flag,
plant_building_names,
disconnected_building_names):
"""
    Calculate the Steiner spanning tree of the network. Note that this function can't be run in parallel in its
    present form.
:param str crs_projected: e.g. "+proj=utm +zone=48N +ellps=WGS84 +datum=WGS84 +units=m +no_defs"
:param str temp_path_potential_network_shp: e.g. "TEMP/potential_network.shp"
:param str output_network_folder: "{general:scenario}/inputs/networks/DC"
:param str temp_path_building_centroids_shp: e.g. "%TEMP%/nodes_buildings.shp"
:param str path_output_edges_shp: "{general:scenario}/inputs/networks/DC/edges.shp"
:param str path_output_nodes_shp: "{general:scenario}/inputs/networks/DC/nodes.shp"
:param str weight_field: e.g. "Shape_Leng"
:param str type_mat_default: e.g. "T1"
:param float pipe_diameter_default: e.g. 150
:param str type_network: "DC" or "DH"
:param str total_demand_location: "{general:scenario}/outputs/data/demand/Total_demand.csv"
:param bool create_plant: e.g. True
:param bool allow_looped_networks:
:param bool optimization_flag:
:param List[str] plant_building_names: e.g. ``['B001']``
:param List[str] disconnected_building_names: e.g. ``['B002', 'B010', 'B004', 'B005', 'B009']``
:return: ``(mst_edges, mst_nodes)``
"""
# read shapefile into networkx format into a directed potential_network_graph, this is the potential network
potential_network_graph = nx.read_shp(temp_path_potential_network_shp)
building_nodes_graph = nx.read_shp(temp_path_building_centroids_shp)
# transform to an undirected potential_network_graph
iterator_edges = potential_network_graph.edges(data=True)
G = nx.Graph()
for (x, y, data) in iterator_edges:
x = (round(x[0], SHAPEFILE_TOLERANCE), round(x[1], SHAPEFILE_TOLERANCE))
y = (round(y[0], SHAPEFILE_TOLERANCE), round(y[1], SHAPEFILE_TOLERANCE))
G.add_edge(x, y, weight=data[weight_field])
# get the building nodes and coordinates
iterator_nodes = building_nodes_graph.nodes(data=True)
terminal_nodes_coordinates = []
terminal_nodes_names = []
for coordinates, data in iterator_nodes._nodes.items():
building_name = data['Name']
if building_name in disconnected_building_names:
print("Building {} is considered to be disconnected and it is not included".format(building_name))
else:
terminal_nodes_coordinates.append(
(round(coordinates[0], SHAPEFILE_TOLERANCE), round(coordinates[1], SHAPEFILE_TOLERANCE)))
terminal_nodes_names.append(data['Name'])
# calculate steiner spanning tree of undirected potential_network_graph
try:
mst_non_directed = nx.Graph(steiner_tree(G, terminal_nodes_coordinates))
nx.write_shp(mst_non_directed, output_network_folder) # need to write to disk and then import again
mst_nodes = gdf.from_file(path_output_nodes_shp)
mst_edges = gdf.from_file(path_output_edges_shp)
except:
raise ValueError('There was an error while creating the Steiner tree. '
'Check the streets.shp for isolated/disconnected streets (lines) and erase them, '
'the Steiner tree does not support disconnected graphs. '
'If no disconnected streets can be found, try increasing the SHAPEFILE_TOLERANCE in cea.constants and run again. '
'Otherwise, try using the Feature to Line tool of ArcMap with a tolerance of around 10m to solve the issue.')
# POPULATE FIELDS IN NODES
pointer_coordinates_building_names = dict(zip(terminal_nodes_coordinates, terminal_nodes_names))
def populate_fields(coordinate):
if coordinate in terminal_nodes_coordinates:
return pointer_coordinates_building_names[coordinate]
else:
return "NONE"
mst_nodes['coordinates'] = mst_nodes['geometry'].apply(
lambda x: (round(x.coords[0][0], SHAPEFILE_TOLERANCE), round(x.coords[0][1], SHAPEFILE_TOLERANCE)))
mst_nodes['Building'] = mst_nodes['coordinates'].apply(lambda x: populate_fields(x))
mst_nodes['Name'] = mst_nodes['FID'].apply(lambda x: "NODE" + str(x))
mst_nodes['Type'] = mst_nodes['Building'].apply(lambda x: 'CONSUMER' if x != "NONE" else "NONE")
# do some checks to see that the building names was not compromised
if len(terminal_nodes_names) != (len(mst_nodes['Building'].unique()) - 1):
raise ValueError('There was an error while populating the nodes fields. '
'One or more buildings could not be matched to nodes of the network. '
'Try changing the constant SNAP_TOLERANCE in cea/constants.py to try to fix this')
# POPULATE FIELDS IN EDGES
mst_edges.loc[:, 'Type_mat'] = type_mat_default
mst_edges.loc[:, 'Pipe_DN'] = pipe_diameter_default
mst_edges.loc[:, 'Name'] = ["PIPE" + str(x) for x in mst_edges.index]
if allow_looped_networks:
# add loops to the network by connecting None nodes that exist in the potential network
mst_edges, mst_nodes = add_loops_to_network(G,
mst_non_directed,
mst_nodes,
mst_edges,
type_mat_default,
pipe_diameter_default)
# mst_edges.drop(['weight'], inplace=True, axis=1)
if optimization_flag:
for building in plant_building_names:
building_anchor = building_node_from_name(building, mst_nodes)
mst_nodes, mst_edges = add_plant_close_to_anchor(building_anchor, mst_nodes, mst_edges,
type_mat_default, pipe_diameter_default)
elif os.path.exists(total_demand_location):
if len(plant_building_names) > 0:
building_anchor = mst_nodes[mst_nodes['Building'].isin(plant_building_names)]
else:
building_anchor = calc_coord_anchor(total_demand_location, mst_nodes, type_network)
mst_nodes, mst_edges = add_plant_close_to_anchor(building_anchor, mst_nodes, mst_edges,
type_mat_default, pipe_diameter_default)
# GET COORDINATE AND SAVE FINAL VERSION TO DISK
mst_edges.crs = crs_projected
mst_nodes.crs = crs_projected
mst_edges['length_m'] = mst_edges['weight']
mst_edges[['geometry', 'length_m', 'Type_mat', 'Name', 'Pipe_DN']].to_file(path_output_edges_shp,
driver='ESRI Shapefile')
mst_nodes[['geometry', 'Building', 'Name', 'Type']].to_file(path_output_nodes_shp, driver='ESRI Shapefile')
def add_loops_to_network(G, mst_non_directed, new_mst_nodes, mst_edges, type_mat, pipe_dn):
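    # Re-introduce street edges from the potential network that were dropped by the Steiner tree
    # but connect two NONE (street) nodes already present in it, creating loops; if no such edge
    # exists, a second pass looks for connections two nodes apart.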
added_a_loop = False
# Identify all NONE type nodes in the steiner tree
for node_number, node_coords in zip(new_mst_nodes.index, new_mst_nodes['coordinates']):
if new_mst_nodes['Type'][node_number] == 'NONE':
# find neighbours of nodes in the potential network and steiner network
potential_neighbours = G[node_coords]
steiner_neighbours = mst_non_directed[node_coords]
# check if there are differences, if yes, an edge was deleted here
if not set(potential_neighbours.keys()) == set(steiner_neighbours.keys()):
new_neighbour_list = []
for a in potential_neighbours.keys():
if a not in steiner_neighbours.keys():
new_neighbour_list.append(a)
# check if the node that is additional in the potential network also exists in the steiner network
for new_neighbour in new_neighbour_list:
if new_neighbour in list(new_mst_nodes['coordinates'].values):
# check if it is a none type
# write out index of this node
node_index = list(new_mst_nodes['coordinates'].values).index(new_neighbour)
if new_mst_nodes['Type'][node_index] == 'NONE':
# create new edge
line = LineString((node_coords, new_neighbour))
if not line in mst_edges['geometry']:
mst_edges = mst_edges.append(
{"geometry": line, "Pipe_DN": pipe_dn, "Type_mat": type_mat,
"Name": "PIPE" + str(mst_edges.Name.count())},
ignore_index=True)
added_a_loop = True
mst_edges.reset_index(inplace=True, drop=True)
if not added_a_loop:
print('No first degree loop added. Trying two nodes apart.')
# Identify all NONE type nodes in the steiner tree
for node_number, node_coords in zip(new_mst_nodes.index, new_mst_nodes['coordinates']):
if new_mst_nodes['Type'][node_number] == 'NONE':
# find neighbours of nodes in the potential network and steiner network
potential_neighbours = G[node_coords]
steiner_neighbours = mst_non_directed[node_coords]
# check if there are differences, if yes, an edge was deleted here
if not set(potential_neighbours.keys()) == set(steiner_neighbours.keys()):
new_neighbour_list = []
for a in potential_neighbours.keys():
if a not in steiner_neighbours.keys():
new_neighbour_list.append(a)
# check if the node that is additional in the potential network does not exist in the steiner network
for new_neighbour in new_neighbour_list:
if new_neighbour not in list(new_mst_nodes['coordinates'].values):
# find neighbours of that node
second_degree_pot_neigh = list(G[new_neighbour].keys())
for potential_second_deg_neighbour in second_degree_pot_neigh:
if potential_second_deg_neighbour in list(new_mst_nodes[
'coordinates'].values) and potential_second_deg_neighbour != node_coords:
# check if it is a none type
# write out index of this node
node_index = list(new_mst_nodes['coordinates'].values).index(
potential_second_deg_neighbour)
if new_mst_nodes['Type'][node_index] == 'NONE':
# create new edge
line = LineString((node_coords, new_neighbour))
if line not in mst_edges['geometry']:
mst_edges = mst_edges.append(
{"geometry": line, "Pipe_DN": pipe_dn, "Type_mat": type_mat,
"Name": "PIPE" + str(mst_edges.Name.count())},
ignore_index=True)
# Add new node from potential network to steiner tree
# create copy of selected node and add to list of all nodes
copy_of_new_mst_nodes = new_mst_nodes.copy()
x_distance = new_neighbour[0] - node_coords[0]
y_distance = new_neighbour[1] - node_coords[1]
copy_of_new_mst_nodes.geometry = copy_of_new_mst_nodes.translate(
xoff=x_distance, yoff=y_distance)
selected_node = copy_of_new_mst_nodes[
copy_of_new_mst_nodes["coordinates"] == node_coords]
selected_node.loc[:, "Name"] = "NODE" + str(new_mst_nodes.Name.count())
selected_node.loc[:, "Type"] = "NONE"
selected_node["coordinates"] = selected_node.geometry.values[0].coords
if selected_node["coordinates"].values not in new_mst_nodes[
"coordinates"].values:
new_mst_nodes = new_mst_nodes.append(selected_node)
new_mst_nodes.reset_index(inplace=True, drop=True)
line2 = LineString((new_neighbour, potential_second_deg_neighbour))
if line2 not in mst_edges['geometry']:
mst_edges = mst_edges.append(
{"geometry": line2, "Pipe_DN": pipe_dn, "Type_mat": type_mat,
"Name": "PIPE" + str(mst_edges.Name.count())},
ignore_index=True)
added_a_loop = True
mst_edges.reset_index(inplace=True, drop=True)
if not added_a_loop:
print('No loops added.')
return mst_edges, new_mst_nodes
def calc_coord_anchor(total_demand_location, nodes_df, type_network):
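    # The plant is anchored at the building with the highest annual heating (DH) or cooling (DC)
    # demand, read from the total demand file.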
total_demand = pd.read_csv(total_demand_location)
nodes_names_demand = nodes_df.merge(total_demand, left_on="Building", right_on="Name", how="inner")
if type_network == "DH":
field = "QH_sys_MWhyr"
elif type_network == "DC":
field = "QC_sys_MWhyr"
else:
raise ValueError("Invalid value for variable 'type_network': {type_network}".format(type_network=type_network))
max_value = nodes_names_demand[field].max()
building_series = nodes_names_demand[nodes_names_demand[field] == max_value]
return building_series
def building_node_from_name(building_name, nodes_df):
building_series = nodes_df[nodes_df['Building'] == building_name]
return building_series
def add_plant_close_to_anchor(building_anchor, new_mst_nodes, mst_edges, type_mat, pipe_dn):
# find closest node
copy_of_new_mst_nodes = new_mst_nodes.copy()
building_coordinates = building_anchor.geometry.values[0].coords
x1 = building_coordinates[0][0]
y1 = building_coordinates[0][1]
delta = 10E24 # big number
for node in copy_of_new_mst_nodes.iterrows():
if node[1]['Type'] == 'NONE':
x2 = node[1].geometry.coords[0][0]
y2 = node[1].geometry.coords[0][1]
distance = math.sqrt((x2 - x1) ** 2 + (y2 - y1) ** 2)
if 0 < distance < delta:
delta = distance
node_id = node[1].Name
pd.options.mode.chained_assignment = None # avoid warning
# create copy of selected node and add to list of all nodes
copy_of_new_mst_nodes.geometry = copy_of_new_mst_nodes.translate(xoff=1, yoff=1)
selected_node = copy_of_new_mst_nodes[copy_of_new_mst_nodes["Name"] == node_id]
selected_node.loc[:, "Name"] = "NODE" + str(new_mst_nodes.Name.count())
selected_node.loc[:, "Type"] = "PLANT"
new_mst_nodes = new_mst_nodes.append(selected_node)
new_mst_nodes.reset_index(inplace=True, drop=True)
# create new edge
point1 = (selected_node.geometry.x, selected_node.geometry.y)
point2 = (new_mst_nodes[new_mst_nodes["Name"] == node_id].geometry.x,
new_mst_nodes[new_mst_nodes["Name"] == node_id].geometry.y)
line = LineString((point1, point2))
mst_edges = mst_edges.append({"geometry": line, "Pipe_DN": pipe_dn, "Type_mat": type_mat,
"Name": "PIPE" + str(mst_edges.Name.count())
}, ignore_index=True)
mst_edges.reset_index(inplace=True, drop=True)
return new_mst_nodes, mst_edges
def main(config):
assert os.path.exists(config.scenario), 'Scenario not found: %s' % config.scenario
locator = cea.inputlocator.InputLocator(scenario=config.scenario)
weight_field = 'Shape_Leng'
type_mat_default = config.network_layout.type_mat
pipe_diameter_default = config.network_layout.pipe_diameter
type_network = config.network_layout.network_type
create_plant = config.network_layout.create_plant
output_substations_shp = locator.get_temporary_file("nodes_buildings.shp")
path_potential_network = locator.get_temporary_file("potential_network.shp") # shapefile, location of output.
output_edges = locator.get_network_layout_edges_shapefile(type_network, '')
output_nodes = locator.get_network_layout_nodes_shapefile(type_network, '')
output_network_folder = locator.get_input_network_folder(type_network, '')
total_demand_location = locator.get_total_demand()
calc_steiner_spanning_tree(path_potential_network, output_network_folder, output_substations_shp, output_edges,
output_nodes, weight_field, type_mat_default, pipe_diameter_default, type_network,
total_demand_location, create_plant)
if __name__ == '__main__':
main(cea.config.Configuration())
|
i3d_utils.py | 13551132330/I3D-Tensorflow | 119 | 12684242 | # Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Trains and Evaluates the MNIST network using a feed dictionary."""
# pylint: disable=missing-docstring
import os
import time
import numpy
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
import math
import input_data
import numpy as np
from multiprocessing import Pool
import threading
from tqdm import tqdm,trange
def placeholder_inputs(batch_size=16, num_frame_per_clib=16, crop_size=224, rgb_channels=3, flow_channels=2):
"""Generate placeholder variables to represent the input tensors.
These placeholders are used as inputs by the rest of the model building
code and will be fed from the downloaded data in the .run() loop, below.
Args:
batch_size: The batch size will be baked into both placeholders.
num_frame_per_clib: The num of frame per clib.
crop_size: The crop size of per clib.
channels: The input channel of per clib.
Returns:
images_placeholder: Images placeholder.
labels_placeholder: Labels placeholder.
"""
# Note that the shapes of the placeholders match the shapes of the full
# image and label tensors, except the first dimension is now batch_size
# rather than the full size of the train or test data sets.
rgb_images_placeholder = tf.placeholder(tf.float32, shape=(batch_size,
num_frame_per_clib,
crop_size,
crop_size,
rgb_channels))
flow_images_placeholder = tf.placeholder(tf.float32, shape=(batch_size,
num_frame_per_clib,
crop_size,
crop_size,
flow_channels))
labels_placeholder = tf.placeholder(tf.int64, shape=(batch_size
))
is_training = tf.placeholder(tf.bool)
return rgb_images_placeholder, flow_images_placeholder, labels_placeholder, is_training
def rgb_placeholder_inputs(batch_size=16, num_frame_per_clib=16, crop_size=224, rgb_channels=3, flow_channels=2):
"""Generate placeholder variables to represent the input tensors.
These placeholders are used as inputs by the rest of the model building
code and will be fed from the downloaded data in the .run() loop, below.
Args:
batch_size: The batch size will be baked into both placeholders.
num_frame_per_clib: The num of frame per clib.
crop_size: The crop size of per clib.
channels: The input channel of per clib.
Returns:
images_placeholder: Images placeholder.
labels_placeholder: Labels placeholder.
"""
# Note that the shapes of the placeholders match the shapes of the full
# image and label tensors, except the first dimension is now batch_size
# rather than the full size of the train or test data sets.
rgb_images_placeholder = tf.placeholder(tf.float32, shape=(batch_size,
num_frame_per_clib,
crop_size,
crop_size,
rgb_channels))
labels_placeholder = tf.placeholder(tf.int64, shape=(batch_size
))
is_training = tf.placeholder(tf.bool)
return rgb_images_placeholder, labels_placeholder, is_training
def Normalization(clips, frames_num):
new_clips = []
for index in range(frames_num):
clip = tf.image.per_image_standardization(clips[index])
new_clips.append(clip)
return new_clips
def average_gradients(tower_grads):
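  # tower_grads is a list (one entry per GPU tower) of lists of (gradient, variable) pairs as
  # returned by optimizer.compute_gradients(); zip(*tower_grads) regroups them per variable so
  # that each variable's gradient can be averaged across towers.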
average_grads = []
for grad_and_vars in zip(*tower_grads):
grads = []
for g, _ in grad_and_vars:
expanded_g = tf.expand_dims(g, 0)
grads.append(expanded_g)
grad = tf.concat(grads, 0)
grad = tf.reduce_mean(grad, 0)
v = grad_and_vars[0][1]
grad_and_var = (grad, v)
average_grads.append(grad_and_var)
return average_grads
def l2_loss(weight_decay, weighyt_list):
l2_reg = tf.contrib.layers.l2_regularizer(weight_decay)
return tf.contrib.layers.apply_regularization(regularizer=l2_reg, weights_list=weighyt_list)
def tower_loss(logit, labels, wd):
print(logit.shape)
print(labels.shape)
weight_map = []
for variable in tf.global_variables():
if 'conv_3d/w' in variable.name or 'kernel' in variable.name:
weight_map.append(variable)
cross_entropy_mean = tf.reduce_mean(
tf.nn.sparse_softmax_cross_entropy_with_logits(labels=labels, logits=logit)
)
weight_decay = l2_loss(wd, weight_map)
#tf.summary.scalar('sgd_weight_decay_loss', weight_decay)
# Calculate the total loss for the current tower.
total_loss = cross_entropy_mean + weight_decay
return total_loss
def tower_acc(logit, labels):
correct_pred = tf.equal(tf.argmax(logit, 1), labels)
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
return accuracy
def _variable_on_cpu(name, shape, initializer):
with tf.device('/cpu:0'):
var = tf.get_variable(name, shape, initializer=initializer)
return var
def _variable_with_weight_decay(name, shape, wd):
var = _variable_on_cpu(name, shape, tf.contrib.layers.xavier_initializer())
if wd is not None:
weight_decay = tf.nn.l2_loss(var)*wd
tf.add_to_collection('weightdecay_losses', weight_decay)
return var
def data_to_feed_dict(data):
rgb_train_images = []
train_labels = []
for i in data:
tmp_train_images = i.get_result()[0]
tmp_labels = i.get_result()[1]
rgb_train_images.extend(tmp_train_images)
train_labels.extend(tmp_labels)
return np.array(rgb_train_images), np.array(train_labels)
def get_data(filename, batch_size, num_frames_per_clip=64, sample_rate=4, crop_size=224, shuffle=False, add_flow=False):
rgb_train_images, flow_train_images, train_labels, _, _, _ = input_data.read_clip_and_label(
filename=filename,
batch_size=batch_size,
num_frames_per_clip=num_frames_per_clip,
sample_rate=sample_rate,
crop_size=crop_size,
shuffle=shuffle,
add_flow=add_flow
)
return rgb_train_images, train_labels
class MyThread(threading.Thread):
def __init__(self, func, args=()):
super(MyThread, self).__init__()
self.func = func
self.args = args
def run(self):
self.result = self.func(*self.args)
def get_result(self):
try:
return self.result
except Exception:
return None
def load_data(filename, batch_size, num_frames_per_clip, sample_rate, crop_size, shuffle=False, add_flow=False):
data = []
'''
p = Pool(batch_size/8)
for i in range(batch_size):
data.append(p.apply_async(get_data, args=(
filename,
8,
num_frames_per_clip,
sample_rate,
crop_size,
shuffle,
add_flow
)))
p.close()
#p.join()
'''
    for i in range(batch_size // 4):
t = MyThread(get_data, args=(
filename,
4,
num_frames_per_clip,
sample_rate,
crop_size,
shuffle,
add_flow
))
data.append(t)
t.start()
for t in data:
t.join()
print('DATA_LOAD_COMP: enqueue......')
rgb_train_images, train_labels = data_to_feed_dict(data)
return rgb_train_images, train_labels
def topk(predicts, labels, ids):
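    # Aggregate per-clip prediction scores into per-video results: clip-level top-1/top-5
    # accuracy is computed from each clip's own score, while video-level accuracy averages the
    # scores of all clips sharing the same video id before ranking.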
scores = {}
top1_list = []
top5_list = []
clips_top1_list = []
clips_top5_list = []
start_time = time.time()
print('Results process..............')
for index in tqdm(range(len(predicts))):
id = ids[index]
score = predicts[index]
        scores.setdefault('%d' % id, []).append(score)
avg_pre_index = np.argsort(score).tolist()
top1 = (labels[id] in avg_pre_index[-1:])
top5 = (labels[id] in avg_pre_index[-5:])
clips_top1_list.append(top1)
clips_top5_list.append(top5)
print('Clips-----TOP_1_ACC in test: %f' % np.mean(clips_top1_list))
print('Clips-----TOP_5_ACC in test: %f' % np.mean(clips_top5_list))
print('..............')
for _id in range(len(labels)-1):
avg_pre_index = np.argsort(np.mean(scores['%d'%_id], axis=0)).tolist()
top1 = (labels[_id] in avg_pre_index[-1:])
top5 = (labels[_id] in avg_pre_index[-5:])
top1_list.append(top1)
top5_list.append(top5)
print('TOP_1_ACC in test: %f' % np.mean(top1_list))
print('TOP_5_ACC in test: %f' % np.mean(top5_list))
duration = time.time() - start_time
print('Time use: %.3f' % duration)
|
py/ztools/lib/Titles.py | HerrTrigger/NSC_BUILDER | 828 | 12684255 | <reponame>HerrTrigger/NSC_BUILDER
#!/usr/bin/python3
# -*- coding: utf-8 -*-
import os
import re
import time
import json
import Title
import operator
import Config
import Print
import threading
global titles
titles = {}
if os.path.isfile('titles.json'):
os.rename('titles.json', 'titledb/titles.json')
def data():
return titles
def items():
return titles.items()
def get(key):
return titles[key]
def contains(key):
return key in titles
def set(key, value):
titles[key] = value
#def titles():
# return titles
def keys():
return titles.keys()
def loadTitleFile(path, silent = False):
timestamp = time.clock()
with open(path, encoding="utf-8-sig") as f:
loadTitleBuffer(f.read(), silent)
Print.info('loaded ' + path + ' in ' + str(time.clock() - timestamp) + ' seconds')
def loadTitleBuffer(buffer, silent = False):
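    # The buffer is expected to contain pipe-separated lines, optionally starting with a header
    # such as "RightsID|TitleKey|Name" (remapped below onto the internal field names id/key/name);
    # empty lines and lines starting with '#' are ignored.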
firstLine = True
map = ['id', 'key', 'name']
for line in buffer.split('\n'):
line = line.strip()
if len(line) == 0 or line[0] == '#':
continue
if firstLine:
firstLine = False
if re.match('[A-Za-z\|\s]+', line, re.I):
map = line.split('|')
i = 0
while i < len(map):
if map[i] == 'RightsID':
map[i] = 'id'
if map[i] == 'TitleKey':
map[i] = 'key'
if map[i] == 'Name':
map[i] = 'name'
i += 1
continue
t = Title.Title()
t.loadCsv(line, map)
if not t.id in keys():
titles[t.id] = Title.Title()
titleKey = titles[t.id].key
titles[t.id].loadCsv(line, map)
if not silent and titleKey != titles[t.id].key:
Print.info('Added new title key for ' + str(titles[t.id].name) + '[' + str(t.id) + ']')
confLock = threading.Lock()
def load():
confLock.acquire()
global titles
if os.path.isfile("titledb/titles.json"):
timestamp = time.clock()
with open('titledb/titles.json', encoding="utf-8-sig") as f:
for i, k in json.loads(f.read()).items():
#if k['frontBoxArt'] and k['frontBoxArt'].endswith('.jpg'):
# k['iconUrl'] = k['frontBoxArt']
# k['frontBoxArt'] = None
titles[i] = Title.Title()
titles[i].__dict__ = k
Print.info('loaded titledb/titles.json in ' + str(time.clock() - timestamp) + ' seconds')
if os.path.isfile("titles.txt"):
loadTitleFile('titles.txt', True)
try:
files = [f for f in os.listdir(Config.paths.titleDatabase) if f.endswith('.txt')]
files.sort()
for file in files:
loadTitleFile(Config.paths.titleDatabase + '/' + file, False)
except BaseException as e:
Print.error('title load error: ' + str(e))
confLock.release()
def export(fileName = 'titles.txt', map = ['id', 'rightsId', 'key', 'isUpdate', 'isDLC', 'isDemo', 'name', 'version', 'region', 'retailOnly']):
buffer = ''
buffer += '|'.join(map) + '\n'
for t in sorted(list(titles.values())):
buffer += t.serialize(map) + '\n'
with open(fileName, 'w', encoding='utf-8') as csv:
csv.write(buffer)
def save(fileName = 'titledb/titles.json'):
confLock.acquire()
try:
j = {}
for i,k in titles.items():
if not k.id or k.id == '0000000000000000':
continue
if k.description:
k.description = k.description.strip()
j[k.id] = k.__dict__
with open(fileName, 'w') as outfile:
json.dump(j, outfile, indent=4)
except:
confLock.release()
raise
confLock.release()
class Queue:
def __init__(self):
self.queue = []
self.lock = threading.Lock()
self.i = 0
def add(self, id, skipCheck = False):
self.lock.acquire()
id = id.upper()
if not id in self.queue and (skipCheck or self.isValid(id)):
self.queue.append(id)
self.lock.release()
def shift(self):
self.lock.acquire()
if self.i >= len(self.queue):
self.lock.release()
return None
self.i += 1
r =self.queue[self.i-1]
self.lock.release()
return r
def empty(self):
return bool(self.size() == 0)
def get(self, idx = None):
if idx == None:
return self.queue
return self.queue[idx]
def isValid(self, id):
return contains(id)
def load(self):
try:
with open('conf/queue.txt', encoding="utf-8-sig") as f:
for line in f.read().split('\n'):
self.add(line.strip())
except BaseException as e:
pass
def size(self):
return len(self.queue) - self.i
def save(self):
self.lock.acquire()
try:
with open('conf/queue.txt', 'w', encoding='utf-8') as f:
for id in self.queue:
f.write(id + '\n')
except:
pass
self.lock.release()
global queue
queue = Queue() |
examples/tf/supervised_advanced_tf.py | stjordanis/QMLT | 117 | 12684260 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2018 Xanadu Quantum Technologies Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
.. currentmodule:: qmlt.examples.tf
.. code-author:: <NAME> <<EMAIL>>
We revisit the example of a simple supervised learning task with the tensorflow circuit learner
and introduce adaptive learning rate, printing, warm start and batch mode.
"""
import strawberryfields as sf
from strawberryfields.ops import Dgate, BSgate
import tensorflow as tf
from qmlt.tf.helpers import make_param
from qmlt.tf import CircuitLearner
steps = 200
batch_size = 2
def circuit(X):
params = [make_param(name='phi', constant=2., monitor=True)]
eng, q = sf.Engine(2)
with eng:
Dgate(X[:, 0], 0.) | q[0]
Dgate(X[:, 1], 0.) | q[1]
BSgate(phi=params[0]) | (q[0], q[1])
BSgate() | (q[0], q[1])
num_inputs = X.get_shape().as_list()[0]
state = eng.run('tf', cutoff_dim=10, eval=False, batch_size=num_inputs)
p0 = state.fock_prob([0, 2])
p1 = state.fock_prob([2, 0])
normalization = p0 + p1 + 1e-10
circuit_output = p1 / normalization
return circuit_output
def myloss(circuit_output, targets):
    return tf.losses.mean_squared_error(labels=targets, predictions=circuit_output)
def outputs_to_predictions(outpt):
return tf.round(outpt)
X_train = [[0.2, 0.4], [0.6, 0.8], [0.4, 0.2], [0.8, 0.6]]
Y_train = [1, 1, 0, 0]
X_test = [[0.25, 0.5], [0.5, 0.25]]
Y_test = [1, 0]
X_pred = [[0.4, 0.5], [0.5, 0.4]]
# There are some changes here:
# We decay the learning rate by a factor 1/(1-decay*step) in each step.
# We train_circuit with batches of 2 training inputs (instead of the full batch).
# We also print out the results every 10th step.
# Finally, you can set 'warm_start': True to continue previous training.
# (MAKE SURE YOU RUN THE SAME SCRIPT ONCE WITH A COLD START,
# ELSE YOU GET ERRORS WHEN LOADING THE MODEL!).
# This loads the final parameters from the previous training. You can see
# that the global step starts where it ended the last time you ran the script.
hyperparams = {'circuit': circuit,
'task': 'supervised',
'loss': myloss,
'optimizer': 'SGD',
'init_learning_rate': 0.5,
'decay': 0.01,
'print_log': True,
'log_every': 10,
'warm_start': False
}
learner = CircuitLearner(hyperparams=hyperparams)
learner.train_circuit(X=X_train, Y=Y_train, steps=steps, batch_size=batch_size)
test_score = learner.score_circuit(X=X_test, Y=Y_test,
outputs_to_predictions=outputs_to_predictions)
# The score_circuit() function returns a dictionary of different metrics.
print("\nPossible scores to print: {}".format(list(test_score.keys())))
# We select the accuracy and loss.
print("Accuracy on test set: ", test_score['accuracy'])
print("Loss on test set: ", test_score['loss'])
outcomes = learner.run_circuit(X=X_pred,
outputs_to_predictions=outputs_to_predictions)
# The run_circuit() function returns a dictionary of different outcomes.
print("\nPossible outcomes to print: {}".format(list(outcomes.keys())))
# We select the predictions
print("Predictions for new inputs: {}".format(outcomes['predictions']))
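# A possible warm-start follow-up (sketch only, mirroring the comments above): after
# one cold-start run of this script has stored a model, training can be continued by
# flipping the flag, e.g.
#
#   hyperparams['warm_start'] = True
#   learner = CircuitLearner(hyperparams=hyperparams)
#   learner.train_circuit(X=X_train, Y=Y_train, steps=steps, batch_size=batch_size)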
|
egs2/slurp_entity/asr1/local/evaluation/evaluate.py | texpomru13/espnet | 5,053 | 12684268 | <gh_stars>1000+
import argparse
import logging
from progress.bar import Bar
from metrics import ErrorMetric
from util import format_results, load_predictions, load_gold_data
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO,
)
logger = logging.getLogger(__name__)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="SLURP evaluation script")
parser.add_argument(
"-g",
"--gold-data",
required=True,
type=str,
help="Gold data in SLURP jsonl format",
)
parser.add_argument(
"-p", "--prediction-file", type=str, required=True, help="Predictions file"
)
parser.add_argument(
"--load-gold",
action="store_true",
help="When evaluating against gold transcriptions\
(gold_*_predictions.jsonl), this flag must be true.",
)
parser.add_argument(
"--average",
type=str,
default="micro",
help="The averaging modality {micro, macro}.",
)
parser.add_argument(
"--full",
action="store_true",
help="Print the full results, including per-label metrics.",
)
parser.add_argument(
"--errors", action="store_true", help="Print TPs, FPs, and FNs in each row."
)
parser.add_argument(
"--table-layout",
type=str,
default="fancy_grid",
help="The results table layout {fancy_grid (DEFAULT), csv, tsv}.",
)
args = parser.parse_args()
logger.info("Loading data")
pred_examples = load_predictions(args.prediction_file, args.load_gold)
gold_examples = load_gold_data(args.gold_data, args.load_gold)
n_gold_examples = len(gold_examples)
logger.info("Initializing metrics")
scenario_f1 = ErrorMetric.get_instance(metric="f1", average=args.average)
action_f1 = ErrorMetric.get_instance(metric="f1", average=args.average)
intent_f1 = ErrorMetric.get_instance(metric="f1", average=args.average)
span_f1 = ErrorMetric.get_instance(metric="span_f1", average=args.average)
distance_metrics = {}
for distance in ["word", "char"]:
distance_metrics[distance] = ErrorMetric.get_instance(
metric="span_distance_f1", average=args.average, distance=distance
)
slu_f1 = ErrorMetric.get_instance(metric="slu_f1", average=args.average)
bar = Bar(message="Evaluating metrics", max=len(gold_examples))
for gold_id in list(gold_examples):
if gold_id in pred_examples:
gold_example = gold_examples.pop(gold_id)
pred_example = pred_examples.pop(gold_id)
scenario_f1(gold_example["scenario"], pred_example["scenario"])
action_f1(gold_example["action"], pred_example["action"])
intent_f1(
"{}_{}".format(gold_example["scenario"], gold_example["action"]),
"{}_{}".format(pred_example["scenario"], pred_example["action"]),
)
# Filtering below has been added to original code
# because of way in which punctuation handled in data preparation
for k in gold_example["entities"]:
k["filler"] = k["filler"].replace(" '", "'")
span_f1(gold_example["entities"], pred_example["entities"])
for distance, metric in distance_metrics.items():
metric(gold_example["entities"], pred_example["entities"])
bar.next()
bar.finish()
logger.info("Results:")
results = scenario_f1.get_metric()
print(
format_results(
results=results,
label="scenario",
full=args.full,
errors=args.errors,
table_layout=args.table_layout,
),
"\n",
)
results = action_f1.get_metric()
print(
format_results(
results=results,
label="action",
full=args.full,
errors=args.errors,
table_layout=args.table_layout,
),
"\n",
)
results = intent_f1.get_metric()
print(
format_results(
results=results,
label="intent (scen_act)",
full=args.full,
errors=args.errors,
table_layout=args.table_layout,
),
"\n",
)
results = span_f1.get_metric()
print(
format_results(
results=results,
label="entities",
full=args.full,
errors=args.errors,
table_layout=args.table_layout,
),
"\n",
)
for distance, metric in distance_metrics.items():
results = metric.get_metric()
slu_f1(results)
print(
format_results(
results=results,
label="entities (distance {})".format(distance),
full=args.full,
errors=args.errors,
table_layout=args.table_layout,
),
"\n",
)
results = slu_f1.get_metric()
print(
format_results(
results=results,
label="SLU F1",
full=args.full,
errors=args.errors,
table_layout=args.table_layout,
),
"\n",
)
logger.warning(
"Gold examples not predicted: {} (out of {})".format(
len(gold_examples), n_gold_examples
)
)
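# Example invocation (a sketch; the file names below are placeholders for your data):
#
#   python evaluate.py -g gold_test.jsonl -p predictions.jsonl --average micro --full
#
# Pass --load-gold when scoring predictions made against gold transcriptions, as
# described in the argument help above.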
|
integration_test/test_crc16.py | lynix94/nbase-arc | 176 | 12684271 | #
# Copyright 2015 <NAME>.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import unittest
import testbase
import util
import gateway_mgmt
import config
import default_cluster
class TestCRC16( unittest.TestCase ):
cluster = config.clusters[0]
@classmethod
def setUpClass( cls ):
cls.conf_checker = default_cluster.initialize_starting_up_smr_before_redis( cls.cluster )
        assert cls.conf_checker is not None, 'failed to initialize cluster'
@classmethod
def tearDownClass( cls ):
testbase.defaultTearDown(cls)
def setUp( self ):
util.set_process_logfile_prefix( 'TestCRC16_%s' % self._testMethodName )
return 0
def tearDown( self ):
return 0
def test_single_thread_input( self ):
util.print_frame()
self.cluster = config.clusters[0]
result = {}
ip, port = util.get_rand_gateway( self.cluster )
gw = gateway_mgmt.Gateway( ip )
        self.assertEqual( 0, gw.connect( ip, port ) )
max = 5
for idx in range( max ):
cmd = 'set key%d 0\r\n' % (idx)
gw.write( cmd )
result[idx] = gw.read_until( '\r\n' )
data_max = 65535
for idx in range( max ):
for cnt in range( 0, data_max ):
gw.write( 'crc16 key%d %d\r\n' % (idx, cnt) )
result[idx] = gw.read_until( '\r\n' )
for idx in range( max - 1 ):
            self.assertEqual( result[idx], result[idx + 1] )
|
tensorflow/python/kernel_tests/v1_compat_tests/stack_op_test.py | EricRemmerswaal/tensorflow | 190,993 | 12684273 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""V1 tests for Stack and ParallelStack Ops."""
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
class AutomaticStackingTest(test.TestCase):
@test_util.run_deprecated_v1
# Tests symbolic tensor semantics
def testVariable(self):
with self.session():
v = variables.Variable(17)
result = ops.convert_to_tensor([[0, 0, 0], [0, v, 0], [0, 0, 0]])
self.evaluate(v.initializer)
self.assertAllEqual([[0, 0, 0], [0, 17, 0], [0, 0, 0]],
self.evaluate(result))
v.assign(38).op.run()
self.assertAllEqual([[0, 0, 0], [0, 38, 0], [0, 0, 0]],
self.evaluate(result))
@test_util.run_deprecated_v1
# Placeholders are V1 only.
def testPlaceholder(self):
with self.session():
# Test using placeholder with a defined shape.
ph_0 = array_ops.placeholder(dtypes.int32, shape=[])
result_0 = ops.convert_to_tensor([[0, 0, 0], [0, ph_0, 0], [0, 0, 0]])
self.assertAllEqual([[0, 0, 0], [0, 1, 0], [0, 0, 0]],
result_0.eval(feed_dict={ph_0: 1}))
self.assertAllEqual([[0, 0, 0], [0, 2, 0], [0, 0, 0]],
result_0.eval(feed_dict={ph_0: 2}))
# Test using placeholder with an undefined shape.
ph_1 = array_ops.placeholder(dtypes.int32)
result_1 = ops.convert_to_tensor([[0, 0, 0], [0, ph_1, 0], [0, 0, 0]])
self.assertAllEqual([[0, 0, 0], [0, 1, 0], [0, 0, 0]],
result_1.eval(feed_dict={ph_1: 1}))
self.assertAllEqual([[0, 0, 0], [0, 2, 0], [0, 0, 0]],
result_1.eval(feed_dict={ph_1: 2}))
@test_util.run_deprecated_v1
# Placeholders and shape inference are only applicable in Graph mode.
def testShapeErrors(self):
# Static shape error.
ph_0 = array_ops.placeholder(dtypes.int32, shape=[1])
with self.assertRaises(ValueError):
ops.convert_to_tensor([[0, 0, 0], [0, ph_0, 0], [0, 0, 0]])
# Dynamic shape error.
ph_1 = array_ops.placeholder(dtypes.int32)
result_1 = ops.convert_to_tensor([[0, 0, 0], [0, ph_1, 0], [0, 0, 0]])
with self.session():
with self.assertRaises(errors_impl.InvalidArgumentError):
result_1.eval(feed_dict={ph_1: [1]})
if __name__ == "__main__":
test.main()
|
runtests.py | abronin/django-admin-easy | 350 | 12684277 | <gh_stars>100-1000
# coding: utf-8
from __future__ import (
absolute_import, division, print_function, unicode_literals
)
import os
import sys
import django
from django.conf import settings
from django.test.utils import get_runner
def runtests():
os.environ['DJANGO_SETTINGS_MODULE'] = 'test_project.settings'
django.setup()
TestRunner = get_runner(settings)
test_runner = TestRunner()
failures = test_runner.run_tests(["tests"])
sys.exit(bool(failures))
if __name__ == '__main__':
runtests() |
Configuration/Eras/python/Modifier_run2_CSC_2018_cff.py | ckamtsikis/cmssw | 852 | 12684303 | <filename>Configuration/Eras/python/Modifier_run2_CSC_2018_cff.py
import FWCore.ParameterSet.Config as cms
run2_CSC_2018 = cms.Modifier()
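# Usage sketch (hedged; this follows the usual CMSSW era/modifier pattern, and the
# producer/parameter names below are placeholders, not real configuration objects):
#
#   from Configuration.Eras.Modifier_run2_CSC_2018_cff import run2_CSC_2018
#   run2_CSC_2018.toModify(someCSCProducer, someParameter=newValue)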
|
tests/py/test_record_an_exchange.py | kant/gratipay.com | 517 | 12684318 | <filename>tests/py/test_record_an_exchange.py
from __future__ import unicode_literals
from psycopg2 import IntegrityError
from gratipay.testing import Harness, D
from gratipay.models.exchange_route import ExchangeRoute
class TestRecordAnExchange(Harness):
# fixture
# =======
def make_participants(self):
self.make_participant('alice', claimed_time='now', is_admin=True)
self.bob = self.make_participant('bob', claimed_time='now')
def record_an_exchange(self, data, make_participants=True):
if make_participants:
self.make_participants()
data.setdefault('status', 'succeeded')
data.setdefault('note', 'noted')
if 'route_id' not in data:
try:
data['route_id'] = ExchangeRoute.insert(self.bob, 'paypal', '<EMAIL>').id
except IntegrityError:
data['route_id'] = ExchangeRoute.from_network(self.bob, 'paypal').id
if data['status'] is None:
del(data['status'])
if data['route_id'] is None:
del(data['route_id'])
if 'ref' not in data:
data['ref'] = 'N/A'
return self.client.PxST('/~bob/history/record-an-exchange', data, auth_as='alice')
# tests
# =====
def test_success_is_302(self):
response = self.record_an_exchange({'amount': '10', 'fee': '0'})
assert response.code == 302
assert response.headers['location'] == '/bob/history/'
def test_non_admin_is_403(self):
self.make_participant('alice', claimed_time='now')
self.bob = self.make_participant('bob', claimed_time='now')
actual = self.record_an_exchange({'amount': '10', 'fee': '0'}, False).code
assert actual == 403
def test_bad_amount_is_400(self):
response = self.record_an_exchange({'amount': 'cheese', 'fee': '0'})
assert response.code == 400
assert response.body == "Invalid amount/fee"
def test_bad_fee_is_400(self):
response = self.record_an_exchange({'amount': '10', 'fee': 'cheese'})
assert response.code == 400
assert response.body == "Invalid amount/fee"
def test_no_note_is_400(self):
response = self.record_an_exchange({'amount': '10', 'fee': '0', 'note': ''})
assert response.code == 400
assert response.body == "Invalid note"
def test_whitespace_note_is_400(self):
response = self.record_an_exchange({'amount': '10', 'fee': '0', 'note': ' '})
assert response.code == 400
assert response.body == "Invalid note"
def test_no_route_id_is_400(self):
response = self.record_an_exchange({'amount': '10', 'fee': '0', 'route_id': None})
assert response.code == 400
assert response.body == "Invalid route_id"
def test_bad_route_id_is_400(self):
response = self.record_an_exchange({'amount': '10', 'fee': '0', 'route_id': 'foo'})
assert response.code == 400
assert response.body == "Invalid route_id"
def test_non_existent_route_id_is_400(self):
response = self.record_an_exchange({'amount': '10', 'fee': '0', 'route_id': '123456'})
assert response.code == 400
assert response.body == "Route doesn't exist"
def test_route_should_belong_to_user_else_400(self):
alice = self.make_participant('alice', claimed_time='now', is_admin=True)
self.make_participant('bob', claimed_time='now')
route = ExchangeRoute.insert(alice, 'paypal', '<EMAIL>')
response = self.record_an_exchange({'amount': '10', 'fee': '0', 'route_id': route.id}, False)
assert response.code == 400
assert response.body == "Route doesn't exist"
def test_no_ref_is_400(self):
response = self.record_an_exchange({'amount': '10', 'fee': '0', 'ref': ''})
assert response.code == 400
assert response.body == "Invalid Reference"
def test_whitespace_ref_is_400(self):
response = self.record_an_exchange({'amount': '10', 'fee': '0', 'ref': ' '})
assert response.code == 400
assert response.body == "Invalid Reference"
def test_dropping_balance_below_zero_is_allowed_in_this_context(self):
self.record_an_exchange({'amount': '-10', 'fee': '0'})
actual = self.db.one("SELECT balance FROM participants WHERE username='bob'")
assert actual == D('-10.00')
def test_success_records_exchange(self):
self.record_an_exchange({'amount': '10', 'fee': '0.50', 'ref':"605BSOC6G855L15OO"})
expected = { "amount": D('10.00')
, "fee": D('0.50')
, "participant": "bob"
, "recorder": "alice"
, "note": "noted"
, "ref" : "605BSOC6G855L15OO"
, "route": ExchangeRoute.from_network(self.bob, 'paypal').id
}
SQL = "SELECT amount, fee, participant, recorder, note, route, ref " \
"FROM exchanges"
actual = self.db.one(SQL, back_as=dict)
assert actual == expected
def test_success_updates_balance(self):
self.record_an_exchange({'amount': '10', 'fee': '0'})
expected = D('10.00')
SQL = "SELECT balance FROM participants WHERE username='bob'"
actual = self.db.one(SQL)
assert actual == expected
def test_withdrawals_work(self):
self.make_participant('alice', claimed_time='now', is_admin=True)
self.bob = self.make_participant('bob', claimed_time='now', balance=20)
self.record_an_exchange({'amount': '-7', 'fee': '0'}, make_participants=False)
expected = D('13.00')
SQL = "SELECT balance FROM participants WHERE username='bob'"
actual = self.db.one(SQL)
assert actual == expected
def test_withdrawals_take_fee_out_of_balance(self):
self.make_participant('alice', claimed_time='now', is_admin=True)
self.bob = self.make_participant('bob', claimed_time='now', balance=20)
self.bob = self.record_an_exchange({'amount': '-7', 'fee': '1.13'}, False)
SQL = "SELECT balance FROM participants WHERE username='bob'"
assert self.db.one(SQL) == D('11.87')
def test_can_set_status(self):
self.make_participants()
for status in ('pre', 'pending', 'failed', 'succeeded'):
self.record_an_exchange({'amount': '10', 'fee': '0', 'status': status}, False)
actual = self.db.one("SELECT status FROM exchanges ORDER BY timestamp desc LIMIT 1")
assert actual == status
def test_cant_record_new_exchanges_with_None_status(self):
r = self.record_an_exchange({'amount': '10', 'fee': '0', 'status': None})
assert r.code == 400
assert self.db.one("SELECT count(*) FROM exchanges") == 0
def test_succeeded_affects_balance(self):
self.make_participants()
balance = 0
for amount in ('10', '-10'):
self.record_an_exchange({'amount': amount, 'fee': '0'}, False)
balance += int(amount)
assert self.db.one("SELECT balance FROM participants WHERE username='bob'") == balance
def test_failed_doesnt_affect_balance(self):
self.make_participants()
for amount in ('10', '-10'):
self.record_an_exchange({
'amount': amount,
'fee': '0',
'status': 'failed'
}, False)
assert self.db.one("SELECT balance FROM participants WHERE username='bob'") == 0
def test_other_statuses_dont_affect_balance_for_payins(self):
self.make_participants()
for status in ('pre', 'pending'):
self.record_an_exchange({
'amount': '10',
'fee': '0',
'status': status
}, False)
assert self.db.one("SELECT balance FROM participants WHERE username='bob'") == 0
def test_other_statuses_affect_balance_for_payouts(self):
self.make_participants()
balance = 0
for status in ('pre', 'pending'):
self.record_an_exchange({
'amount': '-10',
'fee': '0',
'status': status
}, False)
balance -= 10
assert self.db.one("SELECT balance FROM participants WHERE username='bob'") == balance
|
third_party/virtualbox/src/libs/xpcom18a4/python/test/regrtest.py | Fimbure/icebox-1 | 521 | 12684349 | # ***** BEGIN LICENSE BLOCK *****
# Version: MPL 1.1/GPL 2.0/LGPL 2.1
#
# The contents of this file are subject to the Mozilla Public License Version
# 1.1 (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
# http://www.mozilla.org/MPL/
#
# Software distributed under the License is distributed on an "AS IS" basis,
# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
# for the specific language governing rights and limitations under the
# License.
#
# The Original Code is Python XPCOM language bindings.
#
# The Initial Developer of the Original Code is
# ActiveState Tool Corp.
# Portions created by the Initial Developer are Copyright (C) 2000
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# <NAME> <<EMAIL>> (original author)
#
# Alternatively, the contents of this file may be used under the terms of
# either the GNU General Public License Version 2 or later (the "GPL"), or
# the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
# in which case the provisions of the GPL or the LGPL are applicable instead
# of those above. If you wish to allow use of your version of this file only
# under the terms of either the GPL or the LGPL, and not to allow others to
# use your version of this file under the terms of the MPL, indicate your
# decision by deleting the provisions above and replace them with the notice
# and other provisions required by the GPL or the LGPL. If you do not delete
# the provisions above, a recipient may use your version of this file under
# the terms of any one of the MPL, the GPL or the LGPL.
#
# ***** END LICENSE BLOCK *****
# regrtest.py
#
# The Regression Tests for the xpcom package.
import os
import sys
import unittest
# A little magic to create a single "test suite" from all test_ files
# in this dir. A single suite makes for prettier output test :)
def suite():
# Loop over all test_*.py files here
try:
me = __file__
except NameError:
me = sys.argv[0]
me = os.path.abspath(me)
files = os.listdir(os.path.dirname(me))
suite = unittest.TestSuite()
# XXX - add the others here!
#suite.addTest(unittest.FunctionTestCase(import_all))
for file in files:
base, ext = os.path.splitext(file)
if ext=='.py' and os.path.basename(base).startswith("test_"):
mod = __import__(base)
if hasattr(mod, "suite"):
test = mod.suite()
else:
test = unittest.defaultTestLoader.loadTestsFromModule(mod)
suite.addTest(test)
return suite
class CustomLoader(unittest.TestLoader):
def loadTestsFromModule(self, module):
return suite()
try:
unittest.TestProgram(testLoader=CustomLoader())(argv=sys.argv)
finally:
from xpcom import _xpcom
_xpcom.NS_ShutdownXPCOM() # To get leak stats and otherwise ensure life is good.
ni = _xpcom._GetInterfaceCount()
ng = _xpcom._GetGatewayCount()
if ni or ng:
# The old 'regrtest' that was not based purely on unittest did not
# do this check at the end - it relied on each module doing it itself.
# Thus, these leaks are not new, just newly noticed :) Likely to be
# something silly like module globals.
if ni == 6 and ng == 1:
            print("Sadly, there are 6/1 leaks, but these appear normal and benign")
        else:
            print("********* WARNING - Leaving with %d/%d objects alive" % (ni, ng))
    else:
        print("yay! Our leaks have all vanished!")
|
cli_client/python/timesketch_cli_client/commands/importer.py | wajihyassine/timesketch | 1,810 | 12684354 | <filename>cli_client/python/timesketch_cli_client/commands/importer.py
# Copyright 2021 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Commands for importing timelines."""
import sys
import time
import click
from timesketch_import_client import importer as import_client
@click.command('import')
@click.option('--name', help='Name of the timeline.')
@click.option(
'--timeout', type=int, default=600, help='Seconds to wait for indexing.')
@click.argument('file_path', type=click.Path(exists=True))
@click.pass_context
def importer(ctx, name, timeout, file_path):
"""Import timeline.
Args:
ctx: Click CLI context object.
name: Name of the timeline to create.
timeout: Seconds to wait for indexing.
file_path: File path to the file to import.
"""
sketch = ctx.obj.sketch
if not name:
name = click.format_filename(file_path, shorten=True)
timeline = None
with import_client.ImportStreamer() as streamer:
click.echo('Uploading to server .. ', nl=False)
streamer.set_sketch(sketch)
streamer.set_timeline_name(name)
streamer.set_provider('Timesketch CLI client')
# TODO: Consider using the whole command as upload context instead
# of the file path.
streamer.set_upload_context(file_path)
streamer.add_file(file_path)
timeline = streamer.timeline
if not timeline:
click.echo('Error creating timeline, please try again.')
sys.exit(1)
click.echo('Done')
# Poll the timeline status and wait for the timeline to be ready
click.echo('Indexing .. ', nl=False)
max_time_seconds = timeout
sleep_time_seconds = 5 # Sleep between API calls
max_retries = max_time_seconds / sleep_time_seconds
retry_count = 0
while True:
if retry_count >= max_retries:
click.echo(
('WARNING: The command timed out before indexing finished. '
'The timeline will continue to be indexed in the background'))
break
status = timeline.status
# TODO: Do something with other statuses? (e.g. failed)
if status == 'ready':
click.echo('Done')
break
retry_count += 1
time.sleep(sleep_time_seconds)
click.echo(f'Timeline imported: {timeline.name}')
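# Rough CLI usage sketch (an assumption: this command is exposed through the
# Timesketch CLI client with a sketch already configured; the file name and the
# timeout value below are placeholders):
#
#   timesketch import --name "my-timeline" --timeout 900 events.jsonl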
|
test/mitmproxy/contentviews/test_hex.py | KarlParkinson/mitmproxy | 24,939 | 12684361 | <filename>test/mitmproxy/contentviews/test_hex.py
from mitmproxy.contentviews import hex
from . import full_eval
def test_view_hex():
v = full_eval(hex.ViewHex())
assert v(b"foo")
def test_render_priority():
v = hex.ViewHex()
assert not v.render_priority(b"ascii")
assert v.render_priority(b"\xFF")
assert not v.render_priority(b"")
|
arviz/plots/backends/matplotlib/dotplot.py | sudojarvis/arviz | 1,159 | 12684397 | """Matplotlib dotplot."""
import math
import warnings
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import _pylab_helpers
from ...plot_utils import _scale_fig_size
from . import backend_kwarg_defaults, create_axes_grid, backend_show
from ...plot_utils import plot_point_interval
from ...dotplot import wilkinson_algorithm, layout_stacks
def plot_dot(
values,
binwidth,
dotsize,
stackratio,
hdi_prob,
quartiles,
rotated,
dotcolor,
intervalcolor,
markersize,
markercolor,
marker,
figsize,
linewidth,
point_estimate,
nquantiles,
point_interval,
ax,
show,
backend_kwargs,
plot_kwargs,
):
"""Matplotlib dotplot."""
if backend_kwargs is None:
backend_kwargs = {}
backend_kwargs = {**backend_kwarg_defaults(), **backend_kwargs}
backend_kwargs.setdefault("figsize", figsize)
backend_kwargs["squeeze"] = True
(figsize, _, _, _, auto_linewidth, auto_markersize) = _scale_fig_size(figsize, None)
if plot_kwargs is None:
plot_kwargs = {}
plot_kwargs.setdefault("color", dotcolor)
if linewidth is None:
linewidth = auto_linewidth
if markersize is None:
markersize = auto_markersize
if ax is None:
fig_manager = _pylab_helpers.Gcf.get_active()
if fig_manager is not None:
ax = fig_manager.canvas.figure.gca()
else:
_, ax = create_axes_grid(
1,
backend_kwargs=backend_kwargs,
)
if point_interval:
ax = plot_point_interval(
ax,
values,
point_estimate,
hdi_prob,
quartiles,
linewidth,
markersize,
markercolor,
marker,
rotated,
intervalcolor,
"matplotlib",
)
if nquantiles > values.shape[0]:
warnings.warn(
"nquantiles must be less than or equal to the number of data points", UserWarning
)
nquantiles = values.shape[0]
else:
qlist = np.linspace(1 / (2 * nquantiles), 1 - 1 / (2 * nquantiles), nquantiles)
values = np.quantile(values, qlist)
if binwidth is None:
binwidth = math.sqrt((values[-1] - values[0] + 1) ** 2 / (2 * nquantiles * np.pi))
## Wilkinson's Algorithm
stack_locs, stack_count = wilkinson_algorithm(values, binwidth)
x, y = layout_stacks(stack_locs, stack_count, binwidth, stackratio, rotated)
for (x_i, y_i) in zip(x, y):
dot = plt.Circle((x_i, y_i), dotsize * binwidth / 2, **plot_kwargs)
ax.add_patch(dot)
if rotated:
ax.tick_params(bottom=False, labelbottom=False)
else:
ax.tick_params(left=False, labelleft=False)
ax.set_aspect("equal", adjustable="box")
ax.autoscale()
if backend_show(show):
plt.show()
return ax
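# This backend is normally reached through the public API rather than called
# directly; a rough sketch (assumes a recent ArviZ release exposing plot_dot):
#
#   import arviz as az
#   import numpy as np
#
#   values = np.random.default_rng(0).normal(size=500)
#   az.plot_dot(values, backend="matplotlib")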
|
runx/logx.py | B3EF/runx | 626 | 12684419 | """
Copyright 2020 Nvidia Corporation
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
"""
from collections import defaultdict
from contextlib import contextmanager
from shutil import copyfile
import csv
import os
import re
import shlex
import subprocess
import time
try:
from torch.utils.tensorboard import SummaryWriter
except ModuleNotFoundError:
from tensorboardX import SummaryWriter
import torch
try:
from .utils import (get_logroot, save_hparams, trn_names, val_names,
ConditionalProxy)
except ImportError:
# This is to allow the unit tests to run properly
from utils import (get_logroot, save_hparams, trn_names, val_names,
ConditionalProxy)
def is_list(x):
return isinstance(x, (list, tuple))
def get_gpu_utilization_pct():
'''
Use nvidia-smi to capture the GPU utilization, which is reported as an
integer in range 0-100.
'''
util = subprocess.check_output(
shlex.split('nvidia-smi --query-gpu="utilization.gpu" '
'--format=csv,noheader,nounits -i 0'))
util = util.decode('utf-8')
util = util.replace('\n', '')
return int(util)
class LogX(object):
def __init__(self, rank=0):
self.initialized = False
def initialize(self, logdir=None, coolname=False, hparams=None,
tensorboard=False, no_timestamp=False, global_rank=0,
eager_flush=True):
'''
Initialize logx
inputs
- logdir - where to write logfiles
- tensorboard - whether to write to tensorboard file
- global_rank - must set this if using distributed training, so we only
log from rank 0
- coolname - generate a unique directory name underneath logdir, else
use logdir as output directory
- hparams - only use if not launching jobs with runx, which also saves
the hparams.
- eager_flush - call `flush` after every tensorboard write
'''
self.rank0 = (global_rank == 0)
self.initialized = True
if logdir is not None:
self.logdir = logdir
else:
logroot = get_logroot()
if coolname:
from coolname import generate_slug
self.logdir = os.path.join(logroot, generate_slug(2))
else:
self.logdir = os.path.join(logroot, 'default')
# confirm target log directory exists
if not os.path.isdir(self.logdir):
os.makedirs(self.logdir, exist_ok=True)
if hparams is not None and self.rank0:
save_hparams(hparams, self.logdir)
# Tensorboard file
if self.rank0 and tensorboard:
self.tb_writer = SummaryWriter(log_dir=self.logdir,
flush_secs=1)
else:
self.tb_writer = None
self.eager_flush = eager_flush
# This allows us to use the tensorboard with automatic checking of both
# the `tensorboard` condition, as well as ensuring writes only happen
# on rank0. Any function supported by `SummaryWriter` is supported by
# `ConditionalProxy`. Additionally, flush will be called after any call
# to this.
self.tensorboard = ConditionalProxy(
self.tb_writer,
tensorboard and self.rank0,
post_hook=self._flush_tensorboard,
)
if not self.rank0:
return
# Metrics file
metrics_fn = os.path.join(self.logdir, 'metrics.csv')
self.metrics_fp = open(metrics_fn, mode='a+')
self.metrics_writer = csv.writer(self.metrics_fp, delimiter=',')
# Log file
log_fn = os.path.join(self.logdir, 'logging.log')
self.log_file = open(log_fn, mode='a+')
# save metric
self.save_metric = None
self.best_metric = None
self.save_ckpt_fn = ''
# Find the existing best checkpoint, and update `best_metric`,
# if available
self.best_ckpt_fn = self.get_best_checkpoint() or ''
if self.best_ckpt_fn:
best_chk = torch.load(self.best_ckpt_fn, map_location='cpu')
self.best_metric = best_chk.get('__metric', None)
self.epoch = defaultdict(lambda: 0)
self.no_timestamp = no_timestamp
# Initial timestamp, so that epoch time calculation is correct
phase = 'start'
csv_line = [phase]
# add epoch/iter
csv_line.append('{}/step'.format(phase))
csv_line.append(0)
# add timestamp
if not self.no_timestamp:
# this feature is useful for testing
csv_line.append('timestamp')
csv_line.append(time.time())
self.metrics_writer.writerow(csv_line)
self.metrics_fp.flush()
def __del__(self):
if self.initialized and self.rank0:
self.metrics_fp.close()
self.log_file.close()
def msg(self, msg):
'''
Print out message to std and to a logfile
'''
if not self.rank0:
return
print(msg)
self.log_file.write(msg + '\n')
self.log_file.flush()
def add_image(self, path, img, step=None):
'''
Write an image to the tensorboard file
'''
self.tensorboard.add_image(path, img, step)
def add_scalar(self, name, val, idx):
'''
Write a scalar to the tensorboard file
'''
self.tensorboard.add_scalar(name, val, idx)
def _flush_tensorboard(self):
if self.eager_flush and self.tb_writer is not None:
self.tb_writer.flush()
@contextmanager
def suspend_flush(self, flush_at_end=True):
prev_flush = self.eager_flush
self.eager_flush = False
yield
self.eager_flush = prev_flush
if flush_at_end:
self._flush_tensorboard()
def metric(self, phase, metrics, epoch=None):
"""Record train/val metrics. This serves the dual-purpose to write these
metrics to both a tensorboard file and a csv file, for each parsing by
sumx.
Arguments:
phase: 'train' or 'val'. sumx will only summarize val metrics.
metrics: dictionary of metrics to record
global_step: (optional) epoch or iteration number
"""
if not self.rank0:
return
# define canonical phase
if phase in trn_names:
canonical_phase = 'train'
elif phase in val_names:
canonical_phase = 'val'
else:
            raise ValueError('expected phase to be one of {} {}'.format(
                str(val_names), str(trn_names)))
if epoch is not None:
self.epoch[canonical_phase] = epoch
# Record metrics to csv file
csv_line = [canonical_phase]
for k, v in metrics.items():
csv_line.append(k)
csv_line.append(v)
# add epoch/iter
csv_line.append('epoch')
csv_line.append(self.epoch[canonical_phase])
# add timestamp
if not self.no_timestamp:
# this feature is useful for testing
csv_line.append('timestamp')
csv_line.append(time.time())
# To save a bit of disk space, only save validation metrics
if canonical_phase == 'val':
self.metrics_writer.writerow(csv_line)
self.metrics_fp.flush()
# Write updates to tensorboard file
with self.suspend_flush():
for k, v in metrics.items():
self.add_scalar('{}/{}'.format(phase, k), v,
self.epoch[canonical_phase])
# if no step, then keep track of it automatically
if epoch is None:
self.epoch[canonical_phase] += 1
@staticmethod
def is_better(save_metric, best_metric, higher_better):
return best_metric is None or \
higher_better and (save_metric > best_metric) or \
not higher_better and (save_metric < best_metric)
def save_model(self, save_dict, metric, epoch, higher_better=True,
delete_old=True):
"""Saves a model to disk. Keeps a separate copy of latest and best models.
Arguments:
save_dict: dictionary to save to checkpoint
epoch: epoch number, used to name checkpoint
metric: metric value to be used to evaluate whether this is the
best result
higher_better: True if higher valued metric is better, False
otherwise
delete_old: Delete prior 'lastest' checkpoints. By setting to
false, you'll get a checkpoint saved every time this
function is called.
"""
if not self.rank0:
return
save_dict['__metric'] = metric
if os.path.exists(self.save_ckpt_fn) and delete_old:
os.remove(self.save_ckpt_fn)
# Save out current model
self.save_ckpt_fn = os.path.join(
self.logdir, 'last_checkpoint_ep{}.pth'.format(epoch))
torch.save(save_dict, self.save_ckpt_fn)
self.save_metric = metric
is_better = self.is_better(self.save_metric, self.best_metric,
higher_better)
if is_better:
if os.path.exists(self.best_ckpt_fn):
os.remove(self.best_ckpt_fn)
self.best_ckpt_fn = os.path.join(
self.logdir, 'best_checkpoint_ep{}.pth'.format(epoch))
self.best_metric = self.save_metric
copyfile(self.save_ckpt_fn, self.best_ckpt_fn)
return is_better
def get_best_checkpoint(self):
"""
Finds the checkpoint in `self.logdir` that is considered best.
If, for some reason, there are multiple best checkpoint files, then
the one with the highest epoch will be preferred.
Returns:
None - If there is no best checkpoint file
path (str) - The full path to the best checkpoint otherwise.
"""
match_str = r'^best_checkpoint_ep([0-9]+).pth$'
best_epoch = -1
best_checkpoint = None
for filename in os.listdir(self.logdir):
match = re.fullmatch(match_str, filename)
if match is not None:
# Extract the epoch number
epoch = int(match.group(1))
if epoch > best_epoch:
best_epoch = epoch
best_checkpoint = filename
if best_checkpoint is None:
return None
return os.path.join(self.logdir, best_checkpoint)
def load_model(self, path):
"""Restore a model and return a dict with any meta data included in
the snapshot
"""
checkpoint = torch.load(path)
state_dict = checkpoint['state_dict']
meta = {k: v for k, v in checkpoint.items() if k != 'state_dict'}
return state_dict, meta
# Importing logx gives you access to this shared object
logx = LogX()
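# A minimal end-to-end sketch of the shared `logx` object above (illustrative
# only; the logdir, metric names and model dict are placeholders):
#
#   from runx.logx import logx
#
#   logx.initialize(logdir='/tmp/run1', tensorboard=True, hparams={'lr': 0.1})
#   for epoch in range(10):
#       logx.msg('starting epoch {}'.format(epoch))
#       logx.metric('train', {'loss': 0.5}, epoch)
#       logx.metric('val', {'loss': 0.4, 'accuracy': 0.9}, epoch)
#       logx.save_model({'state_dict': {}}, metric=0.9, epoch=epoch, higher_better=True)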
|
next_steps/operations/ml_ops/personalize-step-functions/lambdas/delete-dataset/delete-dataset.py | kamoljan/amazon-personalize-samples | 442 | 12684435 | <reponame>kamoljan/amazon-personalize-samples
from os import environ
from loader import Loader
import actions
LOADER = Loader()
def lambda_handler(event, context):
try:
response = LOADER.personalize_cli.delete_dataset(
datasetArn=event['datasetArn']
)
except Exception as e:
LOADER.logger.error(f'Error deleting dataset: {e}')
raise e
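# Sketch of the event shape this handler expects (the ARN is a made-up
# placeholder, not a real resource):
#
#   lambda_handler({'datasetArn': 'arn:aws:personalize:us-east-1:111122223333:dataset/example'}, None)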
|
tests/test_session.py | hiyongz/uiautomator2 | 4,493 | 12684437 | # coding: utf-8
#
from collections import namedtuple
def test_session(sess):
sess.wlan_ip
sess.widget
sess.watcher
sess.image
sess.jsonrpc
sess.open_identify
sess.shell
sess.set_new_command_timeout
sess.settings
sess.taobao
sess.xpath
def test_session_app(sess, package_name):
sess.app_start(package_name)
assert sess.app_current()['package'] == package_name
sess.app_wait(package_name)
assert package_name in sess.app_list()
assert package_name in sess.app_list_running()
assert sess.app_info(package_name)['packageName'] == package_name
def test_session_window_size(sess):
assert isinstance(sess.window_size(), tuple)
|
tests/core/test_slice.py | jessevig/robustness-gym | 399 | 12684444 | """Unittests for Slices."""
from unittest import TestCase
from robustnessgym.core.slice import SliceDataPanel
from tests.testbeds import MockTestBedv0
class TestSlice(TestCase):
def setUp(self):
self.testbed = MockTestBedv0()
def test_from_dataset(self):
# Create a slice
sl = SliceDataPanel(self.testbed.dataset)
# Compare the slice identifier
self.assertEqual(str(sl), "RGSlice[num_rows: 6](MockDataset(version=1.0))")
# Length of the slice
self.assertEqual(len(sl), 6)
# Lineage of the slice
self.assertEqual(sl.lineage, [("Dataset", "MockDataset(version=1.0)")])
|