max_stars_repo_path | max_stars_repo_name | max_stars_count | id | content
---|---|---|---|---|
components/isceobj/IsceProc/runOffoutliers.py | vincentschut/isce2 | 1,133 | 12666911 | <filename>components/isceobj/IsceProc/runOffoutliers.py
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Copyright 2013 California Institute of Technology. ALL RIGHTS RESERVED.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# United States Government Sponsorship acknowledged. This software is subject to
# U.S. export control laws and regulations and has been classified as 'EAR99 NLR'
# (No [Export] License Required except when exporting to an embargoed country,
# end user, or in support of a prohibited end use). By downloading this software,
# the user agrees to comply with all applicable U.S. export laws and regulations.
# The user has the responsibility to obtain export licenses, or other export
# authority as may be required before exporting this software to any 'EAR99'
# embargoed foreign country or citizen of those countries.
#
# Authors: <NAME>, <NAME>
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Comment: Adapted from InsarProc/runOffoutliers.py
import logging
import isceobj
logger = logging.getLogger('isce.isceProc.runOffoutliers')
def runOffoutliers(self, distance):
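    # Cull offset-field outliers for every coregistered scene pair and store the refined offset fields back.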
refPol = self._isce.refPol
stdWriter = self._stdWriter
for sceneid1, sceneid2 in self._isce.pairsToCoreg:
pair = (sceneid1, sceneid2)
rgOffsets = self._isce.refinedOffsetFields[pair]
catalog = isceobj.Catalog.createCatalog(self._isce.procDoc.name)
sid = self._isce.formatname(pair)
offsetField = run(rgOffsets, distance, stdWriter, catalog=catalog, sceneid=sid)
self._isce.procDoc.addAllFromCatalog(catalog)
self._isce.refinedOffsetFields[pair] = offsetField
def run(rgOffsets, distance, stdWriter, catalog=None, sceneid='NO_ID'):
#offoutliers returns a list of modified locations
#the list of lists is
#list[0] = location across
#list[1] = location across offset
#list[2] = location down
#list[3] = location down offset
#list[4] = snr
#list[5] = sig
logger.info("Culling offset field outliers: %s" % sceneid)
objOff = isceobj.createOffoutliers()
objOff.wireInputPort(name='offsets', object=rgOffsets)
objOff.setSNRThreshold(2.0)
objOff.setDistance(distance)
    # set the tag used in the outfile. each message is preceded by this tag
    # if the writer is not of "file" type the call has no effect
stdWriter.setFileTag("offoutliers", "log")
stdWriter.setFileTag("offoutliers", "err")
stdWriter.setFileTag("offoutliers", "out")
objOff.stdWriter = stdWriter.set_file_tags("offoutliers",
"log",
"err",
"out")
objOff.offoutliers()
if catalog is not None:
# Record the inputs and outputs
isceobj.Catalog.recordInputsAndOutputs(catalog, objOff,
"runOffoutliers.%s" % sceneid,
logger,
"runOffoutliers.%s" % sceneid)
return objOff.getRefinedOffsetField()
|
src/lib/output/json.py | security-geeks/userline | 255 | 12666941 | <reponame>security-geeks/userline
#
# Author: <NAME> (aka sch3m4)
# @sch3m4
# https://github.com/thiber-org/userline
#
import json
from lib import config
class JSON():
def __init__(self,fd,duplicate=False):
self.fd = fd
self.duplicate = duplicate
def add_sequence(self,event):
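        # Drop 'N/A' fields, optionally emit a separate logoff event, and write each event as one JSON line.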
evt = []
aux = dict(event)
for k in list(aux.keys()):
if aux[k] == 'N/A':
del aux[k]
if self.duplicate:
logout = dict(aux)
if 'logoff.datetime' in aux.keys():
logout['datetime'] = aux['logoff.datetime']
logout['action'] = 'logoff'
evt.append(logout)
aux['datetime'] = aux['logon.datetime']
aux['action'] = 'logon'
evt.append(aux)
for i in evt:
self.fd.write(json.dumps(i,sort_keys=True, indent=config.JSON_INDENT)+'\n')
def finish(self):
self.fd.close()
|
data/micro-benchmark/parameters/imported_call/main.py | vitsalis/pycg-evaluation | 121 | 12666956 | from to_import import func
def param_func():
pass
func(param_func)
|
function/python/brightics/function/classification/test/xgb_classification_test.py | parkjh80/studio | 202 | 12666971 | <reponame>parkjh80/studio
"""
Copyright 2019 Samsung SDS
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
import numpy as np
from brightics.common.datasets import load_iris
from brightics.function.classification import xgb_classification_train, xgb_classification_predict
import HtmlTestRunner
import os
class TestXgbClassification(unittest.TestCase):
def test_default(self):
df_iris = load_iris()
model_train = xgb_classification_train(table=df_iris,
feature_cols=['sepal_length', 'sepal_width'],
label_col='species',
max_depth=3, learning_rate=0.1, n_estimators=100, random_state=12345)['model']
df_feature_importance = model_train['feature_importance_table']
np.testing.assert_array_almost_equal([0.5877061486, 0.4122938514], [df_feature_importance.values[i][1] for i in range(2)], 10, 'incorrect feature_importance')
df_test = xgb_classification_predict(table=df_iris,
model=model_train,
prediction_col='prediction', probability_col='probability',
thresholds=None, suffix='index',
output_margin=False, ntree_limit=None)['out_table']
self.assertListEqual(['setosa'] * 5, df_test['prediction'].tolist()[:5], 'incorrect prediction')
np.testing.assert_array_almost_equal([0.9961014986, 0.9555388689, 0.9964415431, 0.9961314201, 0.9970849156], df_test['probability_0'].values[:5].astype('float64'), 10, 'incorrect probability_0')
np.testing.assert_array_almost_equal([0.0029145265, 0.0210829079, 0.0020782573, 0.001782414, 0.0019302238], df_test['probability_1'].values[:5].astype('float64'), 10, 'incorrect probability_1')
np.testing.assert_array_almost_equal([0.0009839422, 0.0233781971, 0.0014802075, 0.0020861221, 0.0009849136], df_test['probability_2'].values[:5].astype('float64'), 10, 'incorrect probability_2')
def test_class_weight(self):
df_iris = load_iris()
model_train = xgb_classification_train(table=df_iris,
feature_cols=['sepal_length', 'sepal_width', 'petal_length', 'petal_width'],
label_col='species',
max_depth=3, learning_rate=0.1, n_estimators=100, class_weight=[0, 1, 1], random_state=12345)['model']
df_feature_importance = model_train['feature_importance_table']
np.testing.assert_array_almost_equal([0.114977307617,0.234493196010,0.332829058170,0.3177004456520], [df_feature_importance.values[i][1] for i in range(4)], 10, 'incorrect feature_importance')
df_test = xgb_classification_predict(table=df_iris,
model=model_train,
prediction_col='prediction', probability_col='probability',
thresholds=None, suffix='index',
output_margin=False, ntree_limit=None)['out_table']
self.assertListEqual(['versicolor'] * 5, df_test['prediction'].tolist()[:5], 'incorrect prediction')
np.testing.assert_array_almost_equal([0.0007314461, 0.0010454282, 0.0010394535, 0.0010394285, 0.0010394535], df_test['probability_0'].values[:5].astype('float64'), 10, 'incorrect probability_0')
np.testing.assert_array_almost_equal([0.9976045489, 0.9954549074, 0.9956334233, 0.9956094623, 0.9956334233], df_test['probability_1'].values[:5].astype('float64'), 10, 'incorrect probability_1')
np.testing.assert_array_almost_equal([0.0016639883, 0.0034996143, 0.0033270852, 0.0033510949, 0.0033270852], df_test['probability_2'].values[:5].astype('float64'), 10, 'incorrect probability_2')
if __name__ == '__main__':
filepath = os.path.dirname(os.path.abspath(__file__))
    reportFolder = filepath + "/../../../../../../../reports"
    unittest.main(testRunner=HtmlTestRunner.HTMLTestRunner(combine_reports=True, output=reportFolder))
|
libraries/botbuilder-dialogs/botbuilder/dialogs/memory/scopes/conversation_memory_scope.py | Fl4v/botbuilder-python | 388 | 12667043 | <reponame>Fl4v/botbuilder-python<gh_stars>100-1000
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
from botbuilder.core import ConversationState
from botbuilder.dialogs.memory import scope_path
from .bot_state_memory_scope import BotStateMemoryScope
class ConversationMemoryScope(BotStateMemoryScope):
def __init__(self):
super().__init__(ConversationState, scope_path.CONVERSATION)
|
examples/subparsers/subparsers_example.py | idoby/SimpleParsing | 150 | 12667044 | <filename>examples/subparsers/subparsers_example.py
from dataclasses import dataclass
from typing import Union
from pathlib import Path
from simple_parsing import ArgumentParser, subparsers
@dataclass
class Train:
"""Example of a command to start a Training run."""
# the training directory
train_dir: Path = Path("~/train")
def execute(self):
print(f"Training in directory {self.train_dir}")
@dataclass
class Test:
"""Example of a command to start a Test run."""
# the testing directory
test_dir: Path = Path("~/train")
def execute(self):
print(f"Testing in directory {self.test_dir}")
@dataclass
class Program:
"""Some top-level command"""
command: Union[Train, Test]
verbose: bool = False # log additional messages in the console.
def execute(self):
print(f"Program (verbose: {self.verbose})")
return self.command.execute()
parser = ArgumentParser()
parser.add_arguments(Program, dest="prog")
args = parser.parse_args()
prog: Program = args.prog
print("prog:", prog)
prog.execute()
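# Illustrative invocation (flag and subcommand names are assumed to follow simple_parsing's
# dataclass-to-argument conversion, not taken from this file):
#   python subparsers_example.py --verbose train --train_dir ~/train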
|
dnachisel/reports/SpecAnnotationsTranslator.py | simone-pignotti/DnaChisel | 124 | 12667074 | <gh_stars>100-1000
from ..biotools import find_specification_label_in_feature
from .tools import install_extras_message
DFV_AVAILABLE = False
try:
from dna_features_viewer import BiopythonTranslator
DFV_AVAILABLE = True
except ImportError:
class BiopythonTranslator:
"Class unavailable. Install DNA Features Viewer."
def __init__(self):
raise ImportError(install_extras_message("DNA Features Viewer"))
class SpecAnnotationsTranslator(BiopythonTranslator):
"""Translator of DnaChisel feature-constraints for DNA Features Viewer"""
feature_prefixes_colors = {
"@": "#ce5454",
"~": "#e5be54",
"#": "#8edfff",
"!": "#fcff75",
}
def compute_filtered_features(self, features):
"""Do not display edits."""
return [
feature
for feature in features
if "".join(feature.qualifiers.get("is_edit", "false")) != "true"
]
def compute_feature_color(self, f):
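        # Color priority: explicit "color" qualifier, then the specification-prefix palette, then the default color.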
color = f.qualifiers.get("color", None)
if color is not None:
if isinstance(color, list):
color = color[0]
return color
if f.type == "misc_feature":
specification = find_specification_label_in_feature(f)
if specification is not None:
return self.feature_prefixes_colors.get(
specification[0], "#f4df42"
)
return "#eeeafa"
def compute_feature_label(self, f):
is_edit = f.qualifiers.get("is_edit", "false")
if "true" in [is_edit, is_edit[0]]:
return None
default = BiopythonTranslator.compute_feature_label(self, f)
label = None if (f.type != "misc_feature") else default
if label == "misc_feature":
label = None
return label
|
mythril/laser/plugin/plugins/plugin_annotations.py | kalloc/mythril | 1,887 | 12667103 | <filename>mythril/laser/plugin/plugins/plugin_annotations.py<gh_stars>1000+
from mythril.laser.ethereum.state.annotation import (
StateAnnotation,
MergeableStateAnnotation,
)
from copy import copy
from typing import Dict, List, Set
import logging
log = logging.getLogger(__name__)
class MutationAnnotation(StateAnnotation):
"""Mutation Annotation
This is the annotation used by the MutationPruner plugin to record mutations
"""
def __init__(self):
pass
@property
def persist_over_calls(self) -> bool:
return True
class DependencyAnnotation(MergeableStateAnnotation):
"""Dependency Annotation
This annotation tracks read and write access to the state during each transaction.
"""
def __init__(self):
self.storage_loaded = set() # type: Set
self.storage_written = {} # type: Dict[int, Set]
self.has_call = False # type: bool
self.path = [0] # type: List
self.blocks_seen = set() # type: Set[int]
def __copy__(self):
result = DependencyAnnotation()
result.storage_loaded = copy(self.storage_loaded)
result.storage_written = copy(self.storage_written)
result.has_call = self.has_call
result.path = copy(self.path)
result.blocks_seen = copy(self.blocks_seen)
return result
def get_storage_write_cache(self, iteration: int):
return self.storage_written.get(iteration, set())
def extend_storage_write_cache(self, iteration: int, value: object):
if iteration not in self.storage_written:
self.storage_written[iteration] = set()
self.storage_written[iteration].add(value)
def check_merge_annotation(self, other: "DependencyAnnotation"):
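        # Annotations are only mergeable when they agree on call usage and execution path.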
if not isinstance(other, DependencyAnnotation):
raise TypeError("Expected an instance of DependencyAnnotation")
return self.has_call == other.has_call and self.path == other.path
def merge_annotation(self, other: "DependencyAnnotation"):
merged_annotation = DependencyAnnotation()
merged_annotation.blocks_seen = self.blocks_seen.union(other.blocks_seen)
merged_annotation.has_call = self.has_call
merged_annotation.path = copy(self.path)
merged_annotation.storage_loaded = self.storage_loaded.union(
other.storage_loaded
)
keys = set(
list(other.storage_written.keys()) + list(self.storage_written.keys())
)
for key in keys:
other_set = other.storage_written.get(key, set())
merged_annotation.storage_written[key] = self.storage_written.get(
key, set()
).union(other_set)
return merged_annotation
class WSDependencyAnnotation(MergeableStateAnnotation):
"""Dependency Annotation for World state
This world state annotation maintains a stack of state annotations.
It is used to transfer individual state annotations from one transaction to the next.
"""
def __init__(self):
self.annotations_stack: List[DependencyAnnotation] = []
def __copy__(self):
result = WSDependencyAnnotation()
result.annotations_stack = copy(self.annotations_stack)
return result
def check_merge_annotation(self, annotation: "WSDependencyAnnotation") -> bool:
if len(self.annotations_stack) != len(annotation.annotations_stack):
# We can only merge worldstate annotations that have seen an equal amount of transactions
# since the beginning of symbolic execution
return False
for a1, a2 in zip(self.annotations_stack, annotation.annotations_stack):
if a1 == a2:
continue
if (
isinstance(a1, MergeableStateAnnotation)
and isinstance(a2, MergeableStateAnnotation)
and a1.check_merge_annotation(a2) is True
):
continue
log.debug("Aborting merge between annotations {} and {}".format(a1, a2))
return False
return True
def merge_annotation(
self, annotation: "WSDependencyAnnotation"
) -> "WSDependencyAnnotation":
merged_annotation = WSDependencyAnnotation()
for a1, a2 in zip(self.annotations_stack, annotation.annotations_stack):
if a1 == a2:
merged_annotation.annotations_stack.append(copy(a1))
merged_annotation.annotations_stack.append(a1.merge_annotation(a2))
return merged_annotation
|
flask_accepts/decorators/__init__.py | cafetodev/flask_accepts | 170 | 12667110 | from .decorators import accepts, responds # noqa
|
setup/apps.py | SlapBass/nx-portal | 115 | 12667124 | <filename>setup/apps.py
from django.apps import AppConfig
class SetupConfig(AppConfig):
name = 'setup'
|
Demo/simple.py | reqa/python-ldap | 299 | 12667132 | import sys,getpass
import ldap
#l = ldap.open("localhost", 31001)
l = ldap.open("marta.it.uq.edu.au")
login_dn = "cn=root,ou=CSEE,o=UQ,c=AU"
login_pw = getpass.getpass("Password for %s: " % login_dn)
l.simple_bind_s(login_dn, login_pw)
#
# create a new sub organisation
#
try:
dn = "ou=CSEE,o=UQ,c=AU"
print("Adding", repr(dn))
l.add_s(dn,
[
("objectclass",["organizationalUnit"]),
("ou", ["CSEE"]),
("description", [
"Department of Computer Science and Electrical Engineering"]),
]
)
except ldap.LDAPError:
pass
#
# create an entry for me
#
dn = "cn=<NAME>,ou=CSEE,o=UQ,c=AU"
print("Updating", repr(dn))
try:
l.delete_s(dn)
except:
pass
l.add_s(dn,
[
("objectclass", ["organizationalPerson"]),
("sn", ["Leonard"]),
("cn", ["<NAME>"]),
("description", ["Ph.D. student"]),
("display-name", ["<NAME>"]),
#("commonname", ["<NAME>"]),
("mail", ["<EMAIL>"]),
("othermailbox", ["<EMAIL>"]),
("givenname", ["David"]),
("surname", ["Leonard"]),
("seeAlso", ["http://www.csee.uq.edu.au/~leonard/"]),
("url", ["http://www.csee.uq.edu.au/~leonard/"]),
#("homephone", []),
#("fax", []),
#("otherfacsimiletelephonenumber",[]),
#("officefax", []),
#("mobile", []),
#("otherpager", []),
#("officepager", []),
#("pager", []),
("info", ["info"]),
("title", ["Mr"]),
#("telephonenumber", []),
("l", ["Brisbane"]),
("st", ["Queensland"]),
("c", ["AU"]),
("co", ["co"]),
("o", ["UQ"]),
("ou", ["CSEE"]),
#("homepostaladdress", []),
#("postaladdress", []),
#("streetaddress", []),
#("street", []),
("department", ["CSEE"]),
("comment", ["comment"]),
#("postalcode", []),
("physicaldeliveryofficename", ["Bldg 78, UQ, St Lucia"]),
("preferredDeliveryMethod", ["email"]),
("initials", ["DRL"]),
("conferenceinformation", ["MS-conferenceinformation"]),
#("usercertificate", []),
("labeleduri", ["labeleduri"]),
("manager", ["cn=<NAME>"]),
("reports", ["reports"]),
("jpegPhoto", [open("/www/leonard/leonard.jpg").read()]),
("uid", ["leonard"]),
("userPassword", [""])
])
#
# search beneath the CSEE/UQ/AU tree
#
res = l.search_s(
"ou=CSEE, o=UQ, c=AU",
    ldap.SCOPE_SUBTREE,
"objectclass=*",
)
print(res)
l.unbind()
|
kornia/utils/draw.py | saurabhya/kornia | 418 | 12667141 | from typing import List, Optional, Tuple, Union
import torch
from torch import Tensor
from kornia.testing import KORNIA_CHECK, KORNIA_CHECK_SHAPE
# TODO: implement width of the line
def _draw_pixel(image: torch.Tensor, x: int, y: int, color: torch.Tensor) -> None:
r"""Draws a pixel into an image.
Args:
image: the input image to where to draw the lines with shape :math`(C,H,W)`.
x: the x coordinate of the pixel.
y: the y coordinate of the pixel.
color: the color of the pixel with :math`(C)` where :math`C` is the number of channels of the image.
Return:
Nothing is returned.
"""
image[:, y, x] = color
def draw_line(image: torch.Tensor, p1: torch.Tensor, p2: torch.Tensor, color: torch.Tensor) -> torch.Tensor:
r"""Draw a single line into an image.
Args:
image: the input image to where to draw the lines with shape :math`(C,H,W)`.
p1: the start point [x y] of the line with shape (2).
p2: the end point [x y] of the line with shape (2).
color: the color of the line with shape :math`(C)` where :math`C` is the number of channels of the image.
Return:
the image with containing the line.
Examples:
>>> image = torch.zeros(1, 8, 8)
>>> draw_line(image, torch.tensor([6, 4]), torch.tensor([1, 4]), torch.tensor([255]))
tensor([[[ 0., 0., 0., 0., 0., 0., 0., 0.],
[ 0., 0., 0., 0., 0., 0., 0., 0.],
[ 0., 0., 0., 0., 0., 0., 0., 0.],
[ 0., 0., 0., 0., 0., 0., 0., 0.],
[ 0., 255., 255., 255., 255., 255., 255., 0.],
[ 0., 0., 0., 0., 0., 0., 0., 0.],
[ 0., 0., 0., 0., 0., 0., 0., 0.],
[ 0., 0., 0., 0., 0., 0., 0., 0.]]])
"""
if (len(p1) != 2) or (len(p2) != 2):
raise ValueError("p1 and p2 must have length 2.")
if len(image.size()) != 3:
raise ValueError("image must have 3 dimensions (C,H,W).")
if color.size(0) != image.size(0):
raise ValueError("color must have the same number of channels as the image.")
if (p1[0] >= image.size(2)) or (p1[1] >= image.size(1) or (p1[0] < 0) or (p1[1] < 0)):
raise ValueError("p1 is out of bounds.")
if (p2[0] >= image.size(2)) or (p2[1] >= image.size(1) or (p2[0] < 0) or (p2[1] < 0)):
raise ValueError("p2 is out of bounds.")
# move p1 and p2 to the same device as the input image
# move color to the same device and dtype as the input image
p1 = p1.to(image.device).to(torch.int64)
p2 = p2.to(image.device).to(torch.int64)
color = color.to(image)
# assign points
x1, y1 = p1
x2, y2 = p2
    # calculate coefficients A,B,C of line
# from equation Ax + By + C = 0
A = y2 - y1
B = x1 - x2
C = x2 * y1 - x1 * y2
    # make sure A is positive to utilize the function properly
if A < 0:
A = -A
B = -B
C = -C
# calculate the slope of the line
# check for division by zero
if B != 0:
m = -A / B
# make sure you start drawing in the right direction
x1, x2 = min(x1, x2).long(), max(x1, x2).long()
y1, y2 = min(y1, y2).long(), max(y1, y2).long()
# line equation that determines the distance away from the line
def line_equation(x, y):
return A * x + B * y + C
# vertical line
if B == 0:
image[:, y1 : y2 + 1, x1] = color
# horizontal line
elif A == 0:
image[:, y1, x1 : x2 + 1] = color
# slope between 0 and 1
elif 0 < m < 1:
for i in range(x1, x2 + 1):
_draw_pixel(image, i, y1, color)
if line_equation(i + 1, y1 + 0.5) > 0:
y1 += 1
# slope greater than or equal to 1
elif m >= 1:
for j in range(y1, y2 + 1):
_draw_pixel(image, x1, j, color)
if line_equation(x1 + 0.5, j + 1) < 0:
x1 += 1
    # slope less than -1
elif m <= -1:
for j in range(y1, y2 + 1):
_draw_pixel(image, x2, j, color)
if line_equation(x2 - 0.5, j + 1) > 0:
x2 -= 1
# slope between -1 and 0
elif -1 < m < 0:
for i in range(x1, x2 + 1):
_draw_pixel(image, i, y2, color)
if line_equation(i + 1, y2 - 0.5) > 0:
y2 -= 1
return image
def draw_rectangle(
image: torch.Tensor, rectangle: torch.Tensor, color: Optional[torch.Tensor] = None, fill: Optional[bool] = None
) -> torch.Tensor:
r"""Draw N rectangles on a batch of image tensors.
Args:
image: is tensor of BxCxHxW.
rectangle: represents number of rectangles to draw in BxNx4
            N is the number of boxes to draw per batch index; each box is [x1, y1, x2, y2]
4 is in (top_left.x, top_left.y, bot_right.x, bot_right.y).
color: a size 1, size 3, BxNx1, or BxNx3 tensor.
If C is 3, and color is 1 channel it will be broadcasted.
fill: is a flag used to fill the boxes with color if True.
Returns:
This operation modifies image inplace but also returns the drawn tensor for
convenience with same shape the of the input BxCxHxW.
Example:
>>> img = torch.rand(2, 3, 10, 12)
>>> rect = torch.tensor([[[0, 0, 4, 4]], [[4, 4, 10, 10]]])
>>> out = draw_rectangle(img, rect)
"""
batch, c, h, w = image.shape
batch_rect, num_rectangle, num_points = rectangle.shape
if batch != batch_rect:
raise AssertionError("Image batch and rectangle batch must be equal")
if num_points != 4:
raise AssertionError("Number of points in rectangle must be 4")
# clone rectangle, in case it's been expanded assignment from clipping causes problems
rectangle = rectangle.long().clone()
# clip rectangle to hxw bounds
rectangle[:, :, 1::2] = torch.clamp(rectangle[:, :, 1::2], 0, h - 1)
rectangle[:, :, ::2] = torch.clamp(rectangle[:, :, ::2], 0, w - 1)
if color is None:
color = torch.tensor([0.0] * c).expand(batch, num_rectangle, c)
if fill is None:
fill = False
if len(color.shape) == 1:
color = color.expand(batch, num_rectangle, c)
b, n, color_channels = color.shape
if color_channels == 1 and c == 3:
color = color.expand(batch, num_rectangle, c)
for b in range(batch):
for n in range(num_rectangle):
if fill:
image[
b,
:,
int(rectangle[b, n, 1]) : int(rectangle[b, n, 3] + 1),
int(rectangle[b, n, 0]) : int(rectangle[b, n, 2] + 1),
] = color[b, n, :, None, None]
else:
image[b, :, int(rectangle[b, n, 1]) : int(rectangle[b, n, 3] + 1), rectangle[b, n, 0]] = color[
b, n, :, None
]
image[b, :, int(rectangle[b, n, 1]) : int(rectangle[b, n, 3] + 1), rectangle[b, n, 2]] = color[
b, n, :, None
]
image[b, :, rectangle[b, n, 1], int(rectangle[b, n, 0]) : int(rectangle[b, n, 2] + 1)] = color[
b, n, :, None
]
image[b, :, rectangle[b, n, 3], int(rectangle[b, n, 0]) : int(rectangle[b, n, 2] + 1)] = color[
b, n, :, None
]
return image
def _get_convex_edges(polygon: Tensor, h: int, w: int) -> Tuple[Tensor, Tensor]:
r"""Gets the left and right edges of a polygon for each y-coordinate y \in [0, h)
Args:
polygons: represents polygons to draw in BxNx2
N is the number of points
2 is (x, y).
h: bottom most coordinate (top coordinate is assumed to be 0)
w: right most coordinate (left coordinate is assumed to be 0)
Returns:
        The left and right edges of the polygon of shape (B, H).
"""
dtype = polygon.dtype
# Check if polygons are in loop closed format, if not -> make it so
if not torch.allclose(polygon[..., -1, :], polygon[..., 0, :]):
polygon = torch.cat((polygon, polygon[..., :1, :]), dim=-2) # (B, N+1, 2)
# Partition points into edges
x_start, y_start = polygon[..., :-1, 0], polygon[..., :-1, 1]
x_end, y_end = polygon[..., 1:, 0], polygon[..., 1:, 1]
# Create scanlines, edge dx/dy, and produce x values
ys = torch.arange(h, device=polygon.device, dtype=dtype)
dx = ((x_end - x_start) / (y_end - y_start + 1e-12)).clamp(-w, w)
xs = (ys[..., :, None] - y_start[..., None, :]) * dx[..., None, :] + x_start[..., None, :]
# Only count edge in their active regions (i.e between the vertices)
valid_edges = (y_start[..., None, :] <= ys[..., :, None]).logical_and(ys[..., :, None] <= y_end[..., None, :])
valid_edges |= (y_start[..., None, :] >= ys[..., :, None]).logical_and(ys[..., :, None] >= y_end[..., None, :])
x_left_edges = xs.clone()
x_left_edges[~valid_edges] = w
x_right_edges = xs.clone()
x_right_edges[~valid_edges] = -1
# Find smallest and largest x values for the valid edges
x_left = x_left_edges.min(dim=-1).values
x_right = x_right_edges.max(dim=-1).values
return x_left, x_right
def _batch_polygons(polygons: List[Tensor]) -> Tensor:
r"""Converts a List of variable length polygons into a fixed size tensor.
Works by repeating the last element in the tensor.
Args:
polygon: List of variable length polygons of shape [N_1 x 2, N_2 x 2, ..., N_B x 2].
B is the batch size,
N_i is the number of points,
2 is (x, y).
Returns:
A fixed size tensor of shape (B, N, 2) where N = max_i(N_i)
"""
B, N = len(polygons), len(max(polygons, key=len))
batched_polygons = torch.zeros(B, N, 2, dtype=polygons[0].dtype, device=polygons[0].device)
for b, p in enumerate(polygons):
batched_polygons[b] = torch.cat((p, p[-1:].expand(N - len(p), 2))) if len(p) < N else p
return batched_polygons
def draw_convex_polygon(images: Tensor, polygons: Union[Tensor, List[Tensor]], colors: Tensor) -> Tensor:
r"""Draws convex polygons on a batch of image tensors.
Args:
images: is tensor of BxCxHxW.
polygons: represents polygons as points, either BxNx2 or List of variable length polygons.
N is the number of points.
2 is (x, y).
color: a B x 3 tensor or 3 tensor with color to fill in.
Returns:
This operation modifies image inplace but also returns the drawn tensor for
convenience with same shape the of the input BxCxHxW.
Note:
This function assumes a coordinate system (0, h - 1), (0, w - 1) in the image, with (0, 0) being the center
of the top-left pixel and (w - 1, h - 1) being the center of the bottom-right coordinate.
Example:
>>> img = torch.rand(1, 3, 12, 16)
>>> poly = torch.tensor([[[4, 4], [12, 4], [12, 8], [4, 8]]])
>>> color = torch.tensor([[0.5, 0.5, 0.5]])
>>> out = draw_convex_polygon(img, poly, color)
"""
# TODO: implement optional linetypes for smooth edges
KORNIA_CHECK_SHAPE(images, ["B", "C", "H", "W"])
b_i, c_i, h_i, w_i, device = *images.shape, images.device
if isinstance(polygons, List):
polygons = _batch_polygons(polygons)
b_p, _, xy, device_p, dtype_p = *polygons.shape, polygons.device, polygons.dtype
if len(colors.shape) == 1:
colors = colors.expand(b_i, c_i)
b_c, _, device_c = *colors.shape, colors.device
KORNIA_CHECK(xy == 2, "Polygon vertices must be xy, i.e. 2-dimensional")
KORNIA_CHECK(b_i == b_p == b_c, "Image, polygon, and color must have same batch dimension")
KORNIA_CHECK(device == device_p == device_c, "Image, polygon, and color must have same device")
x_left, x_right = _get_convex_edges(polygons, h_i, w_i)
ws = torch.arange(w_i, device=device, dtype=dtype_p)[None, None, :]
fill_region = (ws >= x_left[..., :, None]) & (ws <= x_right[..., :, None])
images = (~fill_region[:, None]) * images + fill_region[:, None] * colors[..., None, None]
return images
|
tests/openbb_terminal/core/log/collection/test_logging_clock.py | tehcoderer/GamestonkTerminal | 255 | 12667178 | <filename>tests/openbb_terminal/core/log/collection/test_logging_clock.py
from datetime import datetime
import pytest
from openbb_terminal.core.log.collection import logging_clock
clock = logging_clock.LoggingClock()
now = datetime.now()
def mock_next(**_):
raise NotImplementedError
@pytest.mark.parametrize(
"precision", [logging_clock.Precision.hour, logging_clock.Precision.minute]
)
def test_calculate_next_sharp(precision):
value = clock.calculate_next_sharp(now, precision)
assert value
def test_calculate_next_sharp_invalid():
with pytest.raises(Exception):
clock.calculate_next_sharp(now, "bad")
# TODO: find a better way to mock the while loop
def test_do_action_every_sharp(mocker):
mock = mocker.Mock()
mock.count = 0
mock.mock_next = mock_next
with pytest.raises(NotImplementedError):
clock.do_action_every_sharp(mock.mock_next)
def test_run(mocker):
mocker.patch(
"openbb_terminal.core.log.collection.logging_clock.LoggingClock.do_action_every_sharp"
)
clock.run()
def test_default_action():
clock.default_action()
|
discodo/client/http.py | eunwoo1104/discodo | 105 | 12667187 | import logging
from ..errors import HTTPException
from .models import ensureQueueObjectType
log = logging.getLogger("discodo.client.http")
class HTTPClient:
def __init__(self, client):
self.VoiceClient = client
self.Node = client.Node
self.loop = client.Node.loop
@property
def headers(self) -> dict:
return {
"Authorization": self.Node.password,
"User-ID": str(self.Node.user_id),
"Guild-ID": str(self.VoiceClient.guild_id),
"VoiceClient-ID": str(self.VoiceClient.id),
}
async def fetch(self, method, endpoint, **kwargs):
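        # Send an authenticated request to the node and return the parsed JSON body,
        # raising HTTPException for non-2xx responses.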
URL = self.Node.URL + endpoint
if "headers" not in kwargs:
kwargs["headers"] = {}
kwargs["headers"].update(self.headers)
async with self.Node.session.request(method, URL, **kwargs) as response:
log.debug(f"{method} {URL} with {kwargs} has returned {response.status}")
data = ensureQueueObjectType(
self.VoiceClient, await response.json(content_type=None)
)
if 200 <= response.status < 300:
return data
raise HTTPException(response.status, data)
async def getSource(self, query):
return await self.fetch("GET", "/getSource", params={"query": query})
async def searchSources(self, query):
return await self.fetch("GET", "/searchSources", params={"query": query})
async def getVCContext(self):
return await self.fetch("GET", "/context")
async def setVCContext(self, data):
return await self.fetch("POST", "/context", json={"context": data})
async def putSource(self, source):
return await self.fetch("POST", "/putSource", json={"source": source})
async def loadSource(self, query):
return await self.fetch("POST", "/loadSource", json={"query": query})
async def getOptions(self):
return await self.fetch("GET", "/options")
async def setOptions(self, options):
return await self.fetch("POST", "/options", json=options)
async def getSeek(self):
return await self.fetch("GET", "/seek")
async def seek(self, offset):
return await self.fetch("POST", "/seek", json={"offset": offset})
async def skip(self, offset):
return await self.fetch("POST", "/skip", json={"offset": offset})
async def pause(self):
return await self.fetch("POST", "/pause")
async def resume(self):
return await self.fetch("POST", "/resume")
async def shuffle(self):
return await self.fetch("POST", "/shuffle")
async def queue(self):
return await self.fetch("GET", "/queue")
async def getCurrent(self):
return await self.fetch("GET", "/current")
async def getQueueSource(self, tag):
return await self.fetch("GET", f"/queue/{tag}")
async def setCurrent(self, data):
return await self.fetch("POST", "/current", json=data)
async def setQueueSource(self, tag, data):
return await self.fetch("POST", f"/queue/{tag}", json=data)
async def removeQueueSource(self, tag):
return await self.fetch("DELETE", f"/queue/{tag}")
|
Lib/rubicon/objc/__init__.py | snazari/Pyto | 701 | 12667202 | # Examples of valid version strings
# __version__ = '1.2.3.dev1' # Development release 1
# __version__ = '1.2.3a1' # Alpha Release 1
# __version__ = '1.2.3b1' # Beta Release 1
# __version__ = '1.2.3rc1' # RC Release 1
# __version__ = '1.2.3' # Final Release
# __version__ = '1.2.3.post1' # Post Release 1
__version__ = "0.3.1.dev1"
# Import commonly used submodules right away.
# The first few imports are only included for clarity. They are not strictly necessary, because the from-imports below
# also import the types and runtime modules and implicitly add them to the rubicon.objc namespace.
from . import types # noqa: F401
from . import runtime # noqa: F401
from . import api # noqa: F401
# The import of collections is important, however. The classes from collections are not meant to be used directly,
# instead they are registered with the runtime module (using the for_objcclass decorator) so they are used in place of
# ObjCInstance when representing Foundation collections in Python. If this module is not imported, the registration
# will not take place, and Foundation collections will not support the expected methods/operators in Python!
from . import collections # noqa: F401
from .types import ( # noqa: F401
CFIndex,
CFRange,
CGFloat,
CGGlyph,
CGPoint,
CGPointMake,
CGRect,
CGRectMake,
CGSize,
CGSizeMake,
NSEdgeInsets,
NSEdgeInsetsMake,
NSInteger,
NSMakePoint,
NSMakeRect,
NSMakeSize,
NSPoint,
NSRange,
NSRect,
NSSize,
NSTimeInterval,
NSUInteger,
NSZeroPoint,
UIEdgeInsets,
UIEdgeInsetsMake,
UIEdgeInsetsZero,
UniChar,
unichar,
)
from .runtime import SEL, send_message, send_super # noqa: F401
from .api import ( # noqa: F401
Block,
NSArray,
NSDictionary,
NSMutableArray,
NSMutableDictionary,
NSObject,
NSObjectProtocol,
ObjCBlock,
ObjCClass,
ObjCInstance,
ObjCMetaClass,
ObjCProtocol,
at,
ns_from_py,
objc_classmethod,
objc_const,
objc_ivar,
objc_method,
objc_property,
objc_rawmethod,
py_from_ns,
)
|
test cases/python/3 cython/cytest.py | kira78/meson | 4,047 | 12667205 | #!/usr/bin/env python3
from storer import Storer
s = Storer()
if s.get_value() != 0:
raise SystemExit('Initial value incorrect.')
s.set_value(42)
if s.get_value() != 42:
raise SystemExit('Setting value failed.')
try:
s.set_value('not a number')
raise SystemExit('Using wrong argument type did not fail.')
except TypeError:
pass
|
tests/components/evil_genius_labs/test_init.py | MrDelik/core | 30,023 | 12667251 | """Test evil genius labs init."""
import pytest
from homeassistant import config_entries
from homeassistant.components.evil_genius_labs import PLATFORMS
@pytest.mark.parametrize("platforms", [PLATFORMS])
async def test_setup_unload_entry(hass, setup_evil_genius_labs, config_entry):
"""Test setting up and unloading a config entry."""
assert len(hass.states.async_entity_ids()) == 1
assert await hass.config_entries.async_unload(config_entry.entry_id)
assert config_entry.state == config_entries.ConfigEntryState.NOT_LOADED
|
excel4lib/sheet/__init__.py | aaaddress1/boobsnail | 169 | 12667260 | <reponame>aaaddress1/boobsnail<filename>excel4lib/sheet/__init__.py
from .cell import *
from .worksheet import * |
binproperty/views.py | wh8983298/GreaterWMS | 1,063 | 12667267 | <gh_stars>1000+
from rest_framework import viewsets
from .models import ListModel
from . import serializers
from utils.page import MyPageNumberPagination
from rest_framework.filters import OrderingFilter
from django_filters.rest_framework import DjangoFilterBackend
from .filter import Filter
class APIViewSet(viewsets.ModelViewSet):
"""
list:
        Respond with a data list (all)
"""
pagination_class = MyPageNumberPagination
filter_backends = [DjangoFilterBackend, OrderingFilter, ]
ordering_fields = ['id', "create_time", "update_time", ]
filter_class = Filter
def get_queryset(self):
if self.request.user:
return ListModel.objects.filter(is_delete=False)
else:
return ListModel.objects.none()
def get_serializer_class(self):
if self.action in ['list']:
return serializers.BinpropertyGetSerializer
else:
return self.http_method_not_allowed(request=self.request)
|
package/tests/database/test_settings.py | R-fred/awesome-streamlit | 1,194 | 12667277 | <filename>package/tests/database/test_settings.py<gh_stars>1000+
"""In this module we test that there is a module settings and is has the required
attributes and functionality"""
from awesome_streamlit.database import settings
def test_github():
"""Test that there is a GITHUB_URL Setting"""
assert settings.GITHUB_URL
def test_github_raw():
"""Test that there is a GITHUB_RAW_URL Setting"""
assert settings.GITHUB_RAW_URL
|
demo/seg_scripts.py | ebrahimebrahim/easyreg | 107 | 12667288 | <filename>demo/seg_scripts.py
import os
import sys
import torch
from easyreg.piplines import run_one_task
import argparse
from task import ModelTask, DataTask  # DataTask is assumed to be provided by task.py alongside ModelTask
class SegmentationTraining():
def __init__(self, args):
self.args = args
def _set_environment(self):
sys.path.insert(0,os.path.abspath('..'))
sys.path.insert(0,os.path.abspath('.'))
sys.path.insert(0,os.path.abspath('../easyreg'))
torch.backends.cudnn.benchmark=True
def train(self):
return
def _create_folders(self):
self._create_folder(self.output_root_path)
self._create_folder(self.task_output_path)
self._create_folder(self.data_task_path)
self._create_folder(self.setting_backup)
def _create_folder(self, path):
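        # Create the folder, asking the user whether to overwrite when it already exists.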
if not os.path.exists(path):
os.makedirs(path)
else:
print("Warning, {} exists, press Y/y to overide, N/n to stop")
user_inp = input()
if user_inp in ["Y", "y"]:
os.makedirs(path)
elif user_inp in ["N", "n"]:
exit()
def __do_segmentation_train(self, pipeline=None):
"""
set running env and run the task
:param pipeline:a Pipeline object
:return: a Pipeline object
"""
self.pipeline = pipeline
self.output_root_path = self.args.output_root_path
self.task_name = self.args.task_name
self.data_task_name = self.args.data_task_name
self.setting_folder_path = self.args.setting_folder_path
        self.data_task_path = os.path.join(self.output_root_path, self.data_task_name)
        self.task_output_path = os.path.join(self.data_task_path, self.task_name)
        os.makedirs(self.task_output_path, exist_ok=True)
        dm, tsm = self.init_train_env()
        tsm.task_par['tsk_set']['gpu_ids'] = self.args.gpu_id
        self.dm_json_path = os.path.join(self.task_output_path, 'cur_data_setting.json') if dm is not None else None
        self.tsm_json_path = os.path.join(self.task_output_path, 'cur_task_setting.json')
tsm.save(self.tsm_json_path)
if dm is not None:
dm.save(self.dm_json_path)
data_loaders = pipeline.data_loaders if self.pipeline is not None else None
self.pipeline = run_one_task(self.tsm_json_path, self.dm_json_path, data_loaders)
def init_train_env(self):
        assert os.path.isfile(self.tsm_json_path), "task setting does not exist"
        dm = DataTask('task_reg', self.dm_json_path) if os.path.isfile(self.dm_json_path) else None
        tsm = ModelTask('task_reg', self.tsm_json_path)
self.data_task_name = self.data_task_name if len(self.data_task_name)>0 else 'custom'
if dm is not None:
dm.data_par['datapro']['dataset']['output_path'] = self.output_root_path
dm.data_par['datapro']['dataset']['task_name'] = self.data_task_name
tsm.task_par['tsk_set']['task_name'] = self.task_name
tsm.task_par['tsk_set']['output_root_path'] = self.data_task_path
return dm, tsm
def save_settings(self):
        self.setting_folder_path = self.args.setting_folder_path
        self.dm_json_path = os.path.join(self.setting_folder_path, 'cur_data_setting.json')
        self.tsm_json_path = os.path.join(self.setting_folder_path, 'cur_task_setting.json')
        dm = DataTask('task_reg', self.dm_json_path) if os.path.isfile(self.dm_json_path) else None
        tsm = ModelTask('task_reg', self.tsm_json_path)
        task_name = self.args.task_name_record
        setting_backup = os.path.join(self.setting_folder_path, task_name + '_backup')
os.makedirs(setting_backup, exist_ok=True)
dm_backup_json_path = os.path.join(setting_backup, 'cur_data_setting.json')
tsm_backup_json_path =os.path.join(setting_backup,'cur_task_setting.json')
tsm.save(tsm_backup_json_path)
if dm is not None:
dm.save(dm_backup_json_path)
if __name__ == '__main__':
"""
An interface for learning segmentation methods.
This script will generate the three folders for the training, if the folder is not found in the given path
    It is recommended to use CUDA_VISIBLE_DEVICES to control data parallelism, but a specific GPU can also be selected via --gpu_id/-g.
    Assume there are three folder levels: output_root_path/ data_task_folder/ task_folder
Arguments:
--output_root_path/ -o: the path of output folder
--data_task_name/ -dtn: data task name i.e. lung_reg_task , oai_reg_task
--task_name / -tn: task name i.e. run_training_vsvf_task, run_training_rdmm_task
--setting_folder_path/ -ts: path of the folder where settings are saved,should include cur_task_setting.json
--gpu_id/ -g: gpu_id to use
"""
parser = argparse.ArgumentParser(description="An easy interface for training segmentation models")
parser.add_argument('-o','--output_root_path', required=False, type=str,
default=None,help='the path of output folder')
parser.add_argument('-dtn','--data_task_name', required=False, type=str,
default='',help='the name of the data related task (like subsampling)')
parser.add_argument('-tn','--task_name', required=False, type=str,
default=None,help='the name of the task')
parser.add_argument('-ts','--setting_folder_path', required=False, type=str,
default=None,help='path of the folder where settings are saved,should include cur_task_setting.json)')
parser.add_argument('-g',"--gpu_id",required=False,type=int,default=0,help='gpu_id to use')
args = parser.parse_args()
print(args)
trainer = SegmentationTraining(args)
trainer.train()
# do_segmentation_train(args)
|
src/deepke/name_entity_re/few_shot/__init__.py | johncolezhang/DeepKE | 710 | 12667292 | from .models import *
from .module import *
from .utils import * |
lib/models/invert.py | conan7882/CNN-Visualization | 201 | 12667297 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# File: invert.py
# Author: <NAME> <<EMAIL>>
import tensorflow as tf
import numpy as np
from tensorcv.models.base import BaseModel
class InvertCNN(BaseModel):
def __init__(self, im_h, im_w, im_c, input_mean=0, input_std=1.0, mean_list=None):
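        # The image to be reconstructed is a trainable variable initialised with Gaussian noise.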
init = tf.random_normal([1, im_h, im_w, im_c])
self.invert_im = tf.get_variable('invert_im',
initializer=init,
# shape=[1, im_h, im_w, im_c],
trainable=True)
self._mean = mean_list
self._input_std = input_std
def _total_variation(self, image):
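        # Sum of squared differences between neighbouring pixels, used as a smoothness regularizer.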
var_x = tf.pow(image[:, 1:, :-1, :] - image[:, :-1, :-1, :], 2)
var_y = tf.pow(image[:, :-1, 1:, :] - image[:, :-1, :-1, :], 2)
return tf.reduce_sum(var_x + var_y)
def get_loss(self, feat_invert, feat_im):
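        # Combine the feature-reconstruction MSE with the total-variation term (the TV weight is currently zeroed out).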
self.mse_loss = 5e-4 * tf.losses.mean_squared_error(feat_invert, feat_im)
self.vt_loss = 0.0000005 * self._total_variation(self.invert_im)
self.loss = 1000 * self.mse_loss + 0*self.vt_loss
return self.loss
def optimize_image(self, feat_invert, feat_im):
loss = self.get_loss(feat_invert, feat_im)
# opt = tf.train.MomentumOptimizer(learning_rate=0.001, momentum=0.9)
opt = tf.train.AdamOptimizer(learning_rate=0.1)
return opt.minimize(loss)
def get_opt_im(self):
im = self.invert_im
# if self._mean is not None:
# im = self._add_mean(im)
return im |
src/main/resources/resource/Intro/Servo01_stop.py | holgerfriedrich/myrobotlab | 179 | 12667315 | #########################################
# Servo01_stop.py
# categories: intro
# more info @: http://myrobotlab.org/service/Intro
#########################################
# uncomment for virtual hardware
# Platform.setVirtual(True)
# Every settings like limits / port number / controller are saved after initial use
# so you can share them between differents script
# servoPin01 = 4
# port = "/dev/ttyUSB0"
# port = "COM15"
# release a servo controller and a servo
Runtime.releaseService("arduino")
Runtime.releaseService("servo01")
# we tell to the service what is going on
# intro.isServoActivated = False ## FIXME this gives error readonly
intro.broadcastState()
|
tests/unit/helloworld/multimodal/conftest.py | Rohitpandit021/jina | 15,179 | 12667339 | import os
import pytest
@pytest.fixture(autouse=True)
def setup_hellworld_env(tmpdir):
os.environ['HW_WORKDIR'] = str(tmpdir)
yield
os.environ.pop("HW_WORKDIR")
|
utest/test/keywords/test_firefox_profile_parsing.py | hugovk/SeleniumLibrary | 792 | 12667379 | import os
import unittest
from approvaltests.approvals import verify_all
from approvaltests.reporters.generic_diff_reporter_factory import (
GenericDiffReporterFactory,
)
from robot.utils import WINDOWS
from selenium import webdriver
from SeleniumLibrary.keywords import WebDriverCreator
class FireFoxProfileParsingTests(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.log_dir = "/log/dir"
cls.creator = WebDriverCreator(cls.log_dir)
path = os.path.dirname(__file__)
reporter_json = os.path.abspath(
os.path.join(path, "..", "approvals_reporters.json")
)
factory = GenericDiffReporterFactory()
factory.load(reporter_json)
cls.reporter = factory.get_first_working()
def setUp(self):
self.results = []
@unittest.skipIf(WINDOWS, reason="ApprovalTest do not support different line feeds")
def test_single_method(self):
self._parse_result(
self.creator._get_ff_profile('set_preference("key1", "arg1")')
)
self._parse_result(
self.creator._get_ff_profile(
'set_preference("key1", "arg1");set_preference("key1", "arg1")'
)
)
self._parse_result(
self.creator._get_ff_profile(
'set_preference("key1", "arg1") ; set_preference("key2", "arg2")'
)
)
profile = self.creator._get_ff_profile("update_preferences()")
self.results.append(isinstance(profile, webdriver.FirefoxProfile))
try:
self.creator._get_ff_profile('wrong_name("key1", "arg1")')
except AttributeError as error:
self.results.append(error)
try:
self.creator._get_ff_profile('set_proxy("foo")')
except Exception as error:
self.results.append(str(error))
verify_all("Firefox profile parsing", self.results, reporter=self.reporter)
def _parse_result(self, result):
to_str = ""
if "key1" in result.default_preferences:
to_str = f"{to_str} key1 {result.default_preferences['key1']}"
if "key2" in result.default_preferences:
to_str = f"{to_str} key2 {result.default_preferences['key2']}"
self.results.append(to_str)
|
ModelConf.py | woailaosang/NeuronBlocks | 1,257 | 12667412 | # Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT license.
import codecs
import json
import os
import tempfile
import random
import string
import copy
import torch
import logging
import shutil
from losses.BaseLossConf import BaseLossConf
#import traceback
from settings import LanguageTypes, ProblemTypes, TaggingSchemes, SupportedMetrics, PredictionTypes, DefaultPredictionFields, ConstantStatic
from utils.common_utils import log_set, prepare_dir, md5, load_from_json, dump_to_json
from utils.exceptions import ConfigurationError
import numpy as np
class ConstantStaticItems(ConstantStatic):
@staticmethod
def concat_key_desc(key_prefix_desc, key):
return key_prefix_desc + '.' + key
@staticmethod
def get_value_by_key(json, key, key_prefix='', use_default=False, default=None):
"""
Args:
json: a json object
key: a key pointing to the value wanted to acquire
use_default: if you really want to use default value when key can not be found in json object, set use_default=True
default: if key is not found and default is None, we would raise an Exception, except that use_default is True
Returns:
value:
"""
try:
value = json[key]
except:
if not use_default:
raise ConfigurationError("key[%s] can not be found in configuration file" % (key_prefix + key))
else:
value = default
return value
@staticmethod
def add_item(item_name, use_default=False, default=None):
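        # Dynamically build a ConstantStatic subclass named `item_name` whose load_data()
        # pulls the matching key out of the json configuration (falling back to `default` when allowed).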
def add_item_loading_func(use_default, default, func_get_value_by_key):
@classmethod
def load_data(cls, obj, json, key_prefix_desc='', use_default=use_default, default=default, func_get_value_by_key=func_get_value_by_key):
obj.__dict__[cls.__name__] = func_get_value_by_key(json, cls.__name__, key_prefix_desc, use_default, default)
return obj
return load_data
return type(item_name, (ConstantStatic, ), dict(load_data=add_item_loading_func(use_default, default, __class__.get_value_by_key)))
@classmethod
def load_data(cls, obj, json, key_prefix_desc=''):
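        # Recursively walk the nested ConstantStaticItems classes and copy each configured value onto `obj`.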
if cls.__name__ in json.keys():
json = json[cls.__name__]
for key in cls.__dict__.keys():
if not hasattr(cls.__dict__[key], 'load_data'):
continue
item = cls.__dict__[key]
obj = item.load_data(obj, json, cls.concat_key_desc(key_prefix_desc, item.__name__))
return obj
class ModelConf(object):
def __init__(self, phase, conf_path, nb_version, params=None, mode='normal'):
""" loading configuration from configuration file and argparse parameters
Args:
phase: train/test/predict/cache
specially, 'cache' phase is used for verifying old cache
conf_path:
params:
mode: 'normal', 'philly'
"""
self.phase = phase
assert self.phase in set(['train', 'test', 'predict', 'cache'])
self.conf_path = conf_path
self.params = params
self.mode = mode.lower()
        assert self.mode in set(['normal', 'philly']), 'Your mode %s is illegal, supported modes are: normal and philly!' % self.mode
self.load_from_file(conf_path)
self.check_version_compat(nb_version, self.tool_version)
if phase != 'cache':
self.check_conf()
logging.debug('Print ModelConf below:')
logging.debug('=' * 80)
# print ModelConf
for name, value in vars(self).items():
if name.startswith("__") is False:
logging.debug('%s: %s' % (str(name), str(value)))
logging.debug('=' * 80)
class Conf(ConstantStaticItems):
license = ConstantStaticItems.add_item('license')
tool_version = ConstantStaticItems.add_item('tool_version')
model_description = ConstantStaticItems.add_item('model_description')
language = ConstantStaticItems.add_item('language', use_default=True, default='english')
class inputs(ConstantStaticItems):
use_cache = ConstantStaticItems.add_item('use_cache', use_default=True, default=True)
dataset_type = ConstantStaticItems.add_item('dataset_type')
tagging_scheme = ConstantStaticItems.add_item('tagging_scheme', use_default=True, default=None)
class data_paths(ConstantStaticItems):
train_data_path = ConstantStaticItems.add_item('train_data_path', use_default=True, default=None)
valid_data_path = ConstantStaticItems.add_item('valid_data_path', use_default=True, default=None)
test_data_path = ConstantStaticItems.add_item('test_data_path', use_default=True, default=None)
predict_data_path = ConstantStaticItems.add_item('predict_data_path', use_default=True, default=None)
pre_trained_emb = ConstantStaticItems.add_item('pre_trained_emb', use_default=True, default=None)
pretrained_model_path = ConstantStaticItems.add_item('pretrained_model_path', use_default=True, default=None)
file_with_col_header = ConstantStaticItems.add_item('file_with_col_header', use_default=True, default=False)
pretrained_emb_type = ConstantStaticItems.add_item('pretrained_emb_type', use_default=True, default='glove')
pretrained_emb_binary_or_text = ConstantStaticItems.add_item('pretrained_emb_binary_or_text', use_default=True, default='text')
involve_all_words_in_pretrained_emb = ConstantStaticItems.add_item('involve_all_words_in_pretrained_emb', use_default=True, default=False)
add_start_end_for_seq = ConstantStaticItems.add_item('add_start_end_for_seq', use_default=True, default=False)
file_header = ConstantStaticItems.add_item('file_header', use_default=True, default=None)
predict_file_header = ConstantStaticItems.add_item('predict_file_header', use_default=True, default=None)
model_inputs = ConstantStaticItems.add_item('model_inputs')
target = ConstantStaticItems.add_item('target', use_default=True, default=None)
positive_label = ConstantStaticItems.add_item('positive_label', use_default=True, default=None)
class outputs(ConstantStaticItems):
save_base_dir = ConstantStaticItems.add_item('save_base_dir', use_default=True, default=None)
model_name = ConstantStaticItems.add_item('model_name')
train_log_name = ConstantStaticItems.add_item('train_log_name', use_default=True, default=None)
test_log_name = ConstantStaticItems.add_item('test_log_name', use_default=True, default=None)
predict_log_name = ConstantStaticItems.add_item('predict_log_name', use_default=True, default=None)
predict_fields = ConstantStaticItems.add_item('predict_fields', use_default=True, default=None)
predict_output_name = ConstantStaticItems.add_item('predict_output_name', use_default=True, default='predict.tsv')
cache_dir = ConstantStaticItems.add_item('cache_dir', use_default=True, default=None)
class training_params(ConstantStaticItems):
class vocabulary(ConstantStaticItems):
min_word_frequency = ConstantStaticItems.add_item('min_word_frequency', use_default=True, default=3)
max_vocabulary = ConstantStaticItems.add_item('max_vocabulary', use_default=True, default=800 * 1000)
max_building_lines = ConstantStaticItems.add_item('max_building_lines', use_default=True, default=1000 * 1000)
optimizer = ConstantStaticItems.add_item('optimizer', use_default=True, default=None)
clip_grad_norm_max_norm = ConstantStaticItems.add_item('clip_grad_norm_max_norm', use_default=True, default=-1)
chunk_size = ConstantStaticItems.add_item('chunk_size', use_default=True, default=1000 * 1000)
lr_decay = ConstantStaticItems.add_item('lr_decay', use_default=True, default=1)
minimum_lr = ConstantStaticItems.add_item('minimum_lr', use_default=True, default=0)
epoch_start_lr_decay = ConstantStaticItems.add_item('epoch_start_lr_decay', use_default=True, default=1)
use_gpu = ConstantStaticItems.add_item('use_gpu', use_default=True, default=False)
cpu_num_workers = ConstantStaticItems.add_item('cpu_num_workers', use_default=True, default=-1) #by default, use all workers cpu supports
batch_size = ConstantStaticItems.add_item('batch_size', use_default=True, default=1)
batch_num_to_show_results = ConstantStaticItems.add_item('batch_num_to_show_results', use_default=True, default=10)
max_epoch = ConstantStaticItems.add_item('max_epoch', use_default=True, default=float('inf'))
valid_times_per_epoch = ConstantStaticItems.add_item('valid_times_per_epoch', use_default=True, default=None)
steps_per_validation = ConstantStaticItems.add_item('steps_per_validation', use_default=True, default=10)
text_preprocessing = ConstantStaticItems.add_item('text_preprocessing', use_default=True, default=list())
max_lengths = ConstantStaticItems.add_item('max_lengths', use_default=True, default=None)
fixed_lengths = ConstantStaticItems.add_item('fixed_lengths', use_default=True, default=None)
tokenizer = ConstantStaticItems.add_item('tokenizer', use_default=True, default=None)
architecture = ConstantStaticItems.add_item('architecture')
loss = ConstantStaticItems.add_item('loss', use_default=True, default=None)
metrics = ConstantStaticItems.add_item('metrics', use_default=True, default=None)
def raise_configuration_error(self, key):
raise ConfigurationError(
"The configuration file %s is illegal. the item [%s] is not found." % (self.conf_path, key))
def load_from_file(self, conf_path):
# load file
self.conf = load_from_json(conf_path, debug=False)
self = self.Conf.load_data(self, {'Conf' : self.conf}, key_prefix_desc='Conf')
self.language = self.language.lower()
self.configurate_outputs()
self.configurate_inputs()
self.configurate_training_params()
self.configurate_architecture()
self.configurate_loss()
self.configurate_cache()
def configurate_outputs(self):
def configurate_logger(self):
if self.phase == 'cache':
return
# dir
if hasattr(self.params, 'log_dir') and self.params.log_dir:
self.log_dir = self.params.log_dir
prepare_dir(self.log_dir, True, allow_overwrite=True)
else:
self.log_dir = self.save_base_dir
# path
self.train_log_path = os.path.join(self.log_dir, self.train_log_name)
self.test_log_path = os.path.join(self.log_dir, self.test_log_name)
self.predict_log_path = os.path.join(self.log_dir, self.predict_log_name)
if self.phase == 'train':
log_path = self.train_log_path
elif self.phase == 'test':
log_path = self.test_log_path
elif self.phase == 'predict':
log_path = self.predict_log_path
if log_path is None:
self.raise_configuration_error(self.phase + '_log_name')
# log level
if self.mode == 'philly' or self.params.debug:
log_set(log_path, console_level='DEBUG', console_detailed=True, disable_log_file=self.params.disable_log_file)
else:
log_set(log_path, disable_log_file=self.params.disable_log_file)
# save base dir
if hasattr(self.params, 'model_save_dir') and self.params.model_save_dir:
self.save_base_dir = self.params.model_save_dir
elif self.save_base_dir is None:
self.raise_configuration_error('save_base_dir')
# prepare save base dir
if self.phase != 'cache':
prepare_dir(self.save_base_dir, True, allow_overwrite=self.params.force or self.mode == 'philly',
extra_info='will overwrite model file and train.log' if self.phase=='train' else 'will add %s.log and predict file'%self.phase)
# logger
configurate_logger(self)
# predict output path
if self.phase != 'cache':
if self.params.predict_output_path:
self.predict_output_path = self.params.predict_output_path
else:
self.predict_output_path = os.path.join(self.save_base_dir, self.predict_output_name)
logging.debug('Prepare dir for: %s' % self.predict_output_path)
prepare_dir(self.predict_output_path, False, allow_overwrite=self.params.force or self.mode == 'philly')
if self.predict_fields is None:
self.predict_fields = DefaultPredictionFields[ProblemTypes[self.problem_type]]
self.model_save_path = os.path.join(self.save_base_dir, self.model_name)
def configurate_inputs(self):
def configurate_data_path(self):
self.pretrained_emb_path = self.pre_trained_emb
if self.mode != "normal":
self.train_data_path = None
self.valid_data_path = None
self.test_data_path = None
self.predict_data_path = None
self.pretrained_emb_path = None
if hasattr(self.params, 'train_data_path') and self.params.train_data_path:
self.train_data_path = self.params.train_data_path
if hasattr(self.params, 'valid_data_path') and self.params.valid_data_path:
self.valid_data_path = self.params.valid_data_path
if hasattr(self.params, 'test_data_path') and self.params.test_data_path:
self.test_data_path = self.params.test_data_path
if hasattr(self.params, 'predict_data_path') and self.params.predict_data_path:
self.predict_data_path = self.params.predict_data_path
if hasattr(self.params, 'pretrained_emb_path') and self.params.pretrained_emb_path:
self.pretrained_emb_path = self.params.pretrained_emb_path
if self.phase == 'train' or self.phase == 'cache':
if self.valid_data_path is None and self.test_data_path is not None:
# We support test_data_path == None; if someone sets valid_data_path to None while test_data_path is not None,
# swap valid_data_path and test_data_path
self.valid_data_path = self.test_data_path
self.test_data_path = None
elif self.phase == 'predict':
if self.predict_data_path is None and self.test_data_path is not None:
self.predict_data_path = self.test_data_path
self.test_data_path = None
return self
def configurate_data_format(self):
# file columns
if self.phase == 'train' or self.phase == 'test' or self.phase == 'cache':
self.file_columns = self.file_header
if self.file_columns is None:
self.raise_configuration_error('file_columns')
if self.phase == 'predict':
self.file_columns, self.predict_file_columns = self.file_header, self.predict_file_header
if self.file_columns is None and self.predict_file_columns is None:
self.raise_configuration_error('predict_file_columns')
if self.file_columns and self.predict_file_columns is None:
self.predict_file_columns = self.file_columns
# target
if self.phase != 'predict':
self.answer_column_name = self.target
if self.target is None and self.phase != 'cache':
self.raise_configuration_error('target')
if ProblemTypes[self.problem_type] == ProblemTypes.sequence_tagging and self.add_start_end_for_seq is None:
self.add_start_end_for_seq = True
# pretrained embedding
if 'word' in self.architecture[0]['conf'] and self.pretrained_emb_path:
if hasattr(self.params, 'involve_all_words_in_pretrained_emb') and self.params.involve_all_words_in_pretrained_emb:
self.involve_all_words_in_pretrained_emb = self.params.involve_all_words_in_pretrained_emb
if hasattr(self.params, 'pretrained_emb_type') and self.params.pretrained_emb_type:
self.pretrained_emb_type = self.params.pretrained_emb_type
if hasattr(self.params, 'pretrained_emb_binary_or_text') and self.params.pretrained_emb_binary_or_text:
self.pretrained_emb_binary_or_text = self.params.pretrained_emb_binary_or_text
self.pretrained_emb_dim = self.architecture[0]['conf']['word']['dim']
else:
self.pretrained_emb_path = None
self.involve_all_words_in_pretrained_emb = None
self.pretrained_emb_type = None
self.pretrained_emb_binary_or_text = None
self.pretrained_emb_dim = None
return self
def configurate_model_input(self):
self.object_inputs = self.model_inputs
self.object_inputs_names = [name for name in self.object_inputs]
return self
self.problem_type = self.dataset_type.lower()
# previous model path
if hasattr(self.params, 'previous_model_path') and self.params.previous_model_path:
self.previous_model_path = self.params.previous_model_path
else:
self.previous_model_path = os.path.join(self.save_base_dir, self.model_name)
# pretrained model path
if hasattr(self.params, 'pretrained_model_path') and self.params.pretrained_model_path:
self.pretrained_model_path = self.params.pretrained_model_path
# saved problem path
model_path = None
if self.phase == 'train':
model_path = self.pretrained_model_path
elif self.phase == 'test' or self.phase == 'predict':
model_path = self.previous_model_path
if model_path:
model_path_dir = os.path.dirname(model_path)
self.saved_problem_path = os.path.join(model_path_dir, '.necessary_cache', 'problem.pkl')
if not os.path.isfile(self.saved_problem_path):
self.saved_problem_path = os.path.join(model_path_dir, 'necessary_cache', 'problem.pkl')
if not (os.path.isfile(model_path) and os.path.isfile(self.saved_problem_path)):
raise Exception('Previous trained model %s or its dictionaries %s does not exist!' % (model_path, self.saved_problem_path))
configurate_data_path(self)
configurate_data_format(self)
configurate_model_input(self)
def configurate_training_params(self):
# optimizer
if self.phase == 'train':
if self.optimizer is None:
self.raise_configuration_error('training_params.optimizer')
if 'name' not in self.optimizer.keys():
self.raise_configuration_error('training_params.optimizer.name')
self.optimizer_name = self.optimizer['name']
if 'params' not in self.optimizer.keys():
self.raise_configuration_error('training_params.optimizer.params')
self.optimizer_params = self.optimizer['params']
if hasattr(self.params, 'learning_rate') and self.params.learning_rate:
self.optimizer_params['lr'] = self.params.learning_rate
# batch size
self.batch_size_each_gpu = self.batch_size # the batch_size in conf file is the batch_size on each GPU
if hasattr(self.params, 'batch_size') and self.params.batch_size:
self.batch_size_each_gpu = self.params.batch_size
if self.batch_size_each_gpu is None:
self.raise_configuration_error('training_params.batch_size')
self.batch_size_total = self.batch_size_each_gpu
if torch.cuda.device_count() > 1:
self.batch_size_total = torch.cuda.device_count() * self.batch_size_each_gpu
self.batch_num_to_show_results = self.batch_num_to_show_results // torch.cuda.device_count()
if hasattr(self.params, 'max_epoch') and self.params.max_epoch:
self.max_epoch = self.params.max_epoch
if self.valid_times_per_epoch is not None:
logging.info("configuration[training_params][valid_times_per_epoch] is deprecated, please use configuration[training_params][steps_per_validation] instead")
# sequence length
if self.fixed_lengths:
self.max_lengths = None
if ProblemTypes[self.problem_type] == ProblemTypes.sequence_tagging:
self.fixed_lengths = None
self.max_lengths = None
# text preprocessing
self.__text_preprocessing = self.text_preprocessing
self.DBC2SBC = True if 'DBC2SBC' in self.__text_preprocessing else False
self.unicode_fix = True if 'unicode_fix' in self.__text_preprocessing else False
self.remove_stopwords = True if 'remove_stopwords' in self.__text_preprocessing else False
# tokenizer
if self.tokenizer is None:
self.tokenizer = 'jieba' if self.language == 'chinese' else 'nltk'
# GPU/CPU
if self.phase != 'cache':
if torch.cuda.is_available() and torch.cuda.device_count() > 0 and self.use_gpu:
logging.info("Activating GPU mode, there are %d GPUs available" % torch.cuda.device_count())
else:
self.use_gpu = False
logging.info("Activating CPU mode")
def configurate_architecture(self):
self.input_types = self.architecture[0]['conf']
# extra feature
feature_all = set([_.lower() for _ in self.input_types.keys()])
formal_feature = set(['word', 'char'])
extra_feature_num = feature_all - formal_feature
self.extra_feature = len(extra_feature_num) != 0
if self.extra_feature:
if self.DBC2SBC:
logging.warning("Detect the extra feature %s, set the DBC2sbc is False." % ''.join(list(extra_feature_num)))
if self.unicode_fix:
logging.warning("Detect the extra feature %s, set the unicode_fix is False." % ''.join(list(extra_feature_num)))
if self.remove_stopwords:
logging.warning("Detect the extra feature %s, set the remove_stopwords is False." % ''.join(list(extra_feature_num)))
# output layer
self.output_layer_id = []
for single_layer in self.architecture:
if 'output_layer_flag' in single_layer and single_layer['output_layer_flag']:
self.output_layer_id.append(single_layer['layer_id'])
# check CNN layer & change min sentence length
cnn_rele_layers = ['Conv', 'ConvPooling']
self.min_sentence_len = 0
for layer_index, single_layer in enumerate(self.architecture):
if layer_index == 0:
continue
if sum([_ == single_layer['layer'] for _ in cnn_rele_layers]):
# get window_size conf: type maybe int or list
for single_conf, single_conf_value in single_layer['conf'].items():
if 'window' in single_conf.lower():
self.min_sentence_len = max(self.min_sentence_len, np.max(np.array([single_conf_value])))
break
def configurate_loss(self):
if self.phase != 'train' and self.phase != 'test':
return
if self.loss is None or self.metrics is None:
self.raise_configuration_error('loss/metrics')
self.loss = BaseLossConf.get_conf(**self.loss)
if 'auc' in self.metrics and ProblemTypes[self.problem_type] == ProblemTypes.classification:
self.pos_label = self.positive_label
def configurate_cache(self):
# whether use cache
if self.mode == 'philly':
self.use_cache = True
# cache dir
if self.phase == 'train':
if hasattr(self.params, 'cache_dir') and self.params.cache_dir:
self.cache_dir = self.params.cache_dir
else:
if self.mode == 'normal':
if self.use_cache is False:
self.cache_dir = os.path.join(tempfile.gettempdir(), 'neuron_blocks', ''.join(random.sample(string.ascii_letters+string.digits, 16)))
else:
# for philly mode, we can only save files in model_path or scratch_path
self.cache_dir = os.path.join(self.save_base_dir, 'cache')
self.problem_path = os.path.join(self.cache_dir, 'problem.pkl')
if self.pretrained_emb_path is not None:
self.emb_pkl_path = os.path.join(self.cache_dir, 'emb.pkl')
else:
self.emb_pkl_path = None
else:
tmp_problem_path = os.path.join(self.save_base_dir, '.necessary_cache', 'problem.pkl')
self.problem_path = tmp_problem_path if os.path.isfile(tmp_problem_path) else os.path.join(self.save_base_dir, 'necessary_cache', 'problem.pkl')
# md5 of training data and problem
self.train_data_md5 = None
if self.phase == 'train' and self.train_data_path:
logging.info("Calculating the md5 of traing data ...")
self.train_data_md5 = md5([self.train_data_path])
logging.info("the md5 of traing data is %s"%(self.train_data_md5))
self.problem_md5 = None
# encoding
self.encoding_cache_dir = None
self.encoding_cache_index_file_path = None
self.encoding_cache_index_file_md5_path = None
self.encoding_file_index = None
self.encoding_cache_legal_line_cnt = 0
self.encoding_cache_illegal_line_cnt = 0
self.load_encoding_cache_generator = None
def check_conf(self):
""" verify if the configuration is legal or not
Returns:
"""
# In philly mode, ensure the data and model etc. are not the local paths defined in configuration file.
if self.mode == 'philly':
assert not (hasattr(self.params, 'train_data_path') and self.params.train_data_path is None and hasattr(self, 'train_data_path') and self.train_data_path), 'In philly mode, but a local train_data_path (%s) is defined in your configuration file' % self.train_data_path
assert not (hasattr(self.params, 'valid_data_path') and self.params.valid_data_path is None and hasattr(self, 'valid_data_path') and self.valid_data_path), 'In philly mode, but a local valid_data_path (%s) is defined in your configuration file' % self.valid_data_path
assert not (hasattr(self.params, 'test_data_path') and self.params.test_data_path is None and hasattr(self, 'test_data_path') and self.test_data_path), 'In philly mode, but a local test_data_path (%s) is defined in your configuration file' % self.test_data_path
if self.phase == 'train':
assert hasattr(self.params, 'model_save_dir') and self.params.model_save_dir, 'In philly mode, you must define a model save dir through the training params'
assert not (self.params.pretrained_model_path is None and self.pretrained_model_path), 'In philly mode, but a local pretrained model path (%s) is defined in your configuration file' % self.pretrained_model_path
assert not (self.pretrained_model_path is None and self.params.pretrained_emb_path is None and self.pretrained_emb_path), 'In philly mode, but a local pretrained embedding (%s) is defined in your configuration file' % self.pretrained_emb_path
elif self.phase == 'test' or self.phase == 'predict':
assert not (self.params.previous_model_path is None and self.previous_model_path), 'In philly mode, but a previously trained local model (%s) is defined in your configuration file' % self.previous_model_path
# check inputs
# it seems that os.path.isfile cannot detect hdfs files
if self.phase == 'train':
assert self.train_data_path is not None, "Please define train_data_path"
assert os.path.isfile(self.train_data_path), "Training data %s does not exist!" % self.train_data_path
assert self.valid_data_path is not None, "Please define valid_data_path"
assert os.path.isfile(self.valid_data_path), "Validation data %s does not exist!" % self.valid_data_path
if hasattr(self, 'pretrained_emb_type') and self.pretrained_emb_type:
assert self.pretrained_emb_type in set(['glove', 'word2vec', 'fasttext']), 'Embedding type %s is not supported! We support glove, word2vec, fasttext now.' % self.pretrained_emb_type
if hasattr(self, 'pretrained_emb_binary_or_text') and self.pretrained_emb_binary_or_text:
assert self.pretrained_emb_binary_or_text in set(['text', 'binary']), 'Embedding file type %s is not supported! We support text and binary.' % self.pretrained_emb_binary_or_text
elif self.phase == 'test':
assert self.test_data_path is not None, "Please define test_data_path"
assert os.path.isfile(self.test_data_path), "Test data %s does not exist!" % self.test_data_path
elif self.phase == 'predict':
assert self.predict_data_path is not None, "Please define predict_data_path"
assert os.path.isfile(self.predict_data_path), "Prediction data %s does not exist!" % self.predict_data_path
# check language types
SUPPORTED_LANGUAGES = set(LanguageTypes._member_names_)
assert self.language in SUPPORTED_LANGUAGES, "Language type %s is not supported now. Supported types: %s" % (self.language, ",".join(SUPPORTED_LANGUAGES))
# check problem types
SUPPORTED_PROBLEMS = set(ProblemTypes._member_names_)
assert self.problem_type in SUPPORTED_PROBLEMS, "Data type %s is not supported now. Supported types: %s" % (self.problem_type, ",".join(SUPPORTED_PROBLEMS))
if ProblemTypes[self.problem_type] == ProblemTypes.sequence_tagging:
SUPPORTED_TAGGING_SCHEMES = set(TaggingSchemes._member_names_)
assert self.tagging_scheme is not None, "For sequence tagging problems, the tagging scheme must be defined at configuration['inputs']['tagging_scheme']!"
assert self.tagging_scheme in SUPPORTED_TAGGING_SCHEMES, "Tagging scheme %s is not supported now. Supported schemes: %s" % (self.tagging_scheme, ",".join(SUPPORTED_TAGGING_SCHEMES))
# the max_lengths of all the inputs and targets should be consistent
if self.max_lengths:
max_lengths = list(self.max_lengths.values())
for i in range(len(max_lengths) - 1):
assert max_lengths[i] == max_lengths[i + 1], "For sequence tagging tasks, the max_lengths of all the inputs and targets should be consistent!"
# check appliable metrics
if self.phase == 'train' or self.phase == 'test':
self.metrics_post_check = set() # saved to check later
diff = set(self.metrics) - SupportedMetrics[ProblemTypes[self.problem_type]]
illegal_metrics = []
for diff_metric in diff:
if diff_metric.find('@') != -1:
field, target = diff_metric.split('@')
#if not field in PredictionTypes[ProblemTypes[self.problem_type]]:
if field != 'auc':
illegal_metrics.append(diff_metric)
else:
if target != 'average':
self.metrics_post_check.add(diff_metric)
if len(illegal_metrics) > 0:
raise Exception("Metrics %s are not supported for %s tasks!" % (",".join(list(illegal_metrics)), self.problem_type))
# check predict fields
if self.phase == 'predict':
self.predict_fields_post_check = set() # saved to check later
diff = set(self.predict_fields) - PredictionTypes[ProblemTypes[self.problem_type]]
illegal_fields = []
for diff_field in diff:
if diff_field.find('@') != -1 and diff_field.startswith('confidence'):
field, target = diff_field.split('@')
#if not field in PredictionTypes[ProblemTypes[self.problem_type]]:
if field != 'confidence':
illegal_fields.append(diff_field)
else:
# don't know if the target exists in the output dictionary, check after problem loaded
self.predict_fields_post_check.add(diff_field)
else:
illegal_fields.append(diff_field)
if len(illegal_fields) > 0:
raise Exception("The prediction fields %s is/are not supported!" % ",".join(illegal_fields))
def check_version_compat(self, nb_version, conf_version):
""" check if the version of toolkit and configuration file is compatible
Args:
nb_version: x.y.z
conf_version: x.y.z
Returns:
None if the x field and y field both match; otherwise a ConfigurationError is raised
"""
nb_version_split = nb_version.split('.')
conf_version_split = conf_version.split('.')
if len(nb_version_split) != len(conf_version_split):
raise ConfigurationError('The tool_version field of your configuration is illegal!')
if not (nb_version_split[0] == conf_version_split[0] and nb_version_split[1] == conf_version_split[1]):
raise ConfigurationError('The NeuronBlocks version is %s, but the configuration version is %s, please update your configuration to %s.%s.X' % (nb_version, conf_version, nb_version_split[0], nb_version_split[1]))
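# Illustrative note (not part of the original code), showing the rule above:
#   check_version_compat('1.2.3', '1.2.9')  -> passes, since the x and y fields match
#   check_version_compat('1.3.0', '1.2.9')  -> raises ConfigurationError asking for a 1.3.X configuration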
def back_up(self, params):
shutil.copy(params.conf_path, self.save_base_dir)
logging.info('Configuration file is backed up to %s' % (self.save_base_dir))
|
core/models.py | vlafranca/stream_framework_example | 102 | 12667485 | from django.db import models
from django.conf import settings
from django.utils.timezone import make_naive
import pytz
class BaseModel(models.Model):
class Meta:
abstract = True
class Item(BaseModel):
user = models.ForeignKey(settings.AUTH_USER_MODEL)
image = models.ImageField(upload_to='items')
source_url = models.TextField()
message = models.TextField(blank=True, null=True)
pin_count = models.IntegerField(default=0)
# class Meta:
# db_table = 'pinterest_example_item'
class Board(BaseModel):
user = models.ForeignKey(settings.AUTH_USER_MODEL)
name = models.CharField(max_length=255)
description = models.TextField(blank=True, null=True)
slug = models.SlugField()
class Pin(BaseModel):
user = models.ForeignKey(settings.AUTH_USER_MODEL)
item = models.ForeignKey(Item)
board = models.ForeignKey(Board)
influencer = models.ForeignKey(
settings.AUTH_USER_MODEL, related_name='influenced_pins')
message = models.TextField(blank=True, null=True)
created_at = models.DateTimeField(auto_now_add=True)
def create_activity(self):
from stream_framework.activity import Activity
from core.verbs import Pin as PinVerb
activity = Activity(
self.user_id,
PinVerb,
self.id,
self.influencer_id,
time=make_naive(self.created_at, pytz.utc),
extra_context=dict(item_id=self.item_id)
)
return activity
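# Hedged usage sketch (not part of the original file): the Activity built above is what a
# stream_framework feed manager would fan out to followers, e.g.
#   pin = Pin.objects.create(user=user, item=item, board=board, influencer=user)
#   activity = pin.create_activity()  # ready to be handed to the project's feed manager
# where `user`, `item` and `board` are hypothetical existing instances.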
class Follow(BaseModel):
'''
A simple table mapping who a user is following.
For example, if user is Kyle and Kyle is following Alex,
the target would be Alex.
'''
user = models.ForeignKey(
settings.AUTH_USER_MODEL, related_name='following_set')
target = models.ForeignKey(
settings.AUTH_USER_MODEL, related_name='follower_set')
created_at = models.DateTimeField(auto_now_add=True)
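# Hedged example (not part of the original code): recording that Kyle follows Alex, as in the
# docstring above, would look roughly like
#   Follow.objects.create(user=kyle, target=alex)
# where `kyle` and `alex` are hypothetical User instances.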
from core import verbs
|
pkg_pytorch/blendtorch/btt/finder.py | kongdai123/pytorch-blender | 381 | 12667486 | import os
import subprocess
import re
import shutil
import logging
import tempfile
from pathlib import Path
logger = logging.getLogger('blendtorch')
script = r'''
import zmq
'''
def discover_blender(additional_blender_paths=None):
'''Return Blender info as dict with keys `path`, `major`, `minor`.'''
my_env = os.environ.copy()
if additional_blender_paths is not None:
my_env['PATH'] = additional_blender_paths + os.pathsep + my_env['PATH']
# Determine path
bpath = shutil.which('blender', path=my_env['PATH'])
if bpath is None:
logger.warning('Could not find Blender.')
return None
else:
logger.debug(f'Discovered Blender in {bpath}')
bpath = Path(bpath).resolve()
p = subprocess.Popen(f'"{bpath}" --version',
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
env=my_env)
out, err = p.communicate()
errcode = p.returncode
# Determine version
r = re.compile(r'Blender\s(\d+)\.(\d+)', re.IGNORECASE)
g = re.search(r, str(out))
version = (None, None)
if errcode == 0 and g is not None:
version = (int(g[1]),int(g[2]))
else:
logger.warning('Failed to parse Blender version.')
return None
# Check if a minimal Python script works
with tempfile.NamedTemporaryFile(mode='w', delete=False) as fp:
fp.write(script)
p = subprocess.Popen(f'"{bpath}" --background --python-use-system-env --python-exit-code 255 --python {fp.name}',
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
env=my_env)
out, err = p.communicate()
errcode = p.returncode
os.remove(fp.name)
if errcode != 0:
logger.warning('Failed to run minimal Blender script; ensure Python requirements are installed.')
return None
return {'path':bpath, 'major':version[0], 'minor':version[1]}
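# Hedged example of the return shape (illustrative values only, not part of the original file):
#   info = discover_blender()
#   # e.g. {'path': PosixPath('/usr/bin/blender'), 'major': 2, 'minor': 90}
#   # info is None when Blender was not found or the probe script failed.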
def _main():
print(discover_blender())
if __name__ == '__main__':
_main() |
cacreader/swig-4.0.2/Examples/python/import_packages/split_modules/vanilla_split/pkg1/__init__.py | kyletanyag/LL-Smartcard | 1,031 | 12667516 | # killroy was here
|
t/base_test.py | orioledb/orioledb | 947 | 12667523 | #!/usr/bin/env python3
# coding: utf-8
import unittest
import testgres
import sys
import os
import random
import string
import time
import hashlib
import base64
import inspect
from threading import Thread
from testgres.enums import NodeStatus
from testgres.consts import PG_CONF_FILE
class BaseTest(unittest.TestCase):
replica = None
basePort = None
def getTestNum(self):
testFullName = inspect.getfile(self.__class__)
names = []
for entry in os.scandir(os.path.dirname(testFullName)):
if entry.is_file() and entry.name.endswith('_test.py') and entry.name != 'base_test.py':
names.append(entry.name)
names.sort()
return names.index(os.path.basename(testFullName))
def getBasePort(self):
if self.basePort is None:
self.basePort = int(os.getenv('TESTGRES_BASE_PORT', '20000')) + self.getTestNum() * 2
return self.basePort
def getReplica(self):
if self.replica is None:
replica = self.node.backup().spawn_replica('replica')
replica.port = self.getBasePort() + 1
replica.append_conf(filename=PG_CONF_FILE, line='\n')
replica.append_conf(filename=PG_CONF_FILE, port=replica.port)
self.replica = replica
return self.replica
def setUp(self):
self.startTime = time.time()
self.node = testgres.get_new_node('test', port = self.getBasePort())
self.node.init() # run initdb
self.node.append_conf('postgresql.conf',
"shared_preload_libraries = orioledb\n")
def list2reason(self, exc_list):
if exc_list and exc_list[-1][0] is self:
return exc_list[-1][1]
def tearDown(self):
result = self.defaultTestResult() # these 2 methods have no side effects
self._feedErrorsToResult(result, self._outcome.errors)
error = self.list2reason(result.errors)
failure = self.list2reason(result.failures)
ok = not error and not failure
if self.node.status() == NodeStatus.Running:
self.node.stop() # comment this out if the node should not stop on failures
pass
if ok:
self.node.cleanup()
else:
print("\nBase directory: " + self.node.base_dir)
if self.replica:
if self.replica.status() == NodeStatus.Running:
self.replica.stop() # comment this out if the replica should not stop on failures
pass
if ok:
self.replica._custom_base_dir = None
self.replica.cleanup()
else:
print("\nReplica base directory: " + self.replica.base_dir)
t = time.time() - self.startTime
sys.stderr.write('%.3f s ' % (t,))
def genString(self, id, length):
i = 0
chunkLen = 21
result = b''
while i * chunkLen < length:
m = hashlib.md5()
m.update((str(id) + '-' + str(i)).encode('ascii'))
result = result + base64.b64encode(m.digest())
i = i + 1
return result[0:length].decode('ascii')
def assertErrorMessageEquals(self, e: Exception, err_msg: str,
hint_msg: str = None):
if (hasattr(e, 'exception')):
e = e.exception
if (hasattr(e, 'pgerror')):
exp_msg = "ERROR: %s\n" % (err_msg)
if (hint_msg != None):
exp_msg += "HINT: %s\n" % (hint_msg)
self.assertEqual(e.pgerror,
exp_msg)
elif (hasattr(e, 'message')):
exp_msg = "ERROR: %s\n" % (err_msg)
if (hint_msg != None):
exp_msg += "HINT: %s\n" % (hint_msg)
self.assertEqual(e.message, exp_msg)
else:
self.assertEqual(e.args[0]['M'], err_msg)
# Thread that executes an SQL query on a PostgreSQL node's connection
class ThreadQueryExecutor(Thread):
def __init__(self, connection, sql_query):
Thread.__init__(self, target=ThreadQueryExecutor.execute_con, args=(connection, sql_query))
self._return = None
def run(self):
try:
if self._target:
self._return = self._target(*self._args)
finally:
del self._target, self._args
def join(self,timeout=None):
Thread.join(self,timeout)
if isinstance(self._return, Exception):
raise self._return
return self._return
@staticmethod
def execute_con(connection, sql_query):
try:
return connection.execute(sql_query)
except Exception as e:
return e
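# Hedged usage sketch (not part of the original file): run a blocking query in the background
# while the caller keeps issuing commands on other connections; `node` is assumed to be a
# started testgres node.
def _example_threaded_query(node, sql="SELECT pg_sleep(1);"):
    con = node.connect()
    t = ThreadQueryExecutor(con, sql)
    t.start()
    # ... interact with the node through other connections here ...
    t.join()  # re-raises any exception captured inside the thread
    con.close()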
def generate_string(size, seed = None):
if seed:
random.seed(seed)
chars = string.ascii_uppercase + string.ascii_lowercase + string.digits
return ''.join(random.choice(chars) for _ in range(size))
def wait_stopevent(node, blocked_pid):
while node.execute("""SELECT EXISTS(
SELECT se.*
FROM pg_stopevents() se
WHERE se.waiter_pids @> ARRAY[%d]
);""" % (blocked_pid, ))[0][0] == False:
time.sleep(0.1)
continue
# waits until the checkpointer process is blocked on a stopevent breakpoint
def wait_checkpointer_stopevent(node):
checkpointer_pid = None
while checkpointer_pid == None:
select_list = node.execute("SELECT pid FROM pg_stat_activity WHERE backend_type = 'checkpointer';")
# the checkpointer may not have started yet, so check the list bounds
if len(select_list) > 0 and len(select_list[0]) > 0:
checkpointer_pid = select_list[0][0]
wait_stopevent(node, checkpointer_pid)
# waits until the orioledb background writer process is blocked on a stopevent breakpoint
def wait_bgwriter_stopevent(node):
bgwriter_pid = None
while bgwriter_pid == None:
select_list = node.execute("SELECT pid FROM pg_stat_activity WHERE backend_type = 'orioledb background writer';")
# the bgwriter may not have started yet, so check the list bounds
if len(select_list) > 0 and len(select_list[0]) > 0:
bgwriter_pid = select_list[0][0]
wait_stopevent(node, bgwriter_pid)
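# Hedged usage sketch (not part of the original file): a typical test arms a stopevent through
# orioledb's SQL interface, starts a background CHECKPOINT, and waits for the checkpointer to
# park on the event before inspecting intermediate state.
def _example_wait_for_checkpointer(node, con):
    t = ThreadQueryExecutor(con, "CHECKPOINT;")
    t.start()
    wait_checkpointer_stopevent(node)  # blocks until the checkpointer is parked on the event
    # ... inspect intermediate state here, then reset the stopevent so the query can finish ...
    return t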
|
test/jinja2.test_pytorch.py | hughperkins/pytorch | 513 | 12667558 | # {{header1}}
# {{header2}}
from __future__ import print_function, division
import PyTorch
import numpy
import inspect
from test.test_helpers import myeval, myexec
{%- set types = [
{'Real': 'Long','real': 'long'},
{'Real': 'Float', 'real': 'float'},
{'Real': 'Double', 'real': 'double'},
{'Real': 'Byte', 'real': 'unsigned char'}
]
%}
{% for typedict in types -%}
{%- set Real = typedict['Real'] -%}
{%- set real = typedict['real'] -%}
def test_pytorch{{Real}}():
PyTorch.manualSeed(123)
numpy.random.seed(123)
{{Real}}Tensor = PyTorch.{{Real}}Tensor
{% if Real == 'Float' -%}
A = numpy.random.rand(6).reshape(3, 2).astype(numpy.float32)
B = numpy.random.rand(8).reshape(2, 4).astype(numpy.float32)
C = A.dot(B)
print('C', C)
print('calling .asTensor...')
tensorA = PyTorch.asFloatTensor(A)
tensorB = PyTorch.asFloatTensor(B)
print(' ... asTensor called')
print('tensorA', tensorA)
tensorA.set2d(1, 1, 56.4)
tensorA.set2d(2, 0, 76.5)
print('tensorA', tensorA)
print('A', A)
print('add 5 to tensorA')
tensorA += 5
print('tensorA', tensorA)
print('A', A)
print('add 7 to tensorA')
tensorA2 = tensorA + 7
print('tensorA2', tensorA2)
print('tensorA', tensorA)
tensorAB = tensorA * tensorB
print('tensorAB', tensorAB)
print('A.dot(B)', A.dot(B))
print('tensorA[2]', tensorA[2])
{% endif -%}
D = PyTorch.{{Real}}Tensor(5, 3).fill(1)
print('D', D)
D[2][2] = 4
print('D', D)
D[3].fill(9)
print('D', D)
D.narrow(1, 2, 1).fill(0)
print('D', D)
{% if Real in ['Float', 'Double'] -%}
print(PyTorch.{{Real}}Tensor(3, 4).uniform())
print(PyTorch.{{Real}}Tensor(3, 4).normal())
print(PyTorch.{{Real}}Tensor(3, 4).cauchy())
print(PyTorch.{{Real}}Tensor(3, 4).exponential())
print(PyTorch.{{Real}}Tensor(3, 4).logNormal())
{% endif -%}
print(PyTorch.{{Real}}Tensor(3, 4).bernoulli())
print(PyTorch.{{Real}}Tensor(3, 4).geometric())
print(PyTorch.{{Real}}Tensor(3, 4).geometric())
PyTorch.manualSeed(3)
print(PyTorch.{{Real}}Tensor(3, 4).geometric())
PyTorch.manualSeed(3)
print(PyTorch.{{Real}}Tensor(3, 4).geometric())
print(type(PyTorch.{{Real}}Tensor(2, 3)))
size = PyTorch.LongStorage(2)
size[0] = 4
size[1] = 3
D.resize(size)
print('D after resize:\n', D)
print('resize1d', PyTorch.{{Real}}Tensor().resize1d(3).fill(1))
print('resize2d', PyTorch.{{Real}}Tensor().resize2d(2, 3).fill(1))
print('resize', PyTorch.{{Real}}Tensor().resize(size).fill(1))
D = PyTorch.{{Real}}Tensor(size).geometric()
# def myeval(expr):
# print(expr, ':', eval(expr))
# def myexec(expr):
# print(expr)
# exec(expr)
myeval('{{Real}}Tensor(3,2).nElement()')
myeval('{{Real}}Tensor().nElement()')
myeval('{{Real}}Tensor(1).nElement()')
A = {{Real}}Tensor(3, 4).geometric(0.9)
myeval('A')
myexec('A += 3')
myeval('A')
myexec('A *= 3')
myeval('A')
{% if Real != 'Byte' -%}
myexec('A -= 3')
{% endif -%}
myeval('A')
{% if Real in ['Float', 'Double'] -%}
print('A /= 3')
A /= 3
{% else -%}
print('A //= 3')
A //= 3
{% endif -%}
myeval('A')
myeval('A + 5')
{% if Real != 'Byte' -%}
myeval('A - 5')
{% endif -%}
myeval('A * 5')
{% if Real in ['Float', 'Double'] -%}
print('A / 2')
A / 2
{% else -%}
print('A // 2')
A // 2
{% endif -%}
B = {{Real}}Tensor().resizeAs(A).geometric(0.9)
myeval('B')
myeval('A + B')
{% if Real != 'Byte' -%}
myeval('A - B')
{% endif -%}
myexec('A += B')
myeval('A')
{% if Real != 'Byte' -%}
myexec('A -= B')
myeval('A')
{%- endif %}
def test_pytorch_{{Real}}_constructors():
{{Real}}Tensor = PyTorch.{{Real}}Tensor
a = {{Real}}Tensor(3, 2, 5)
assert(len(a.size()) == 3)
a = {{Real}}Tensor(3, 2, 5, 6)
assert(len(a.size()) == 4)
def test_Pytorch_{{Real}}_operator_plus():
{{Real}}Tensor = PyTorch.{{Real}}Tensor
a = {{Real}}Tensor(3, 2, 5)
b = {{Real}}Tensor(3, 2, 5)
{%if Real in ['Float', 'Double'] -%}
a.uniform()
b.uniform()
{% else -%}
a.geometric(0.9)
b.geometric(0.9)
{% endif -%}
res = a + b
for i in range(3 * 2 * 5):
assert(abs(res.storage()[i] - (a.storage()[i] + b.storage()[i])) < 0.000001)
def test_Pytorch_{{Real}}_operator_plusequals():
{{Real}}Tensor = PyTorch.{{Real}}Tensor
a = {{Real}}Tensor(3, 2, 5)
b = {{Real}}Tensor(3, 2, 5)
{%if Real in ['Float', 'Double'] -%}
a.uniform()
b.uniform()
{% else -%}
a.geometric(0.9)
b.geometric(0.9)
{% endif -%}
res = a.clone()
res += b
for i in range(3 * 2 * 5):
assert(abs(res.storage()[i] - (a.storage()[i] + b.storage()[i])) < 0.000001)
{% if Real not in ['Byte'] -%}
def test_Pytorch_{{Real}}_operator_minus():
{{Real}}Tensor = PyTorch.{{Real}}Tensor
a = {{Real}}Tensor(3, 2, 5)
b = {{Real}}Tensor(3, 2, 5)
{%if Real in ['Float', 'Double'] -%}
a.uniform()
b.uniform()
{% else -%}
a.geometric(0.9)
b.geometric(0.9)
{% endif -%}
res = a - b
for i in range(3 * 2 * 5):
assert(abs(res.storage()[i] - (a.storage()[i] - b.storage()[i])) < 0.000001)
{%- endif %}
{% if Real not in ['Byte'] -%}
def test_Pytorch_{{Real}}_operator_minusequals():
{{Real}}Tensor = PyTorch.{{Real}}Tensor
a = {{Real}}Tensor(3, 2, 5)
b = {{Real}}Tensor(3, 2, 5)
{%if Real in ['Float', 'Double'] -%}
a.uniform()
b.uniform()
{% else -%}
a.geometric(0.9)
b.geometric(0.9)
{% endif -%}
res = a.clone()
res -= b
for i in range(3 * 2 * 5):
assert(abs(res.storage()[i] - (a.storage()[i] - b.storage()[i])) < 0.000001)
{%- endif %}
def test_Pytorch_{{Real}}_cmul():
{{Real}}Tensor = PyTorch.{{Real}}Tensor
a = {{Real}}Tensor(3, 2, 5)
b = {{Real}}Tensor(3, 2, 5)
{%if Real in ['Float', 'Double'] -%}
a.uniform()
b.uniform()
{% else -%}
a.geometric(0.9)
b.geometric(0.9)
{% endif -%}
res = a.clone() # .cmul(b)
res.cmul(b)
for i in range(3 * 2 * 5):
{% if Real == 'Byte' -%}
assert(abs(res.storage()[i] - ((a.storage()[i] * b.storage()[i])) % 256) < 0.000001)
{% else -%}
assert(abs(res.storage()[i] - (a.storage()[i] * b.storage()[i])) < 0.000001)
{%- endif %}
{% for func in ['abs', 'tanh', 'sigmoid', 'neg', 'cinv'] -%}
{% if Real in ['Double', 'Float'] -%}
# def test_Pytorch_{{Real}}_{{func}}():
# {{Real}}Tensor = PyTorch.{{Real}}Tensor
# a = {{Real}}Tensor(3,2,5)
# {%if Real in ['Float', 'Double'] -%}
# a.uniform()
# {% else -%}
# a.geometric(0.9)
# {% endif -%}
# res = a.clone()
# res.{{func}}()
# for i in range(3*2*5):
# {% if Real == 'Byte' -%}
# assert(abs(res.storage()[i] - ((torch.{{func}}(a.storage()[i]) ) % 256) < 0.000001)
# {% else -%}
# assert(abs(res.storage()[i] - (torch.{{func}}(a.storage()[i]))) < 0.000001)
# {% endif -%}
{%- endif %}
{%- endfor %}
def test_Pytorch_{{Real}}_operator_div():
{{Real}}Tensor = PyTorch.{{Real}}Tensor
a = {{Real}}Tensor(3, 2, 5)
b = {{Real}}Tensor(3, 2, 5)
{%if Real in ['Float', 'Double'] -%}
a.uniform()
b.uniform()
{% else -%}
a.geometric(0.9)
b.geometric(0.9)
{% endif -%}
{% if Real in ['Float', 'Double'] -%}
res = a / b
for i in range(3 * 2 * 5):
assert(abs(res.storage()[i] - (a.storage()[i] / b.storage()[i])) < 0.00001)
{% else -%}
# res = a / b # whilst this should probably be allowed/implemented, it's not yet...
# for i in range(3*2*5):
# assert(abs(res.storage()[i] - (a.storage()[i] / b.storage()[i])) < 0.00001)
res = a // b
for i in range(3 * 2 * 5):
assert(abs(res.storage()[i] - (a.storage()[i] // b.storage()[i])) < 0.00001)
{%- endif %}
def test_Pytorch_{{Real}}_operator_divequals():
{{Real}}Tensor = PyTorch.{{Real}}Tensor
a = {{Real}}Tensor(3, 2, 5)
b = {{Real}}Tensor(3, 2, 5)
{%if Real in ['Float', 'Double'] -%}
a.uniform()
b.uniform()
{% else -%}
a.geometric(0.9)
b.geometric(0.9)
{% endif -%}
res = a.clone()
{% if Real in ['Float', 'Double'] -%}
res /= b
for i in range(3 * 2 * 5):
assert(abs(res.storage()[i] - (a.storage()[i] / b.storage()[i])) < 0.00001)
{% else -%}
res //= b
for i in range(3 * 2 * 5):
assert(abs(res.storage()[i] - (a.storage()[i] // b.storage()[i])) < 0.00001)
{%- endif %}
{% endfor %}
if __name__ == '__main__':
{% for typedict in types -%}
{% set Real = typedict['Real'] -%}
{% set real = typedict['real'] -%}
test_pytorch{{Real}}()
{% endfor %}
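{# Hedged note (not part of the original template): this file is itself a Jinja2 template.
A minimal rendering sketch (output path and header values are assumptions):
    from jinja2 import Template
    src = open('test/jinja2.test_pytorch.py').read()
    rendered = Template(src).render(header1='GENERATED FILE', header2='do not edit by hand')
    open('test/test_pytorch.py', 'w').write(rendered)
#}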
|
typed_python/compiler/tests/alternative_compilation_test.py | APrioriInvestments/typed_python | 105 | 12667588 | # Copyright 2017-2019 typed_python Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typed_python import (
TypeFunction, Int16, UInt64, Float32, Alternative, Forward,
Dict, ConstDict, ListOf, Compiled, OneOf
)
import typed_python._types as _types
from typed_python import Entrypoint
import unittest
import pytest
import time
import psutil
from math import trunc, floor, ceil
class TestAlternativeCompilation(unittest.TestCase):
def test_default_constructor(self):
@Entrypoint
def setIt(d, x):
return d.setdefault(x)
Simple = Alternative("Simple", A={}, B={}, C={})
Complex = Alternative("Complex", A=dict(x=str), B=dict(x=str, y=float), C={})
assert setIt(Dict(int, Simple)(), 10).matches.A
assert setIt(Dict(int, Complex)(), 10).matches.A
def test_simple_alternative_passing(self):
Simple = Alternative("Simple", A={}, B={}, C={})
@Compiled
def f(s: Simple):
y = s
return y
self.assertEqual(f(Simple.A()), Simple.A())
self.assertEqual(f(Simple.B()), Simple.B())
self.assertEqual(f(Simple.C()), Simple.C())
def test_complex_alternative_passing(self):
Complex = Forward("Complex")
Complex = Complex.define(Alternative(
"Complex",
A={'a': str, 'b': int},
B={'a': str, 'c': int},
C={'a': str, 'd': Complex}
))
c = Complex.A(a="hi", b=20)
c2 = Complex.C(a="hi", d=c)
@Compiled
def f(c: Complex):
y = c
return y
self.assertEqual(f(c), c)
self.assertEqual(f(c2), c2)
self.assertEqual(_types.refcount(c), 2)
self.assertEqual(_types.refcount(c2), 1)
def test_construct_alternative(self):
A = Alternative("A", X={'x': int})
@Compiled
def f():
return A.X(x=10)
self.assertTrue(f().matches.X)
self.assertEqual(f().x, 10)
def test_alternative_matches(self):
A = Alternative("A", X={'x': int}, Y={'x': int})
@Compiled
def f(x: A):
return x.matches.X
self.assertTrue(f(A.X()))
self.assertFalse(f(A.Y()))
def test_alternative_member_homogenous(self):
A = Alternative("A", X={'x': int}, Y={'x': int})
@Compiled
def f(x: A):
return x.x
self.assertEqual(f(A.X(x=10)), 10)
self.assertEqual(f(A.Y(x=10)), 10)
def test_alternative_member_diverse(self):
A = Alternative("A", X={'x': int}, Y={'x': float})
@Compiled
def f(x: A):
return x.x
self.assertEqual(f(A.X(x=10)), 10)
self.assertEqual(f(A.Y(x=10.5)), 10.5)
def test_alternative_member_distinct(self):
A = Alternative("A", X={'x': int}, Y={'y': float})
@Compiled
def f(x: A):
if x.matches.X:
return x.x
if x.matches.Y:
return x.y
self.assertEqual(f(A.X(x=10)), 10)
self.assertEqual(f(A.Y(y=10.5)), 10.5)
def test_matching_recursively(self):
@TypeFunction
def Tree(T):
TreeType = Forward("TreeType")
TreeType = TreeType.define(Alternative(
"Tree",
Leaf={'value': T},
Node={'left': TreeType, 'right': TreeType}
))
return TreeType
def treeSum(x: Tree(int)):
matches = x.matches.Leaf
if matches:
return x.value
if x.matches.Node:
return treeSum(x.left) + treeSum(x.right)
return 0
def buildTree(depth: int, offset: int) -> Tree(int):
if depth > 0:
return Tree(int).Node(
left=buildTree(depth-1, offset),
right=buildTree(depth-1, offset+1),
)
return Tree(int).Leaf(value=offset)
aTree = Compiled(buildTree)(15, 0)
treeSumCompiled = Compiled(treeSum)
t0 = time.time()
sum = treeSum(aTree)
t1 = time.time()
sumCompiled = treeSumCompiled(aTree)
t2 = time.time()
self.assertEqual(sum, sumCompiled)
speedup = (t1-t0)/(t2-t1)
self.assertGreater(speedup, 20) # I get about 50
def test_compile_alternative_magic_methods(self):
A = Alternative("A", a={'a': int}, b={'b': str},
__bool__=lambda self: False,
__str__=lambda self: "my str",
__repr__=lambda self: "my repr",
__call__=lambda self, i: "my call",
__len__=lambda self: 42,
__contains__=lambda self, item: item == 1,
__bytes__=lambda self: b'my bytes',
__format__=lambda self, spec: "my format",
__int__=lambda self: 43,
__float__=lambda self: 44.44,
__complex__=lambda self: 3+4j,
__add__=lambda self, other: A.b("add"),
__sub__=lambda self, other: A.b("sub"),
__mul__=lambda self, other: A.b("mul"),
__matmul__=lambda self, other: A.b("matmul"),
__truediv__=lambda self, other: A.b("truediv"),
__floordiv__=lambda self, other: A.b("floordiv"),
__divmod__=lambda self, other: A.b("divmod"),
__mod__=lambda self, other: A.b("mod"),
__pow__=lambda self, other: A.b("pow"),
__lshift__=lambda self, other: A.b("lshift"),
__rshift__=lambda self, other: A.b("rshift"),
__and__=lambda self, other: A.b("and"),
__or__=lambda self, other: A.b("or"),
__xor__=lambda self, other: A.b("xor"),
__iadd__=lambda self, other: A.b("iadd"),
__isub__=lambda self, other: A.b("isub"),
__imul__=lambda self, other: A.b("imul"),
__imatmul__=lambda self, other: A.b("imatmul"),
__itruediv__=lambda self, other: A.b("itruediv"),
__ifloordiv__=lambda self, other: A.b("ifloordiv"),
__imod__=lambda self, other: A.b("imod"),
__ipow__=lambda self, other: A.b("ipow"),
__ilshift__=lambda self, other: A.b("ilshift"),
__irshift__=lambda self, other: A.b("irshift"),
__iand__=lambda self, other: A.b("iand"),
__ior__=lambda self, other: A.b("ior"),
__ixor__=lambda self, other: A.b("ixor"),
__neg__=lambda self: A.b("neg"),
__pos__=lambda self: A.b("pos"),
__invert__=lambda self: A.b("invert"),
__abs__=lambda self: A.b("abs"),
)
def f_bool(x: A):
return bool(x)
def f_str(x: A):
return str(x)
def f_repr(x: A):
return repr(x)
def f_call(x: A):
return x(1)
def f_1in(x: A):
return 1 in x
def f_0in(x: A):
return 0 in x
def f_len(x: A):
return len(x)
def f_int(x: A):
return int(x)
def f_float(x: A):
return float(x)
def f_add(x: A):
return x + A.a()
def f_sub(x: A):
return x - A.a()
def f_mul(x: A):
return x * A.a()
def f_div(x: A):
return x / A.a()
def f_floordiv(x: A):
return x // A.a()
def f_matmul(x: A):
return x @ A.a()
def f_mod(x: A):
return x % A.a()
def f_and(x: A):
return x & A.a()
def f_or(x: A):
return x | A.a()
def f_xor(x: A):
return x ^ A.a()
def f_rshift(x: A):
return x >> A.a()
def f_lshift(x: A):
return x << A.a()
def f_pow(x: A):
return x ** A.a()
def f_neg(x: A):
return -x
def f_pos(x: A):
return +x
def f_invert(x: A):
return ~x
def f_abs(x: A):
return abs(x)
def f_iadd(x: A):
x += A.a()
return x
def f_isub(x: A):
x -= A.a()
return x
def f_imul(x: A):
x *= A.a()
return x
def f_idiv(x: A):
x /= A.a()
return x
def f_ifloordiv(x: A):
x //= A.a()
return x
def f_imatmul(x: A):
x @= A.a()
return x
def f_imod(x: A):
x %= A.a()
return x
def f_iand(x: A):
x &= A.a()
return x
def f_ior(x: A):
x |= A.a()
return x
def f_ixor(x: A):
x ^= A.a()
return x
def f_irshift(x: A):
x >>= A.a()
return x
def f_ilshift(x: A):
x <<= A.a()
return x
def f_ipow(x: A):
x **= A.a()
return x
test_cases = [f_int, f_float, f_bool, f_str, f_repr, f_call, f_0in, f_1in, f_len,
f_add, f_sub, f_mul, f_div, f_floordiv, f_matmul, f_mod, f_and, f_or, f_xor, f_rshift, f_lshift, f_pow,
f_neg, f_pos, f_invert, f_abs,
f_iadd, f_isub, f_imul, f_idiv, f_ifloordiv, f_imatmul,
f_imod, f_iand, f_ior, f_ixor, f_irshift, f_ilshift, f_ipow]
for f in test_cases:
compiled_f = Compiled(f)
r1 = f(A.a())
r2 = compiled_f(A.a())
if r1 != r2:
print("mismatch")
self.assertEqual(r1, r2)
def test_compile_alternative_reverse_methods(self):
A = Alternative("A", a={'a': int}, b={'b': str},
__radd__=lambda self, other: "radd" + repr(other),
__rsub__=lambda self, other: "rsub" + repr(other),
__rmul__=lambda self, other: "rmul" + repr(other),
__rmatmul__=lambda self, other: "rmatmul" + repr(other),
__rtruediv__=lambda self, other: "rtruediv" + repr(other),
__rfloordiv__=lambda self, other: "rfloordiv" + repr(other),
__rmod__=lambda self, other: "rmod" + repr(other),
__rpow__=lambda self, other: "rpow" + repr(other),
__rlshift__=lambda self, other: "rlshift" + repr(other),
__rrshift__=lambda self, other: "rrshift" + repr(other),
__rand__=lambda self, other: "rand" + repr(other),
__rxor__=lambda self, other: "rxor" + repr(other),
__ror__=lambda self, other: "ror" + repr(other),
)
values = [1, Int16(1), UInt64(1), 1.234, Float32(1.234), True, "abc",
ListOf(int)((1, 2)), ConstDict(str, str)({"a": "1"})]
for v in values:
T = type(v)
def f_radd(v: T, x: A):
return v + x
def f_rsub(v: T, x: A):
return v - x
def f_rmul(v: T, x: A):
return v * x
def f_rmatmul(v: T, x: A):
return v @ x
def f_rtruediv(v: T, x: A):
return v * x
def f_rfloordiv(v: T, x: A):
return v * x
def f_rmod(v: T, x: A):
return v * x
def f_rpow(v: T, x: A):
return v * x
def f_rlshift(v: T, x: A):
return v * x
def f_rrshift(v: T, x: A):
return v * x
def f_rand(v: T, x: A):
return v * x
def f_rxor(v: T, x: A):
return v * x
def f_ror(v: T, x: A):
return v * x
for f in [f_radd, f_rsub, f_rmul, f_rmatmul, f_rtruediv, f_rfloordiv, f_rmod, f_rpow,
f_rlshift, f_rrshift, f_rand, f_rxor, f_ror]:
r1 = f(v, A.a())
compiled_f = Compiled(f)
r2 = compiled_f(v, A.a())
self.assertEqual(r1, r2)
def test_compile_alternative_format(self):
A1 = Alternative("A1", a={'a': int}, b={'b': str})
A2 = Alternative("A2", a={'a': int}, b={'b': str},
__str__=lambda self: "my str"
)
A3 = Alternative("A3", a={'a': int}, b={'b': str},
__format__=lambda self, spec: "my format " + spec
)
def a1_format(x: A1):
return format(x)
def a2_format(x: A2):
return format(x)
def a3_format(x: A3):
return format(x)
def a3_format_spec(x: A3):
return format(x, "spec")
r1 = a1_format(A1.a())
c1_format = Compiled(a1_format)
r2 = c1_format(A1.a())
self.assertEqual(r1, r2)
r1 = a2_format(A2.a())
c2_format = Compiled(a2_format)
r2 = c2_format(A2.a())
self.assertEqual(r1, r2)
r1 = a3_format(A3.a())
c3_format = Compiled(a3_format)
r2 = c3_format(A3.a())
self.assertEqual(r1, r2)
r1 = a3_format_spec(A3.a())
c3_format_spec = Compiled(a3_format_spec)
r2 = c3_format_spec(A3.a())
self.assertEqual(r1, r2)
# This failed when I forgot to support ConcreteAlternativeWrappers
@Entrypoint
def specialized_format(x):
return format(x)
test_values = [A1.a(), A1.b(), A2.a(), A2.b(), A3.a(), A3.b()]
for v in test_values:
r1 = format(v)
r2 = specialized_format(v)
self.assertEqual(r1, r2, type(v))
def test_compile_alternative_bytes(self):
A = Alternative("A", a={'a': int}, b={'b': str},
__bytes__=lambda self: b'my bytes'
)
def f_bytes(x: A):
return bytes(x)
v = A.a()
r1 = f_bytes(v)
c_f = Compiled(f_bytes)
r2 = c_f(v)
self.assertEqual(r1, r2)
def test_compile_alternative_attr(self):
def A_getattr(self, n):
return self.d[n]
def A_setattr(self, n, v):
self.d[n] = v
def A_delattr(self, n):
del self.d[n]
A = Alternative("A", a={'d': Dict(str, str), 'i': int},
__getattr__=A_getattr,
__setattr__=A_setattr,
__delattr__=A_delattr
)
def f_getattr1(x: A):
return x.q
def f_getattr2(x: A):
return x.z
def f_setattr1(x: A, s: str):
x.q = s
def f_setattr2(x: A, s: str):
x.z = s
def f_delattr1(x: A):
del x.q
def f_delattr2(x: A):
del x.z
c_getattr1 = Compiled(f_getattr1)
c_getattr2 = Compiled(f_getattr2)
c_setattr1 = Compiled(f_setattr1)
c_setattr2 = Compiled(f_setattr2)
c_delattr1 = Compiled(f_delattr1)
c_delattr2 = Compiled(f_delattr2)
for v in [A.a()]:
f_setattr1(v, "0")
f_setattr2(v, "0")
self.assertEqual(f_getattr1(v), "0")
self.assertEqual(f_getattr1(v), c_getattr1(v))
self.assertEqual(f_getattr2(v), "0")
self.assertEqual(f_getattr2(v), c_getattr2(v))
f_setattr1(v, "1")
self.assertEqual(f_getattr1(v), "1")
self.assertEqual(f_getattr1(v), c_getattr1(v))
self.assertEqual(f_getattr2(v), "0")
self.assertEqual(f_getattr2(v), c_getattr2(v))
c_setattr1(v, "2")
self.assertEqual(f_getattr1(v), "2")
self.assertEqual(f_getattr1(v), c_getattr1(v))
self.assertEqual(f_getattr2(v), "0")
self.assertEqual(f_getattr2(v), c_getattr2(v))
f_setattr2(v, "3")
self.assertEqual(f_getattr1(v), "2")
self.assertEqual(f_getattr1(v), c_getattr1(v))
self.assertEqual(f_getattr2(v), "3")
self.assertEqual(f_getattr2(v), c_getattr2(v))
c_setattr2(v, "4")
self.assertEqual(f_getattr1(v), "2")
self.assertEqual(f_getattr1(v), c_getattr1(v))
self.assertEqual(f_getattr2(v), "4")
self.assertEqual(f_getattr2(v), c_getattr2(v))
f_delattr1(v)
with self.assertRaises(KeyError):
f_getattr1(v)
with self.assertRaises(KeyError):
c_getattr1(v)
self.assertEqual(f_getattr2(v), "4")
self.assertEqual(f_getattr2(v), c_getattr2(v))
f_delattr2(v)
with self.assertRaises(KeyError):
f_getattr1(v)
with self.assertRaises(KeyError):
c_getattr1(v)
with self.assertRaises(KeyError):
f_getattr2(v)
with self.assertRaises(KeyError):
c_getattr2(v)
f_setattr1(v, "5")
f_setattr2(v, "6")
c_delattr1(v)
with self.assertRaises(KeyError):
f_getattr1(v)
with self.assertRaises(KeyError):
c_getattr1(v)
self.assertEqual(f_getattr2(v), "6")
self.assertEqual(f_getattr2(v), c_getattr2(v))
c_delattr2(v)
with self.assertRaises(KeyError):
f_getattr1(v)
with self.assertRaises(KeyError):
c_getattr1(v)
with self.assertRaises(KeyError):
f_getattr2(v)
with self.assertRaises(KeyError):
c_getattr2(v)
def test_compile_alternative_float_methods(self):
# if __float__ is defined, then floor() and ceil() are based on this conversion
# when __floor__ and __ceil__ are not defined
A = Alternative("A", a={'a': int}, b={'b': str},
__float__=lambda self: 1234.5
)
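# e.g. (hedged): with only __float__ defined, both interpreted and compiled floor(A.a())
# should give floor(1234.5) == 1234, and ceil(A.a()) should give 1235.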
def f_floor(x: A):
return floor(x)
def f_ceil(x: A):
return ceil(x)
test_cases = [f_floor, f_ceil]
for f in test_cases:
r1 = f(A.a())
compiled_f = Compiled(f)
r2 = compiled_f(A.a())
self.assertEqual(r1, r2)
B = Alternative("B", a={'a': int}, b={'b': str},
__round__=lambda self, n: 1234 + n,
__trunc__=lambda self: 1,
__floor__=lambda self: 2,
__ceil__=lambda self: 3
)
def f_round0(x: B):
return round(x, 0)
def f_round1(x: B):
return round(x, 1)
def f_round2(x: B):
return round(x, 2)
def f_round_1(x: B):
return round(x, -1)
def f_round_2(x: B):
return round(x, -2)
def f_trunc(x: B):
return trunc(x)
def f_floor(x: B):
return floor(x)
def f_ceil(x: B):
return ceil(x)
test_cases = [f_round0, f_round1, f_round2, f_round_1, f_round_2, f_trunc, f_floor, f_ceil]
for f in test_cases:
r1 = f(B.a())
compiled_f = Compiled(f)
r2 = compiled_f(B.a())
self.assertEqual(r1, r2)
def test_compile_alternative_dir(self):
# The interpreted dir() calls __dir__() and sorts the result.
# I expected the compiled dir() to do the same thing, but it doesn't sort.
# So if you append these elements out of order, the test will fail.
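# Hedged illustration: interpreted dir(A.a()) returns a sorted list, while the compiled path
# returns the entries in the order __dir__ appended them ("x", "y", "z" below).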
A0 = Alternative("A", a={'a': int}, b={'b': str})
def A_dir(self):
x = ListOf(str)()
x.append("x")
x.append("y")
x.append("z")
return x
A = Alternative("A", a={'a': int}, b={'b': str},
__dir__=A_dir,
)
def f_dir0(x: A0):
return dir(x)
def f_dir(x: A):
return dir(x)
for f in [f_dir0]:
compiled_f = Compiled(f)
r1 = f(A0.a())
r2 = compiled_f(A0.a())
self.assertEqual(r1, r2)
for f in [f_dir]:
compiled_f = Compiled(f)
r1 = f(A.a())
r2 = compiled_f(A.a())
self.assertEqual(r1, r2)
c0 = Compiled(f_dir0)
c = Compiled(f_dir)
initMem = psutil.Process().memory_info().rss / 1024 ** 2
for i in range(10000):
c0(A0.a(i))
c(A.a(i))
finalMem = psutil.Process().memory_info().rss / 1024 ** 2
self.assertTrue(finalMem < initMem + 2)
def test_compile_alternative_comparison_defaults(self):
B = Alternative("B", a={'a': int}, b={'b': str})
def f_eq(x: B, y: B):
return x == y
def f_ne(x: B, y: B):
return x != y
def f_lt(x: B, y: B):
return x < y
def f_gt(x: B, y: B):
return x > y
def f_le(x: B, y: B):
return x <= y
def f_ge(x: B, y: B):
return x >= y
def f_hash(x: B):
return hash(x)
values = [B.a(0), B.a(1), B.b("a"), B.b("b")]
test_cases = [f_eq, f_ne, f_lt, f_gt, f_le, f_ge]
for f in test_cases:
for v1 in values:
for v2 in values:
compiled_f = Compiled(f)
r1 = f(v1, v2)
r2 = compiled_f(v1, v2)
self.assertEqual(r1, r2)
test_cases = [f_hash]
for f in test_cases:
for v in values:
compiled_f = Compiled(f)
r1 = f(v)
r2 = compiled_f(v)
self.assertEqual(r1, r2)
def test_compile_alternative_comparison_methods(self):
C = Alternative("C", a={'a': int}, b={'b': str},
__eq__=lambda self, other: True,
__ne__=lambda self, other: False,
__lt__=lambda self, other: True,
__gt__=lambda self, other: False,
__le__=lambda self, other: True,
__ge__=lambda self, other: False,
__hash__=lambda self: 123,
)
def f_eq(x: C):
return x == C.a()
def f_ne(x: C):
return x != C.a()
def f_lt(x: C):
return x < C.a()
def f_gt(x: C):
return x > C.a()
def f_le(x: C):
return x <= C.a()
def f_ge(x: C):
return x >= C.a()
def f_hash(x: C):
return hash(x)
test_cases = [f_eq, f_ne, f_lt, f_gt, f_le, f_ge, f_hash]
for f in test_cases:
compiled_f = Compiled(f)
r1 = f(C.a())
r2 = compiled_f(C.a())
self.assertEqual(r1, r2)
def test_compile_alternative_getsetitem(self):
def A2_getitem(self, i):
if i not in self.d:
return i
return self.d[i]
def A2_setitem(self, i, v):
self.d[i] = v
A2 = Alternative("A2", d={'d': Dict(int, int)},
__getitem__=A2_getitem,
__setitem__=A2_setitem
)
def f_getitem(a: A2, i: int) -> int:
return a[i]
def f_setitem(a: A2, i: int, v: int):
a[i] = v
c_getitem = Compiled(f_getitem)
c_setitem = Compiled(f_setitem)
a = A2.d()
a[123] = 7
self.assertEqual(a[123], 7)
for i in range(10, 20):
self.assertEqual(f_getitem(a, i), i)
self.assertEqual(c_getitem(a, i), i)
f_setitem(a, i, i + 100)
self.assertEqual(f_getitem(a, i), i + 100)
self.assertEqual(c_getitem(a, i), i + 100)
c_setitem(a, i, i + 200)
self.assertEqual(f_getitem(a, i), i + 200)
self.assertEqual(c_getitem(a, i), i + 200)
def test_compile_simple_alternative_magic_methods(self):
A = Alternative("A", a={}, b={},
__bool__=lambda self: False,
__str__=lambda self: "my str",
__repr__=lambda self: "my repr",
__call__=lambda self, i: "my call",
__len__=lambda self: 42,
__contains__=lambda self, item: item == 1,
__bytes__=lambda self: b'my bytes',
__format__=lambda self, spec: "my format",
__int__=lambda self: 43,
__float__=lambda self: 44.44,
__complex__=lambda self: 3+4j,
__add__=lambda self, other: "add",
__sub__=lambda self, other: "sub",
__mul__=lambda self, other: "mul",
__matmul__=lambda self, other: "matmul",
__truediv__=lambda self, other: "truediv",
__floordiv__=lambda self, other: "floordiv",
__divmod__=lambda self, other: "divmod",
__mod__=lambda self, other: "mod",
__pow__=lambda self, other: "pow",
__lshift__=lambda self, other: "lshift",
__rshift__=lambda self, other: "rshift",
__and__=lambda self, other: "and",
__or__=lambda self, other: "or",
__xor__=lambda self, other: "xor",
__iadd__=lambda self, other: "iadd",
__isub__=lambda self, other: "isub",
__imul__=lambda self, other: "imul",
__imatmul__=lambda self, other: "imatmul",
__itruediv__=lambda self, other: "itruediv",
__ifloordiv__=lambda self, other: "ifloordiv",
__imod__=lambda self, other: "imod",
__ipow__=lambda self, other: "ipow",
__ilshift__=lambda self, other: "ilshift",
__irshift__=lambda self, other: "irshift",
__iand__=lambda self, other: "iand",
__ior__=lambda self, other: "ior",
__ixor__=lambda self, other: "ixor",
__neg__=lambda self: "neg",
__pos__=lambda self: "pos",
__invert__=lambda self: "invert",
__abs__=lambda self: "abs",
)
def f_bool(x: A):
return bool(x)
def f_str(x: A):
return str(x)
def f_repr(x: A):
return repr(x)
def f_call(x: A):
return x(1)
def f_1in(x: A):
return 1 in x
def f_0in(x: A):
return 0 in x
def f_len(x: A):
return len(x)
def f_int(x: A):
return int(x)
def f_float(x: A):
return float(x)
def f_add(x: A):
return x + A.a()
def f_sub(x: A):
return x - A.a()
def f_mul(x: A):
return x * A.a()
def f_div(x: A):
return x / A.a()
def f_floordiv(x: A):
return x // A.a()
def f_matmul(x: A):
return x @ A.a()
def f_mod(x: A):
return x % A.a()
def f_and(x: A):
return x & A.a()
def f_or(x: A):
return x | A.a()
def f_xor(x: A):
return x ^ A.a()
def f_rshift(x: A):
return x >> A.a()
def f_lshift(x: A):
return x << A.a()
def f_pow(x: A):
return x ** A.a()
def f_neg(x: A):
return -x
def f_pos(x: A):
return +x
def f_invert(x: A):
return ~x
def f_abs(x: A):
return abs(x)
def f_iadd(x: A):
x += A.a()
return x
def f_isub(x: A):
x -= A.a()
return x
def f_imul(x: A):
x *= A.a()
return x
def f_idiv(x: A):
x /= A.a()
return x
def f_ifloordiv(x: A):
x //= A.a()
return x
def f_imatmul(x: A):
x @= A.a()
return x
def f_imod(x: A):
x %= A.a()
return x
def f_iand(x: A):
x &= A.a()
return x
def f_ior(x: A):
x |= A.a()
return x
def f_ixor(x: A):
x ^= A.a()
return x
def f_irshift(x: A):
x >>= A.a()
return x
def f_ilshift(x: A):
x <<= A.a()
return x
def f_ipow(x: A):
x **= A.a()
return x
test_cases = [f_int, f_float, f_bool, f_str, f_repr, f_call, f_0in, f_1in, f_len,
f_add, f_sub, f_mul, f_div, f_floordiv, f_matmul, f_mod, f_and, f_or, f_xor, f_rshift, f_lshift, f_pow,
f_neg, f_pos, f_invert, f_abs]
# not supported:
# [f_iadd, f_isub, f_imul, f_idiv, f_ifloordiv, f_imatmul,
# f_imod, f_iand, f_ior, f_ixor, f_irshift, f_ilshift, f_ipow]
for f in test_cases:
compiled_f = Compiled(f)
r1 = f(A.a())
r2 = compiled_f(A.a())
self.assertEqual(r1, r2)
def test_compile_simple_alternative_reverse_methods(self):
A = Alternative("A", a={}, b={},
__radd__=lambda self, other: "radd" + repr(other),
__rsub__=lambda self, other: "rsub" + repr(other),
__rmul__=lambda self, other: "rmul" + repr(other),
__rmatmul__=lambda self, other: "rmatmul" + repr(other),
__rtruediv__=lambda self, other: "rtruediv" + repr(other),
__rfloordiv__=lambda self, other: "rfloordiv" + repr(other),
__rmod__=lambda self, other: "rmod" + repr(other),
__rpow__=lambda self, other: "rpow" + repr(other),
__rlshift__=lambda self, other: "rlshift" + repr(other),
__rrshift__=lambda self, other: "rrshift" + repr(other),
__rand__=lambda self, other: "rand" + repr(other),
__rxor__=lambda self, other: "rxor" + repr(other),
__ror__=lambda self, other: "ror" + repr(other),
)
values = [1, Int16(1), UInt64(1), 1.234, Float32(1.234), True, "abc",
ListOf(int)((1, 2)), ConstDict(str, str)({"a": "1"})]
for v in values:
T = type(v)
def f_radd(v: T, x: A):
return v + x
def f_rsub(v: T, x: A):
return v - x
def f_rmul(v: T, x: A):
return v * x
def f_rmatmul(v: T, x: A):
return v @ x
def f_rtruediv(v: T, x: A):
return v * x
def f_rfloordiv(v: T, x: A):
return v * x
def f_rmod(v: T, x: A):
return v * x
def f_rpow(v: T, x: A):
return v * x
def f_rlshift(v: T, x: A):
return v * x
def f_rrshift(v: T, x: A):
return v * x
def f_rand(v: T, x: A):
return v * x
def f_rxor(v: T, x: A):
return v * x
def f_ror(v: T, x: A):
return v * x
for f in [f_radd, f_rsub, f_rmul, f_rmatmul, f_rtruediv, f_rfloordiv, f_rmod, f_rpow,
f_rlshift, f_rrshift, f_rand, f_rxor, f_ror]:
r1 = f(v, A.a())
compiled_f = Compiled(f)
r2 = compiled_f(v, A.a())
self.assertEqual(r1, r2)
def test_compile_simple_alternative_format(self):
A1 = Alternative("A1", a={}, b={})
A2 = Alternative("A2", a={}, b={},
__str__=lambda self: "my str"
)
A3 = Alternative("A3", a={}, b={},
__format__=lambda self, spec: "my format " + spec
)
def a1_format(x: A1):
return format(x)
def a2_format(x: A2):
return format(x)
def a3_format(x: A3):
return format(x)
def a3_format_spec(x: A3):
return format(x, "spec")
r1 = a1_format(A1.a())
c1_format = Compiled(a1_format)
r2 = c1_format(A1.a())
self.assertEqual(r1, r2)
r1 = a2_format(A2.a())
c2_format = Compiled(a2_format)
r2 = c2_format(A2.a())
self.assertEqual(r1, r2)
r1 = a3_format(A3.a())
c3_format = Compiled(a3_format)
r2 = c3_format(A3.a())
self.assertEqual(r1, r2)
r1 = a3_format_spec(A3.a())
c3_format_spec = Compiled(a3_format_spec)
r2 = c3_format_spec(A3.a())
self.assertEqual(r1, r2)
# This failed when I forgot to support ConcreteAlternativeWrappers
@Entrypoint
def specialized_format(x):
return format(x)
test_values = [A1.a(), A1.b(), A2.a(), A2.b(), A3.a(), A3.b()]
for v in test_values:
r1 = format(v)
r2 = specialized_format(v)
self.assertEqual(r1, r2)
def test_compile_simple_alternative_bytes(self):
A = Alternative("A", a={}, b={},
__bytes__=lambda self: b'my bytes'
)
def f_bytes(x: A):
return bytes(x)
v = A.a()
r1 = f_bytes(v)
c_f = Compiled(f_bytes)
r2 = c_f(v)
self.assertEqual(r1, r2)
# I think this would require nonlocal data
@pytest.mark.skip(reason="not supported")
def test_compile_simple_alternative_attr(self):
def A_getattr(self, n):
return self.d[n]
def A_setattr(self, n, v):
self.d[n] = v
def A_delattr(self, n):
del self.d[n]
A = Alternative("A", a={}, b={},
__getattr__=A_getattr,
__setattr__=A_setattr,
__delattr__=A_delattr
)
def f_getattr1(x: A):
return x.q
def f_getattr2(x: A):
return x.z
def f_setattr1(x: A, s: str):
x.q = s
def f_setattr2(x: A, s: str):
x.z = s
def f_delattr1(x: A):
del x.q
def f_delattr2(x: A):
del x.z
c_getattr1 = Compiled(f_getattr1)
c_getattr2 = Compiled(f_getattr2)
c_setattr1 = Compiled(f_setattr1)
c_setattr2 = Compiled(f_setattr2)
c_delattr1 = Compiled(f_delattr1)
c_delattr2 = Compiled(f_delattr2)
for v in [A.a()]:
f_setattr1(v, "0")
f_setattr2(v, "0")
self.assertEqual(f_getattr1(v), "0")
self.assertEqual(f_getattr1(v), c_getattr1(v))
self.assertEqual(f_getattr2(v), "0")
self.assertEqual(f_getattr2(v), c_getattr2(v))
f_setattr1(v, "1")
self.assertEqual(f_getattr1(v), "1")
self.assertEqual(f_getattr1(v), c_getattr1(v))
self.assertEqual(f_getattr2(v), "0")
self.assertEqual(f_getattr2(v), c_getattr2(v))
c_setattr1(v, "2")
self.assertEqual(f_getattr1(v), "2")
self.assertEqual(f_getattr1(v), c_getattr1(v))
self.assertEqual(f_getattr2(v), "0")
self.assertEqual(f_getattr2(v), c_getattr2(v))
f_setattr2(v, "3")
self.assertEqual(f_getattr1(v), "2")
self.assertEqual(f_getattr1(v), c_getattr1(v))
self.assertEqual(f_getattr2(v), "3")
self.assertEqual(f_getattr2(v), c_getattr2(v))
c_setattr2(v, "4")
self.assertEqual(f_getattr1(v), "2")
self.assertEqual(f_getattr1(v), c_getattr1(v))
self.assertEqual(f_getattr2(v), "4")
self.assertEqual(f_getattr2(v), c_getattr2(v))
f_delattr1(v)
with self.assertRaises(KeyError):
f_getattr1(v)
with self.assertRaises(KeyError):
c_getattr1(v)
self.assertEqual(f_getattr2(v), "4")
self.assertEqual(f_getattr2(v), c_getattr2(v))
f_delattr2(v)
with self.assertRaises(KeyError):
f_getattr1(v)
with self.assertRaises(TypeError):
c_getattr1(v)
with self.assertRaises(KeyError):
f_getattr2(v)
with self.assertRaises(TypeError):
c_getattr2(v)
f_setattr1(v, "5")
f_setattr2(v, "6")
c_delattr1(v)
with self.assertRaises(KeyError):
f_getattr1(v)
with self.assertRaises(TypeError):
c_getattr1(v)
self.assertEqual(f_getattr2(v), "6")
self.assertEqual(f_getattr2(v), c_getattr2(v))
c_delattr2(v)
with self.assertRaises(KeyError):
f_getattr1(v)
with self.assertRaises(TypeError):
c_getattr1(v)
with self.assertRaises(KeyError):
f_getattr2(v)
with self.assertRaises(TypeError):
c_getattr2(v)
def test_compile_simple_alternative_float_methods(self):
# if __float__ is defined, then floor() and ceil() are based off this conversion,
# when __floor__ and __ceil__ are not defined
A = Alternative("A", a={}, b={},
__float__=lambda self: 1234.5
)
def f_floor(x: A):
return floor(x)
def f_ceil(x: A):
return ceil(x)
test_cases = [f_floor, f_ceil]
for f in test_cases:
r1 = f(A.a())
compiled_f = Compiled(f)
r2 = compiled_f(A.a())
self.assertEqual(r1, r2)
B = Alternative("B", a={}, b={},
__round__=lambda self, n: 1234 + n,
__trunc__=lambda self: 1,
__floor__=lambda self: 2,
__ceil__=lambda self: 3
)
def f_round0(x: B):
return round(x, 0)
def f_round1(x: B):
return round(x, 1)
def f_round2(x: B):
return round(x, 2)
def f_round_1(x: B):
return round(x, -1)
def f_round_2(x: B):
return round(x, -2)
def f_trunc(x: B):
return trunc(x)
def f_floor(x: B):
return floor(x)
def f_ceil(x: B):
return ceil(x)
test_cases = [f_round0, f_round1, f_round2, f_round_1, f_round_2, f_trunc, f_floor, f_ceil]
for f in test_cases:
r1 = f(B.a())
compiled_f = Compiled(f)
r2 = compiled_f(B.a())
self.assertEqual(r1, r2)
def test_compile_simple_dir(self):
# The interpreted dir() calls __dir__() and sorts the result.
# I expected the compiled dir() to do the same thing, but it doesn't sort.
# So if you append these elements out of order, the test will fail.
A0 = Alternative("A", a={}, b={})
def A_dir(self):
x = ListOf(str)()
x.append("x")
x.append("y")
x.append("z")
return x
A = Alternative("A", a={}, b={},
__dir__=A_dir,
)
def f_dir0(x: A0):
return dir(x)
def f_dir(x: A):
return dir(x)
for f in [f_dir0]:
compiled_f = Compiled(f)
r1 = f(A0.a())
r2 = compiled_f(A0.a())
self.assertEqual(r1, r2)
for f in [f_dir]:
compiled_f = Compiled(f)
r1 = f(A.a())
r2 = compiled_f(A.a())
self.assertEqual(r1, r2)
c0 = Compiled(f_dir0)
c = Compiled(f_dir)
initMem = psutil.Process().memory_info().rss / 1024 ** 2
for i in range(10000):
c0(A0.a())
c(A.a())
finalMem = psutil.Process().memory_info().rss / 1024 ** 2
self.assertTrue(finalMem < initMem + 2)
def test_compile_simple_alternative_comparison_defaults(self):
B = Alternative("B", a={}, b={})
def f_eq(x: B, y: B):
return x == y
def f_ne(x: B, y: B):
return x != y
def f_lt(x: B, y: B):
return x < y
def f_gt(x: B, y: B):
return x > y
def f_le(x: B, y: B):
return x <= y
def f_ge(x: B, y: B):
return x >= y
def f_hash(x: B):
return hash(x)
values = [B.a(), B.b()]
test_cases = [f_eq, f_ne, f_lt, f_gt, f_le, f_ge]
for f in test_cases:
for v1 in values:
for v2 in values:
compiled_f = Compiled(f)
r1 = f(v1, v2)
r2 = compiled_f(v1, v2)
self.assertEqual(r1, r2)
test_cases = [f_hash]
for f in test_cases:
for v in values:
compiled_f = Compiled(f)
r1 = f(v)
r2 = compiled_f(v)
self.assertEqual(r1, r2)
def test_compile_simple_alternative_comparison_methods(self):
C = Alternative("C", a={}, b={},
__eq__=lambda self, other: True,
__ne__=lambda self, other: False,
__lt__=lambda self, other: True,
__gt__=lambda self, other: False,
__le__=lambda self, other: True,
__ge__=lambda self, other: False,
__hash__=lambda self: 123,
)
def f_eq(x: C):
return x == C.a()
def f_ne(x: C):
return x != C.a()
def f_lt(x: C):
return x < C.a()
def f_gt(x: C):
return x > C.a()
def f_le(x: C):
return x <= C.a()
def f_ge(x: C):
return x >= C.a()
def f_hash(x: C):
return hash(x)
test_cases = [f_eq, f_ne, f_lt, f_gt, f_le, f_ge, f_hash]
for f in test_cases:
compiled_f = Compiled(f)
r1 = f(C.a())
r2 = compiled_f(C.a())
self.assertEqual(r1, r2)
def test_compile_alternative_float_conv(self):
A0 = Alternative("A0", a={}, b={},
__int__=lambda self: 123,
__float__=lambda self: 1234.5
)
A = Alternative("A", a={'a': int}, b={'b': str},
__int__=lambda self: 123,
__float__=lambda self: 1234.5
)
def f(x: float):
return x
def g(x: int):
return x
c_f = Compiled(f)
c_g = Compiled(g)
with self.assertRaises(TypeError):
c_f(A.a())
with self.assertRaises(TypeError):
c_f(A0.a())
with self.assertRaises(TypeError):
c_g(A.a())
with self.assertRaises(TypeError):
c_g(A0.a())
def test_compile_alternative_missing_inplace_fallback(self):
def A_add(self, other):
return A.b(" add" + other.b)
def A_sub(self, other):
return A.b(" sub" + other.b)
def A_mul(self, other):
self.s += " mul" + other.s
return self
def A_matmul(self, other):
self.s += " matmul" + other.s
return self
def A_truediv(self, other):
self.s += " truediv" + other.s
return self
def A_floordiv(self, other):
self.s += " floordiv" + other.s
return self
def A_mod(self, other):
self.s += " mod" + other.s
return self
def A_pow(self, other):
self.s += " pow" + other.s
return self
def A_lshift(self, other):
self.s += " lshift" + other.s
return self
def A_rshift(self, other):
self.s += " rshift" + other.s
return self
def A_and(self, other):
self.s += " and" + other.s
return self
def A_or(self, other):
self.s += " or" + other.s
return self
def A_xor(self, other):
self.s += " xor" + other.s
return self
A = Alternative("A", a={'a': int}, b={'b': str},
__add__=lambda x, y: A.b(x.b + " add" + y.b),
__sub__=lambda x, y: A.b(x.b + " sub" + y.b),
__mul__=lambda x, y: A.b(x.b + " mul" + y.b),
__matmul__=lambda x, y: A.b(x.b + " matmul" + y.b),
__truediv__=lambda x, y: A.b(x.b + " truediv" + y.b),
__floordiv__=lambda x, y: A.b(x.b + " floordiv" + y.b),
__mod__=lambda x, y: A.b(x.b + " mod" + y.b),
__pow__=lambda x, y: A.b(x.b + " pow" + y.b),
__lshift__=lambda x, y: A.b(x.b + " lshift" + y.b),
__rshift__=lambda x, y: A.b(x.b + " rshift" + y.b),
__and__=lambda x, y: A.b(x.b + " and" + y.b),
__or__=lambda x, y: A.b(x.b + " or" + y.b),
__xor__=lambda x, y: A.b(x.b + " xor" + y.b)
)
def inplace(x: A):
x += A.b()
x -= A.b()
x *= A.b()
x @= A.b()
x /= A.b()
x //= A.b()
x %= A.b()
x **= A.b()
x <<= A.b()
x >>= A.b()
x &= A.b()
x |= A.b()
x ^= A.b()
return x
expected = A.b("start add sub mul matmul truediv floordiv mod pow lshift rshift and or xor")
v = A.b("start")
r1 = inplace(v)
self.assertEqual(r1, expected)
v = A.b("start")
r2 = Compiled(inplace)(v)
self.assertEqual(r2, expected)
def test_compile_alternative_methods(self):
def method(self, x):
return self.y + x
A = Alternative(
"A",
Y=dict(y=int),
method=method,
)
def callMethod(a: A, x):
return a.method(x)
self.assertEqual(
callMethod(A.Y(y=20), 10),
Entrypoint(callMethod)(A.Y(y=20), 10)
)
def callMethod2(a: A.Y, x):
return a.method(x)
self.assertEqual(
callMethod2(A.Y(y=20), 10),
Entrypoint(callMethod2)(A.Y(y=20), 10)
)
@pytest.mark.skip(reason="not supported")
def test_context_manager(self):
def A_enter(self):
self.a.append("enter")
return "target"
def A_exit(self, exc_type, exc_val, exc_tb):
self.a.append("exit")
self.a.append(str(exc_type))
return True
A = Alternative("A", a={'a': ListOf(str)},
__enter__=A_enter,
__exit__=A_exit
)
def f0(x: int) -> ListOf(str):
context_manager = A.a()
with context_manager:
context_manager.a.append(str(1 / x))
return context_manager.a
def f(x: int) -> ListOf(str):
context_manager = A.a()
with context_manager as v:
context_manager.a.append(v)
context_manager.a.append(str(1 / x))
return context_manager.a
class ConMan():
def __init__(self):
self.a = []
def __enter__(self):
self.a.append("Enter")
return "Target"
def __exit__(self, exc_type, exc_val, exc_tb):
self.a.append("Exit")
if exc_type is not None:
self.a.append(str(exc_type))
if exc_val is not None:
self.a.append(str(exc_val))
if exc_tb is not None:
self.a.append(str(exc_tb))
return True
def g(x: int) -> ListOf(str):
context_manager = ConMan()
with context_manager as v:
context_manager.a.append(v)
context_manager.a.append(str(1 / x))
return context_manager.a
for fn in [f, g]:
c_fn = Compiled(fn)
for v in [0, 1]:
r0 = fn(v)
r1 = c_fn(v)
self.assertEqual(r0, r1)
def test_matches_on_alternative(self):
A = Alternative("A", X=dict(x=int))
@Entrypoint
def checkMatchesX(x):
return x.matches.X
assert checkMatchesX(A.X())
def test_matches_on_oneof_alternative(self):
A = Alternative("A", X=dict(x=int))
B = Alternative("B", Y=dict(y=int))
@Entrypoint
def checkMatchesX(x: OneOf(A, B, int)):
return x.matches.X
assert checkMatchesX(A.X())
assert not checkMatchesX(B.Y())
|
test/integration/test_object_labels.py | jonaslalin/label-maker | 428 | 12667589 | <gh_stars>100-1000
"""Test that the following CLI command returns the expected outputs
label-maker labels -d integration-od -c test/fixtures/integration/config.integration.object_detection.json"""
import unittest
from os import makedirs
from shutil import copyfile, rmtree
import subprocess
import numpy as np
class TestObjectDetectionLabel(unittest.TestCase):
"""Tests for object detection label creation"""
@classmethod
def setUpClass(cls):
makedirs('integration-od')
copyfile('test/fixtures/integration/portugal-z17.mbtiles', 'integration-od/portugal-z17.mbtiles')
@classmethod
def tearDownClass(cls):
rmtree('integration-od')
def test_cli(self):
"""Verify labels.npz produced by CLI"""
cmd = 'label-maker labels -d integration-od -c test/fixtures/integration/config.integration.object_detection.json'
cmd = cmd.split(' ')
subprocess.run(cmd, universal_newlines=True)
labels = np.load("integration-od/labels.npz")
expected_bboxes = dict()
expected_bboxes['62092-50162-17'] = np.empty((0, 5))
expected_bboxes['62092-50163-17'] = np.array([
[209, 192, 255, 255, 6], [253, 251, 255, 255, 6]
])
expected_bboxes['62092-50164-17'] = np.array([
[209, 0, 250, 28, 6], [242, 0, 255, 28, 6],
[222, 13, 235, 66, 6], [87, 20, 250, 255, 6]
])
expected_bboxes['62093-50162-17'] = np.array([
[81, 145, 128, 255, 6], [124, 0, 218, 255, 6],
[207, 0, 247, 153, 6], [140, 108, 193, 255, 6],
[125, 236, 152, 255, 6], [162, 177, 176, 216, 6],
[170, 151, 214, 179, 6], [141, 166, 244, 255, 6],
[203, 88, 255, 186, 6]
])
expected_bboxes['62093-50163-17'] = np.array([
[81, 0, 125, 15, 6], [117, 0, 133, 17, 6],
[119, 0, 151, 36, 6], [125, 0, 140, 7, 6],
[141, 0, 187, 7, 6], [64, 32, 91, 60, 4],
[84, 50, 106, 64, 6], [111, 9, 127, 26, 6],
[111, 18, 127, 35, 6], [84, 15, 119, 52, 6],
[74, 6, 129, 69, 5], [93, 24, 123, 46, 6],
[88, 27, 127, 93, 6], [0, 85, 96, 213, 6],
[0, 85, 96, 255, 6], [115, 38, 255, 100, 6]
])
expected_bboxes['62094-50162-17'] = np.array([
[67, 0, 172, 248, 6], [0, 172, 90, 255, 6],
[91, 170, 255, 227, 6]
])
expected_bboxes['62093-50164-17'] = np.array([
[0, 0, 12, 22, 6], [207, 158, 255, 195, 6]
])
expected_bboxes['62094-50163-17'] = np.array([
[73, 0, 255, 78, 6], [30, 166, 60, 196, 1],
[30, 166, 60, 196, 2], [203, 129, 255, 255, 6],
[0, 90, 255, 138, 6]
])
expected_bboxes['62094-50164-17'] = np.array([
[158, 0, 216, 82, 6], [0, 108, 147, 173, 6],
[139, 74, 254, 143, 6], [240, 90, 255, 232, 6]
])
self.assertEqual(len(labels.files), len(expected_bboxes.keys())) # First check the number of tiles
for tile in labels.files:
self.assertTrue(np.array_equal(expected_bboxes[tile], labels[tile])) # Now, bboxes
|
reproducibility.py | liaopeiyuan/ml-arsenal-public | 280 | 12667592 | from dependencies import random, np, torch, os
from settings import *
print("Fixing random seed for reproducibility...")
SEED = 35202 #123 #35202 #int(time.time()) #
random.seed(SEED)
np.random.seed(SEED)
torch.manual_seed(SEED)
torch.cuda.manual_seed_all(SEED)
print ('\tSetting random seed to {}.'.format(SEED))
print('')
torch.backends.cudnn.benchmark = True  # uses the inbuilt cudnn auto-tuner to find the fastest convolution algorithms
torch.backends.cudnn.enabled = True
print ('Setting CUDA environment...')
print ('\ttorch.__version__ =', torch.__version__)
print ('\ttorch.version.cuda =', torch.version.cuda)
print ('\ttorch.backends.cudnn.version() =', torch.backends.cudnn.version())
os.environ['CUDA_VISIBLE_DEVICES']=CUDA_DEVICES
if MODE=='cpu':
print("Warning: you've set the mode to CPU; \nthe code won't run on NVIDIA GPU even the CUDA and CUDNN queries are successful.")
try:
print ('\tos[\'CUDA_VISIBLE_DEVICES\'] =',os.environ['CUDA_VISIBLE_DEVICES'])
NUM_CUDA_DEVICES = len(os.environ['CUDA_VISIBLE_DEVICES'].split(','))
except Exception:
print ('\tos[\'CUDA_VISIBLE_DEVICES\'] =','None')
NUM_CUDA_DEVICES = 1
print ('\ttorch.cuda.device_count() =', torch.cuda.device_count())
print('') |
atlas/backbone2d.py | GainiK/Atlas-1 | 1,571 | 12667603 | <reponame>GainiK/Atlas-1
# from Detectron2: (https://github.com/facebookresearch/detectron2)
import numpy as np
import torch
from torch import nn
from torch.nn import functional as F
from detectron2.layers import Conv2d, get_norm
from detectron2.modeling.backbone import build_backbone as d2_build_backbone
import fvcore.nn.weight_init as weight_init
def build_backbone2d(cfg):
""" Builds 2D feature extractor backbone network from Detectron2."""
output_dim = cfg.MODEL.BACKBONE3D.CHANNELS[0]
norm = cfg.MODEL.FPN.NORM
output_stride = 4 # TODO: make configurable
backbone = d2_build_backbone(cfg)
feature_extractor = FPNFeature(
backbone.output_shape(), output_dim, output_stride, norm)
# load pretrained backbone
if cfg.MODEL.BACKBONE.WEIGHTS:
state_dict = torch.load(cfg.MODEL.BACKBONE.WEIGHTS)
backbone.load_state_dict(state_dict)
return nn.Sequential(backbone, feature_extractor), output_stride
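# Illustrative usage sketch (not part of the original file; `cfg` is assumed to be the
# project's config node and the output shape is approximate):
#   backbone2d, stride = build_backbone2d(cfg)
#   feats = backbone2d(images)  # roughly [B, cfg.MODEL.BACKBONE3D.CHANNELS[0], H // stride, W // stride]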
class FPNFeature(nn.Module):
""" Converts feature pyrimid to singe feature map (from Detectron2)"""
def __init__(self, input_shape, output_dim=32, output_stride=4, norm='BN'):
super().__init__()
# fmt: off
self.in_features = ["p2", "p3", "p4", "p5"]
feature_strides = {k: v.stride for k, v in input_shape.items()}
feature_channels = {k: v.channels for k, v in input_shape.items()}
# fmt: on
self.scale_heads = []
for in_feature in self.in_features:
head_ops = []
head_length = max(
1, int(np.log2(feature_strides[in_feature]) - np.log2(output_stride))
)
for k in range(head_length):
conv = Conv2d(
feature_channels[in_feature] if k == 0 else output_dim,
output_dim,
kernel_size=3,
stride=1,
padding=1,
bias=not norm,
norm=get_norm(norm, output_dim),
activation=F.relu,
)
weight_init.c2_msra_fill(conv)
head_ops.append(conv)
if feature_strides[in_feature] != output_stride:
head_ops.append(
nn.Upsample(scale_factor=2, mode="bilinear", align_corners=False)
)
self.scale_heads.append(nn.Sequential(*head_ops))
self.add_module(in_feature, self.scale_heads[-1])
def forward(self, features):
for i, f in enumerate(self.in_features):
if i == 0:
x = self.scale_heads[i](features[f])
else:
x = x + self.scale_heads[i](features[f])
return x
|
octohatrack.py | LABHR/octohatrack | 143 | 12667652 | <reponame>LABHR/octohatrack
import octohatrack
octohatrack.main()
|
events/forms.py | buketkonuk/pythondotorg | 911 | 12667663 | <reponame>buketkonuk/pythondotorg
from django import forms
from django.conf import settings
from django.contrib.sites.models import Site
from django.core.mail import send_mail
from django.template import loader
def set_placeholder(value):
return forms.TextInput(attrs={'placeholder': value, 'required': 'required'})
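# Illustrative example (the value is hypothetical): set_placeholder('300+') returns a TextInput
# whose rendered <input> carries placeholder="300+" and required="required".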
class EventForm(forms.Form):
event_name = forms.CharField(widget=set_placeholder(
'Name of the event (including the user group name for '
'user group events)'
))
event_type = forms.CharField(widget=set_placeholder(
'conference, bar camp, sprint, user group meeting, etc.'
))
python_focus = forms.CharField(widget=set_placeholder(
'Data analytics, Web Development, Country-wide conference, etc...'
))
expected_attendees = forms.CharField(widget=set_placeholder('300+'))
location = forms.CharField(widget=set_placeholder(
'IFEMA building, Madrid, Spain'
))
date_from = forms.DateField(widget=forms.SelectDateWidget())
date_to = forms.DateField(widget=forms.SelectDateWidget())
recurrence = forms.CharField(widget=set_placeholder(
'None, every second Thursday, monthly, etc.'
))
link = forms.URLField(label='Website URL')
description = forms.CharField(widget=forms.Textarea)
def send_email(self, creator):
context = {
'event': self.cleaned_data,
'creator': creator,
'site': Site.objects.get_current(),
}
text_message_template = loader.get_template('events/email/new_event.txt')
text_message = text_message_template.render(context)
send_mail(
subject='New event submission: "{}"'.format(self.cleaned_data['event_name']),
message=text_message,
from_email=creator.email,
recipient_list=[settings.EVENTS_TO_EMAIL],
)
|
pudzu/sandbox/markov.py | Udzu/pudzu | 119 | 12667702 | import argparse
import bisect
import functools
import itertools
import operator as op
import pickle
import random
import string
import sys
import unicodedata
from collections import Counter
# Simple Markov n-gram based generator.
def generate_ngrams(iterable, n):
"""Generator that yields n-grams from a sequence."""
return zip(*[itertools.islice(it, i, None) for i, it in enumerate(itertools.tee(iterable, n))])
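# Illustrative example (not part of the original module):
#   list(generate_ngrams("abcd", 2)) -> [('a', 'b'), ('b', 'c'), ('c', 'd')]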
def counter_random(counter, filter=None):
"""Return a single random elements from the Counter collection, weighted by count."""
if filter is not None:
counter = {k: v for k, v in counter.items() if filter(k)}
if len(counter) == 0:
raise Exception("No matching elements in Counter collection")
seq = list(counter.keys())
cum = list(itertools.accumulate(list(counter.values()), op.add))
return seq[bisect.bisect_left(cum, random.uniform(0, cum[-1]))]
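# Illustrative example (hypothetical counts): counter_random(Counter({'a': 3, 'b': 1}))
# returns 'a' about 75% of the time; passing filter=lambda k: k != 'a' always yields 'b'.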
def latin_normalise(i, letters=string.ascii_letters + " ", lowercase=True):
"""Example normalisation function that strips everything apart from letters and spaces (even accents)."""
return (nc for c in i for cc in (c.lower() if lowercase else c) for nc in (cc if cc in letters else unicodedata.normalize("NFKD", cc)) if nc in letters)
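# Illustrative example (not part of the original module):
#   "".join(latin_normalise("Héllo, Wörld!")) -> "hello world"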
class MarkovGenerator(object):
"""Markov Chain n-gram-based generator for arbitrary iterables."""
def __init__(self, order):
"""Initialise generator for a given n-gram order."""
self.n = order
self.markov_dict = {}
self.prob_dict = Counter()
def reset(self):
"""Reset generator."""
self.__init__(self.n)
def train(self, iterable):
"""Train generator on an iterable."""
for ngram in generate_ngrams(iterable, self.n + 1):
self.markov_dict.setdefault(ngram[: self.n], Counter()).update([ngram[self.n]])
self.prob_dict.update([ngram[: self.n]])
def train_file(self, filename, encoding="utf-8", convert=itertools.chain.from_iterable, normalise=lambda i: i):
"""Train generator on a file. Accepts optional convert function (defaults to reading characters) and
normalise function (defaults to the identity)."""
with open(filename, "r", encoding=encoding) as f:
self.train(normalise(convert(f)))
def render(self, stop_when, start_ngram=None):
"""Return a tuple using the trained probabilities. Stop condition can be a maximum length or function."""
stop_fn = stop_when if callable(stop_when) else lambda o: len(o) >= stop_when
start_fn = start_ngram if (callable(start_ngram) or start_ngram is None) else lambda n: n == tuple(start_ngram)
ngram = counter_random(self.prob_dict, filter=start_fn)
output = ngram
while True:
if stop_fn(output):
break
elif ngram in self.markov_dict:
v = counter_random(self.markov_dict[ngram])
output += (v,)
ngram = ngram[1:] + (v,)
else:
ngram = counter_random(self.prob_dict)
return output
def render_word(self, min_length=3, max_length=12):
"""Generates a word. Assumes training on characters including spaces.
Doesn't filter out real words."""
while True:
word = "".join(self.render(lambda o: len(o) > 1 and o[-1] == " ", lambda n: n[0] == " "))
if min_length <= len(word.strip()) <= max_length:
return word.strip()
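# Minimal usage sketch (corpus file name and order are illustrative, not from this module):
#   mk = MarkovGenerator(order=2)
#   mk.train_file("corpus.txt", normalise=latin_normalise)
#   print(mk.render_word(min_length=3, max_length=12))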
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Generate pseudowords using Markov chains")
parser.add_argument("corpus", type=str, help="text corpus name")
parser.add_argument("number", type=int, help="number of words to generate")
parser.add_argument("-n", "--order", type=int, help="n-gram order [2]", default=2)
parser.add_argument("-l", "--letters", type=str, help="letters to keep [a-z/A-Z]", default=string.ascii_letters)
parser.add_argument("-c", "--casesensitive", action="store_true", help="case sensitive generator [False]")
parser.add_argument("-r", "--regenerate", action="store_true", help="always regenerate generator [False]")
args = parser.parse_args()
pickled_dict = "{}_{}.p".format(args.corpus, args.order)
try:
if args.regenerate:
raise FileNotFoundError
print("Checking for cached generator at {}".format(pickled_dict), file=sys.stderr)
with open(pickled_dict, "rb") as f:
mk = pickle.load(f)
except FileNotFoundError:
print("Training from corpus (may take a while).", file=sys.stderr)
mk = MarkovGenerator(order=args.order)
mk.train_file(args.corpus, normalise=functools.partial(latin_normalise, letters=args.letters + " ", lowercase=not args.casesensitive))
print("Saving generated generator to {}".format(pickled_dict), file=sys.stderr)
with open(pickled_dict, "wb") as f:
pickle.dump(mk, f, pickle.HIGHEST_PROTOCOL)
for i in range(args.number):
print(mk.render_word())
|
src/main/python/smart/analyze_events_v2.py | cday97/beam | 123 | 12667767 | <reponame>cday97/beam<filename>src/main/python/smart/analyze_events_v2.py
import json
import plotly.graph_objects as go
import pandas as pd
import os
from jsonmerge import merge
def get_pooling_metrics(_data):
count_of_multi_passenger_pool_trips = 0
count_of_one_passenger_pool_trips = 0
count_of_solo_trips = 0
count_of_unmatched_pool_requests = 0
count_of_unmatched_solo_requests = 0
sum_deadheading_distance_traveled = 0.0
sum_ride_hail_distance_traveled = 0.0
mode_choice_attempt = {}
person_has_shared_a_trip = {}
passengers_per_veh = {}
person_in_veh = {}
ct_nb_requests = {}
chained_trips_requests = 0
chained_trips_count = 0
for row in _data.itertuples():
person = row.person
vehicle = row.vehicle
mode = row.mode
event = row.type
passengers = row.numPassengers
distance = row.length
if event == "ModeChoice":
if str(person).startswith("rideHailAgent"):
print("ride hail driver with mode choice, does it ever occur !?")
elif mode.startswith("ride_hail"):
mode_choice_attempt[person] = mode
elif person in mode_choice_attempt and not mode_choice_attempt[person].endswith("unmatched"):
mode_choice_attempt[person] = mode_choice_attempt[person] + "_unmatched"
elif event == "PersonEntersVehicle":
if person not in mode_choice_attempt:
continue
chosen_mode = mode_choice_attempt[person]
if chosen_mode.endswith("unmatched"):
if chosen_mode.startswith("ride_hail_pooled"):
count_of_unmatched_pool_requests += 1
else:
count_of_unmatched_solo_requests += 1
del mode_choice_attempt[person]
elif not vehicle.startswith("rideHailVehicle"):
i = 0
# agent started walking towards ride hail vehicle
elif chosen_mode == "ride_hail_pooled":
person_in_veh[person] = vehicle
prev_pool = passengers_per_veh[vehicle] if vehicle in passengers_per_veh else 0
passengers_per_veh[vehicle] = prev_pool + 1
for p in {k: v for k, v in person_in_veh.items() if v == vehicle}:
if p not in person_has_shared_a_trip or not person_has_shared_a_trip[p]:
person_has_shared_a_trip[p] = passengers_per_veh[vehicle] > 1
# chained trips metrics
if prev_pool == 0:
ct_nb_requests[vehicle] = 0
ct_nb_requests[vehicle] += 1
else:
count_of_solo_trips += 1
elif event == "PersonLeavesVehicle":
if person not in mode_choice_attempt:
continue
if not vehicle.startswith("rideHailVehicle"):
i = 0
# agent ended walking towards the ride hail vehicle
elif mode_choice_attempt[person] == "ride_hail_pooled":
if passengers_per_veh[vehicle] > 1:
person_has_shared_a_trip[person] = True
if person_has_shared_a_trip[person] is True:
count_of_multi_passenger_pool_trips += 1
else:
count_of_one_passenger_pool_trips += 1
del person_has_shared_a_trip[person]
del person_in_veh[person]
passengers_per_veh[vehicle] -= 1
# chained trips metrics
if passengers_per_veh[vehicle] == 0:
chained_trips_requests = (chained_trips_requests * chained_trips_count + ct_nb_requests[vehicle])/(chained_trips_count+1)
chained_trips_count += 1
del mode_choice_attempt[person]
elif event == "PathTraversal":
if not vehicle.startswith("rideHailVehicle"):
continue
if int(passengers) == 0:
sum_deadheading_distance_traveled += float(distance)
sum_ride_hail_distance_traveled += float(distance)
del _data
tot_pool_trips = count_of_multi_passenger_pool_trips + count_of_one_passenger_pool_trips + \
count_of_unmatched_pool_requests
tot_solo_trips = count_of_solo_trips + count_of_unmatched_solo_requests
tot_rh_trips = tot_pool_trips + tot_solo_trips
tot_rh_unmatched = count_of_unmatched_pool_requests + count_of_unmatched_solo_requests
multi_passengers_trips_per_pool_trips = 0 if tot_pool_trips == 0 \
else count_of_multi_passenger_pool_trips / tot_pool_trips
multi_passengers_trips_per_ride_hail_trips = 0 if tot_rh_trips == 0 \
else count_of_multi_passenger_pool_trips / tot_rh_trips
unmatched_per_ride_hail_requests = 0 if tot_rh_trips == 0 \
else tot_rh_unmatched / tot_rh_trips
deadheading_per_ride_hail_trips = 0 if sum_ride_hail_distance_traveled == 0 \
else sum_deadheading_distance_traveled / sum_ride_hail_distance_traveled
return {
"ride_hail_requests": tot_rh_trips,
"ride_hail_solo_requests": count_of_solo_trips + count_of_unmatched_solo_requests,
"ride_hail_pool_requests": tot_pool_trips + count_of_unmatched_pool_requests,
"multi_passenger_pool_trips": count_of_multi_passenger_pool_trips,
"one_passenger_pool_trips": count_of_one_passenger_pool_trips,
"solo_trips": count_of_solo_trips,
"unmatched_pool_requests": count_of_unmatched_pool_requests,
"unmatched_solo_requests": count_of_unmatched_solo_requests,
"deadheading_distance_traveled": sum_deadheading_distance_traveled,
"ride_hail_distance_traveled": sum_ride_hail_distance_traveled,
"multi_passengers_trips_per_pool_trips": multi_passengers_trips_per_pool_trips,
"multi_passengers_trips_per_ride_hail_trips": multi_passengers_trips_per_ride_hail_trips,
"unmatched_per_ride_hail_requests": unmatched_per_ride_hail_requests,
"deadheading_per_ride_hail_trips": deadheading_per_ride_hail_trips,
"chained_trips_requests": chained_trips_requests,
"chained_trips_count": chained_trips_count
}
def get_all_metrics(filename, __local_file_path):
metrics_json = {}
pool_metrics_file_path = "{}.pooling-metrics.json".format(__local_file_path)
if os.path.exists(pool_metrics_file_path):
with open(pool_metrics_file_path) as f:
metrics_json = json.load(f)
compression = None
if filename.endswith(".gz"):
compression = 'gzip'
data = pd.read_csv(filename, sep=",", index_col=None, header=0, compression=compression)
modeChoice = data.loc[data['type'] == 'ModeChoice'].dropna(how='all', axis=1)
pathTraversal = data.loc[data['type'] == 'PathTraversal'].dropna(how='all', axis=1)
print("get_all_metrics ...")
if len(metrics_json) == 0:
ride_hail_mc = modeChoice[modeChoice['mode'].str.startswith('ride_hail')]
ride_hail_mc_users = set(ride_hail_mc['person'])
data2 = data[(data['type'].isin(['PathTraversal']) & data['vehicle'].str.startswith('rideHailVehicle')) |
(data['type'].isin(['ModeChoice', 'PersonEntersVehicle', 'PersonLeavesVehicle']) &
data['person'].isin(ride_hail_mc_users))]
del data
metrics_json = get_pooling_metrics(data2)
with open(pool_metrics_file_path, 'w') as outfile:
json.dump(metrics_json, outfile)
pooling_sankey_path = __local_file_path.rsplit("/", 1)[0] + "/sankey/" + __local_file_path.rsplit("/", 1)[1]
#generate_sankey_for_pooling(metrics_json, pooling_sankey_path)
else:
del data
pathTraversal['miles'] = pathTraversal['length'] / 1609.34
pathTraversal['gallons'] = (pathTraversal['primaryFuel'] + pathTraversal['secondaryFuel']) * 8.3141841e-9
pathTraversal['mpg'] = pathTraversal['miles'] / pathTraversal['gallons']
pathTraversal['startingPrimaryFuelLevel'] = pathTraversal['primaryFuelLevel'] + pathTraversal['primaryFuel']
pathTraversal['mode_extended'] = pathTraversal['mode']
pathTraversal['isRH'] = pathTraversal['vehicle'].str.contains('rideHail')
pathTraversal['isCAV'] = pathTraversal['vehicleType'].str.contains('L5')
pathTraversal.loc[pathTraversal['isRH'], 'mode_extended'] += '_RH'
pathTraversal.loc[pathTraversal['isCAV'], 'mode_extended'] += '_CAV'
pathTraversal['trueOccupancy'] = pathTraversal['numPassengers']
pathTraversal.loc[pathTraversal['mode_extended'] == 'car', 'trueOccupancy'] += 1
pathTraversal.loc[pathTraversal['mode_extended'] == 'walk', 'trueOccupancy'] = 1
pathTraversal.loc[pathTraversal['mode_extended'] == 'bike', 'trueOccupancy'] = 1
pathTraversal['vehicleMiles'] = pathTraversal['length']/1609.34
pathTraversal['passengerMiles'] = (pathTraversal['length'] * pathTraversal['trueOccupancy'])/1609.34
pathTraversal['vehicleHours'] = (pathTraversal['arrivalTime'] - pathTraversal['departureTime'])/3600
pathTraversal['passengerHours'] = pathTraversal['vehicleHours'] * pathTraversal['trueOccupancy']
pathTraversal = pathTraversal.loc[~((pathTraversal['mode']=='walk') & (pathTraversal['vehicleHours']>2)),:]
lightDutyVehiclePathTraversals=pathTraversal.loc[(pathTraversal['vehicleType'].str.contains("BUS")==False) &
(pathTraversal['vehicleType'].str.contains("BIKE")==False) &
(pathTraversal['vehicleType'].str.contains("BODY")==False) &
(pathTraversal['vehicleType'].str.contains("CABLE")==False) &
(pathTraversal['vehicleType'].str.contains("FERRY")==False) &
(pathTraversal['vehicleType'].str.contains("SUBWAY")==False) &
(pathTraversal['vehicleType'].str.contains("TRAM")==False) &
(pathTraversal['vehicleType'].str.contains("TRAIN")==False),:]
metrics_json['total_VHT_LightDutyVehicles'] = lightDutyVehiclePathTraversals['vehicleHours'].sum()
modeChoiceTotals = modeChoice.groupby('mode').agg({'person': 'count', 'length': 'sum'})
for mode in modeChoiceTotals.index:
metrics_json[mode+'_counts'] = int(modeChoiceTotals.loc[mode,'person'])
pathTraversalModes = pathTraversal.groupby('mode_extended').agg({'vehicleMiles': 'sum', 'primaryFuel': 'sum', 'secondaryFuel': 'sum', 'passengerMiles': 'sum','vehicleHours': 'sum', 'passengerHours':'sum'})
for mode in pathTraversalModes.index:
metrics_json['VMT_' + mode] = float(pathTraversalModes.loc[mode, 'vehicleMiles'])
metrics_json['PMT_' + mode] = float(pathTraversalModes.loc[mode, 'passengerMiles'])
metrics_json['VHT_' + mode] = float(pathTraversalModes.loc[mode, 'vehicleHours'])
metrics_json['PHT_' + mode] = float(pathTraversalModes.loc[mode, 'passengerHours'])
metrics_json['Energy_' + mode] = float(pathTraversalModes.loc[mode, 'primaryFuel'] + pathTraversalModes.loc[mode, 'secondaryFuel'])
for mode in pathTraversalModes.index:
metrics_json['VMT_' + mode + "_empty"] = float(pathTraversal.loc[(pathTraversal['mode_extended'] == mode) & (pathTraversal['trueOccupancy'] == 0), 'vehicleMiles'].sum())
metrics_json['VMT_' + mode + "_shared"] = float(pathTraversal.loc[(pathTraversal['mode_extended'] == mode) & (pathTraversal['trueOccupancy'] > 1), 'vehicleMiles'].sum())
metrics_json['VMT_' + mode + "_shared_2p"] = float(pathTraversal.loc[(pathTraversal['mode_extended'] == mode) & (pathTraversal['trueOccupancy'] == 2), 'vehicleMiles'].sum())
metrics_json['VMT_' + mode + "_shared_3p"] = float(pathTraversal.loc[(pathTraversal['mode_extended'] == mode) & (pathTraversal['trueOccupancy'] == 3), 'vehicleMiles'].sum())
metrics_json['VMT_' + mode + "_shared_4p"] = float(pathTraversal.loc[(pathTraversal['mode_extended'] == mode) & (pathTraversal['trueOccupancy'] >= 4), 'vehicleMiles'].sum())
metrics_json['PMT_' + mode + "_empty"] = float(pathTraversal.loc[(pathTraversal['mode_extended'] == mode) & (pathTraversal['trueOccupancy'] == 0), 'passengerMiles'].sum())
metrics_json['PMT_' + mode + "_shared"] = float(pathTraversal.loc[(pathTraversal['mode_extended'] == mode) & (pathTraversal['trueOccupancy'] > 1), 'passengerMiles'].sum())
metrics_json['PMT_' + mode + "_shared_2p"] = float(pathTraversal.loc[(pathTraversal['mode_extended'] == mode) & (pathTraversal['trueOccupancy'] == 2), 'passengerMiles'].sum())
metrics_json['PMT_' + mode + "_shared_3p"] = float(pathTraversal.loc[(pathTraversal['mode_extended'] == mode) & (pathTraversal['trueOccupancy'] == 3), 'passengerMiles'].sum())
metrics_json['PMT_' + mode + "_shared_4p"] = float(pathTraversal.loc[(pathTraversal['mode_extended'] == mode) & (pathTraversal['trueOccupancy'] >= 4), 'passengerMiles'].sum())
metrics_json['VHT_' + mode + "_empty"] = float(pathTraversal.loc[(pathTraversal['mode_extended'] == mode) & (pathTraversal['trueOccupancy'] == 0), 'vehicleHours'].sum())
metrics_json['VHT_' + mode + "_shared"] = float(pathTraversal.loc[(pathTraversal['mode_extended'] == mode) & (pathTraversal['trueOccupancy'] > 1), 'vehicleHours'].sum())
metrics_json['VHT_' + mode + "_shared_2p"] = float(pathTraversal.loc[(pathTraversal['mode_extended'] == mode) & (pathTraversal['trueOccupancy'] == 2), 'vehicleHours'].sum())
metrics_json['VHT_' + mode + "_shared_3p"] = float(pathTraversal.loc[(pathTraversal['mode_extended'] == mode) & (pathTraversal['trueOccupancy'] == 3), 'vehicleHours'].sum())
metrics_json['VHT_' + mode + "_shared_4p"] = float(pathTraversal.loc[(pathTraversal['mode_extended'] == mode) & (pathTraversal['trueOccupancy'] >= 4), 'vehicleHours'].sum())
metrics_json['VMT_L1'] = float(pathTraversal.loc[pathTraversal['vehicleType'].str.contains('L1'), 'vehicleMiles'].sum())
metrics_json['VMT_L3'] = float(pathTraversal.loc[pathTraversal['vehicleType'].str.contains('L3'), 'vehicleMiles'].sum())
metrics_json['VMT_L5'] = float(pathTraversal.loc[pathTraversal['vehicleType'].str.contains('L5'), 'vehicleMiles'].sum())
expansion_factor=(7.75/0.315) * 27.0 / 21.3
transitPathTraversals=pathTraversal.loc[(pathTraversal['vehicleType'].str.contains("BUS")==True) |
(pathTraversal['vehicleType'].str.contains("BIKE")==True) |
(pathTraversal['vehicleType'].str.contains("BODY")==True) |
(pathTraversal['vehicleType'].str.contains("CABLE")==True) |
(pathTraversal['vehicleType'].str.contains("FERRY")==True) |
(pathTraversal['vehicleType'].str.contains("SUBWAY")==True) |
(pathTraversal['vehicleType'].str.contains("TRAM")==True) |
(pathTraversal['vehicleType'].str.contains("TRAIN")==True),:]
transit_primaryFuelTypes = transitPathTraversals.groupby('primaryFuelType').agg({'primaryFuel': 'sum'})
transit_secondaryFuelTypes = transitPathTraversals.groupby('secondaryFuelType').agg({'secondaryFuel': 'sum'})
ldv_primaryFuelTypes = lightDutyVehiclePathTraversals.groupby('primaryFuelType').agg({'primaryFuel': 'sum'})
ldv_secondaryFuelTypes = lightDutyVehiclePathTraversals.groupby('secondaryFuelType').agg({'secondaryFuel': 'sum'})
primaryFuelTypes = pathTraversal.groupby('primaryFuelType').agg({'primaryFuel': 'sum'})
secondaryFuelTypes = pathTraversal.groupby('secondaryFuelType').agg({'secondaryFuel': 'sum'})
for fueltype in primaryFuelTypes.index:
metrics_json['totalEnergy_' + fueltype] = 0
for fuelType in secondaryFuelTypes.index:
if 'None' not in fuelType:
metrics_json['totalEnergy_' + fuelType] = 0
for fueltype in transit_primaryFuelTypes.index:
metrics_json['totalEnergy_' + fueltype] += float(transit_primaryFuelTypes.loc[fueltype, 'primaryFuel']) /expansion_factor
for fuelType in transit_secondaryFuelTypes.index:
if 'None' not in fuelType:
            metrics_json['totalEnergy_' + fuelType] += float(transit_secondaryFuelTypes.loc[fuelType, 'secondaryFuel'])/expansion_factor
for fueltype in ldv_primaryFuelTypes.index:
metrics_json['totalEnergy_' + fueltype] += float(ldv_primaryFuelTypes.loc[fueltype, 'primaryFuel'])
for fuelType in ldv_secondaryFuelTypes.index:
if 'None' not in fuelType:
            metrics_json['totalEnergy_' + fuelType] += float(ldv_secondaryFuelTypes.loc[fuelType, 'secondaryFuel'])
print("get_all_metrics done")
return metrics_json
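# Illustrative call (paths are hypothetical): parses the BEAM events file, caches the pooling
# metrics next to the given local file path, and returns a flat dict of VMT/PMT/VHT/energy metrics.
#   metrics = get_all_metrics("output/events.csv.gz", "output/run0")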
def generate_sankey_for_pooling(_df, _local_filename_itr, _unit=1000.0):
pool_tot_share = _df["multi_passengers_trips_per_ride_hail_trips"]
pool_share = _df["multi_passengers_trips_per_pool_trips"]
solo_share = (_df["solo_trips"]+_df["one_passenger_pool_trips"])/_df["ride_hail_requests"]
unmatched_share = (_df["unmatched_pool_requests"]+_df["unmatched_solo_requests"])/_df["ride_hail_requests"]
labels = ["pool requests: {:.1f}K".format(_df["ride_hail_pool_requests"]/_unit),
"solo requests: {:.1f}K".format(_df["ride_hail_solo_requests"]/_unit),
"pool: {:.1%} ({:.1%})".format(pool_tot_share, pool_share),
"solo: {:.1%}".format(solo_share),
"unmatched: {:.1%}".format(unmatched_share)]
fig = go.Figure(data=[go.Sankey(
# Define nodes
node=dict(
pad=15,
thickness=15,
line=dict(color="black", width=0.5),
label=labels
),
# Add links
link=dict(
source=[0, 0, 0, 1, 1],
target=[2, 3, 4, 3, 4],
value=[_df["multi_passenger_pool_trips"],
_df["one_passenger_pool_trips"],
_df["unmatched_pool_requests"],
_df["solo_trips"],
_df["unmatched_solo_requests"]]
))])
fig.update_layout(title_text="Sankey Diagram For Pooling", font_size=10)
fig.write_image("{}.pooling-sankey.png".format(_local_filename_itr)) |
nni/nas/benchmarks/utils.py | sky-dust-intelligence-bv/nni | 2,305 | 12667814 | <reponame>sky-dust-intelligence-bv/nni<gh_stars>1000+
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import functools
import json
import os
from playhouse.sqlite_ext import SqliteExtDatabase
from nni.common.blob_utils import load_or_download_file
from .constants import DB_URLS, DATABASE_DIR
json_dumps = functools.partial(json.dumps, sort_keys=True)
# to prevent repetitive loading of benchmarks
_loaded_benchmarks = {}
def load_benchmark(benchmark: str) -> SqliteExtDatabase:
"""
Load a benchmark as a database.
    Parameters
----------
benchmark : str
Benchmark name like nasbench201.
"""
if benchmark in _loaded_benchmarks:
return _loaded_benchmarks[benchmark]
url = DB_URLS[benchmark]
local_path = os.path.join(DATABASE_DIR, os.path.basename(url))
load_or_download_file(local_path, url)
_loaded_benchmarks[benchmark] = SqliteExtDatabase(local_path, autoconnect=True)
return _loaded_benchmarks[benchmark]
def download_benchmark(benchmark: str, progress: bool = True):
"""
Download a converted benchmark.
Parameters
----------
benchmark : str
Benchmark name like nasbench201.
"""
url = DB_URLS[benchmark]
local_path = os.path.join(DATABASE_DIR, os.path.basename(url))
load_or_download_file(local_path, url, True, progress)
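# Illustrative usage (benchmark name taken from the docstring example):
#   download_benchmark('nasbench201')   # fetch and cache the converted database
#   db = load_benchmark('nasbench201')  # SqliteExtDatabase bound to the cached file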
|
pysparkling/sql/tests/expressions/test_mappers.py | ptallada/pysparkling | 260 | 12667825 | from unittest import TestCase
from pysparkling.utils import MonotonicallyIncreasingIDGenerator
class MonotonicallyIncreasingIDGeneratorTests(TestCase):
def test_init_ok(self):
sut = MonotonicallyIncreasingIDGenerator(0)
self.assertEqual(sut.value, -1) # Shouldn't we throw an error here?
sut = MonotonicallyIncreasingIDGenerator(1)
self.assertEqual(sut.value, 8589934592 - 1) # I do it this way so I can easily find/replace the value
sut = MonotonicallyIncreasingIDGenerator(2)
self.assertEqual(sut.value, 2 * 8589934592 - 1)
def test_next_value_ok(self):
sut = MonotonicallyIncreasingIDGenerator(1)
self.assertEqual(next(sut), 8589934592)
self.assertEqual(next(sut), 8589934593)
self.assertEqual(next(sut), 8589934594)
|
aiohttp_admin/resource.py | asvetlov/aiohttp_admin | 221 | 12667853 | <reponame>asvetlov/aiohttp_admin
from abc import abstractmethod, ABCMeta
from .security import Permissions, require
from .utils import json_response, validate_query
class AbstractResource(metaclass=ABCMeta):
def __init__(self, *, primary_key, resource_name=None):
class_name = self.__class__.__name__.lower()
self._resource_name = resource_name or class_name
self._primary_key = primary_key
@property
def primary_key(self):
return self._primary_key
@abstractmethod
async def list(self, request): # pragma: no cover
await require(request, Permissions.view)
q = validate_query(request.GET)
assert q
# total number of results should be supplied in separate
headers = {'X-Total-Count': str(0)}
return json_response({}, headers=headers)
@abstractmethod
async def detail(self, request): # pragma: no cover
await require(request, Permissions.view)
entity_id = request.match_info['entity_id']
assert entity_id
return json_response({})
@abstractmethod
async def create(self, request): # pragma: no cover
await require(request, Permissions.add)
return json_response({})
@abstractmethod
async def update(self, request): # pragma: no cover
await require(request, Permissions.edit)
entity_id = request.match_info['entity_id']
assert entity_id
return json_response({})
@abstractmethod
async def delete(self, request): # pragma: no cover
await require(request, Permissions.delete)
entity_id = request.match_info['entity_id']
assert entity_id
return json_response({})
def setup(self, app, base_url):
url = str(base_url / self._resource_name)
url_id = url + '/{entity_id}'
add_route = app.router.add_route
add_route('GET', url, self.list)
add_route('GET', url_id, self.detail)
add_route('POST', url, self.create)
add_route('PUT', url_id, self.update)
add_route('DELETE', url_id, self.delete)
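# Minimal subclass sketch (illustrative only; the dict-backed storage is hypothetical):
#
#   class DictResource(AbstractResource):
#       def __init__(self, storage):
#           super().__init__(primary_key='id')
#           self._storage = storage
#
#       async def list(self, request):
#           await require(request, Permissions.view)
#           items = list(self._storage.values())
#           return json_response(items, headers={'X-Total-Count': str(len(items))})
#
#       # detail/create/update/delete would follow the same pattern, reading
#       # request.match_info['entity_id'] and checking the matching permission.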
|
src/genie/libs/parser/nxos/tests/ShowModule/cli/equal/golden_output2_expected.py | balmasea/genieparser | 204 | 12667913 | <filename>src/genie/libs/parser/nxos/tests/ShowModule/cli/equal/golden_output2_expected.py
expected_output = {
"slot": {
"lc": {
"1": {
"16x400G Ethernet Module": {
"hardware": "3.1",
"mac_address": "bc-4a-56-ff-fa-5b to bc-4a-56-ff-fb-dd",
"model": "N9K-X9716D-GX",
"online_diag_status": "Pass",
"ports": "16",
"serial_number": "FOC24322RBW",
"slot": "1",
"slot/world_wide_name": "LC1",
"software": "10.1(0.233)",
"status": "ok"
}
},
"2": {
"36x40/100G Ethernet Module": {
"hardware": "1.1",
"mac_address": "90-77-ee-ff-2d-b0 to 90-77-ee-ff-2e-43",
"model": "N9K-X9736C-FX",
"online_diag_status": "Pass",
"ports": "36",
"serial_number": "FOC24294DJ8",
"slot": "2",
"slot/world_wide_name": "LC2",
"software": "10.1(0.233)",
"status": "ok"
}
},
"22": {
"8-slot (100G) Fabric Module": {
"hardware": "1.1",
"mac_address": "NA",
"model": "N9K-C9508-FM-E2",
"online_diag_status": "Pass",
"ports": "0",
"serial_number": "FOC24381TPG",
"slot": "22",
"slot/world_wide_name": "FM2",
"software": "10.1(0.233)",
"status": "ok"
}
},
"24": {
"8-slot (100G) Fabric Module": {
"hardware": "1.1",
"mac_address": "NA",
"model": "N9K-C9508-FM-E2",
"online_diag_status": "Pass",
"ports": "0",
"serial_number": "FOC24381TX1",
"slot": "24",
"slot/world_wide_name": "FM4",
"software": "10.1(0.233)",
"status": "ok"
}
},
"26": {
"8-slot (100G) Fabric Module": {
"hardware": "1.1",
"mac_address": "NA",
"model": "N9K-C9508-FM-E2",
"online_diag_status": "Pass",
"ports": "0",
"serial_number": "FOC24381TUV",
"slot": "26",
"slot/world_wide_name": "FM6",
"software": "10.1(0.233)",
"status": "ok"
}
},
"29": {
"System Controller": {
"hardware": "2.0",
"mac_address": "NA",
"model": "N9K-SC-A",
"online_diag_status": "Pass",
"ports": "0",
"serial_number": "FOC24362EU0",
"slot": "29",
"slot/world_wide_name": "SC1",
"software": "10.1(0.233)",
"status": "active"
}
},
"30": {
"System Controller": {
"hardware": "2.0",
"mac_address": "NA",
"model": "N9K-SC-A",
"online_diag_status": "Pass",
"ports": "0",
"serial_number": "FOC2435407P",
"slot": "30",
"slot/world_wide_name": "SC2",
"software": "10.1(0.233)",
"status": "standby"
}
},
"5": {
"36x40G Ethernet": {
"model": "Module",
"ports": "36",
"slot": "5",
"status": "pwr-denied"
}
},
"6": {
"48x10/25G + 4x40/100G Ethernet Module": {
"hardware": "2.3",
"mac_address": "24-16-9d-ff-9a-09 to 24-16-9d-ff-9a-4c",
"model": "N9K-X97160YC-EX",
"online_diag_status": "Pass",
"ports": "52",
"serial_number": "FOC24021CNU",
"slot": "6",
"slot/world_wide_name": "LC6",
"software": "10.1(0.233)",
"status": "ok"
}
},
"7": {
"48x10G + 4x40/100G Ethernet": {
"model": "Module",
"ports": "52",
"slot": "7",
"status": "pwr-denied"
}
}
},
"rp": {
"27": {
"Supervisor Module": {
"hardware": "1.1",
"mac_address": "54-88-de-ff-09-2f to 54-88-de-ff-09-40",
"model": "N9K-SUP-A+",
"online_diag_status": "Pass",
"ports": "0",
"serial_number": "FOC24362EGB",
"slot": "27",
"slot/world_wide_name": "SUP1",
"software": "10.1(0.233)",
"status": "active"
}
}
}
}
}
|
python/sorting/Randomized Pivot QuickSort.py | jigneshoo7/AlgoBook | 191 | 12667950 | <reponame>jigneshoo7/AlgoBook
# Python implementation Randomized Pivot QuickSort using Lomuto's partition Scheme.
# Contributed by @ddhira123
# References: GeeksforGeeks
import random
'''
The function which implements QuickSort.
array : array to be sorted.
start : starting index of the array.
stop : ending index of the array.
'''
def quicksort(array, start , stop):
if(start < stop):
        # pivot is the index at which the randomly chosen pivot element
        # ends up after random_partition rearranges the array around it
pivot = random_partition(array, start, stop)
# At this stage the array is partially sorted around the pivot.
# Separately sorting the left part of the array and the
# right part of the array that has partitioned by pivot.
quicksort(array , start , pivot-1)
quicksort(array, pivot + 1, stop)
# This function generates random pivot, swaps the first element with the pivot
# and calls the partition function.
def random_partition(array , start, stop):
# Generating a random number between the starting index of the array and the ending index of the array.
randpivot = random.randrange(start, stop)
# Swapping the starting element of the array and the pivot
array[start], array[randpivot] = array[randpivot], array[start]
return partition(array, start, stop)
'''
This function takes the first element as pivot, places the pivot element at the correct position
in the sorted array. All the elements are re-arranged according to the pivot, the elements smaller than the
pivot is places on the left and the elements greater than the pivot is placed to the right of pivot.
'''
def partition(array,start,stop):
    pivot = start  # index of the pivot element
    # i marks the boundary of the "<= pivot" region: at every step,
    # all elements in array[start + 1 : i] are <= array[pivot], which is
    # what lets quicksort() recurse on strictly smaller subranges.
    i = start + 1
    # scan the remaining elements of the partition range
for j in range(start + 1, stop + 1):
# if the current element is smaller or equal to pivot,
# place it to the left side of the partition.
if array[j] <= array[pivot]:
array[i] , array[j] = array[j] , array[i]
i = i + 1
array[pivot] , array[i - 1] = array[i - 1] , array[pivot]
pivot = i - 1
return (pivot)
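# Illustrative trace (not part of the original): partition([3, 1, 2], 0, 2) keeps 3 as the
# pivot, rearranges the list to [2, 1, 3], and returns the pivot's final index, 2.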
# Driver Code
if __name__ == "__main__":
array = [99, 1, 4, 3, 17, 21, 16, 34, 29]
quicksort(array, 0, len(array) - 1)
print(array) |
holoviews/tests/element/test_comparisoncomposite.py | TheoMathurin/holoviews | 864 | 12667965 | <gh_stars>100-1000
"""
Test cases for the Comparisons class over the composite types:
Layout (the + operator)
Overlay (the * operator)
HoloMaps are not tested in this file.
"""
from holoviews import Element
from holoviews.element.comparison import ComparisonTestCase
class CompositeComparisonTestCase(ComparisonTestCase):
def setUp(self):
self.el1 = Element('data1')
self.el2 = Element('data2')
self.el3 = Element('data3')
self.el4 = Element('data5', group='ValB')
self.el5 = Element('data6', label='LabelA')
#========================#
# Tests for layout trees #
#========================#
def test_layouttree_comparison_equal(self):
t1 = self.el1 + self.el2
t2 = self.el1 + self.el2
self.assertEqual(t1, t2)
def test_layouttree_comparison_equal_large(self):
t1 = self.el1 + self.el2 + self.el4 + self.el5
t2 = self.el1 + self.el2 + self.el4 + self.el5
self.assertEqual(t1, t2)
def test_layouttree_comparison_unequal_data(self):
t1 = self.el1 + self.el2
t2 = self.el1 + self.el3
try:
self.assertEqual(t1, t2)
except AssertionError as e:
self.assertEqual(str(e),"'data2' != 'data3'")
def test_layouttree_comparison_unequal_paths(self):
t1 = self.el1 + self.el2
t2 = self.el1 + self.el2.relabel(group='ValA')
try:
self.assertEqual(t1, t2)
except AssertionError as e:
self.assertEqual(str(e), 'Layouts have mismatched paths.')
def test_layouttree_comparison_unequal_sizes(self):
t1 = self.el1 + self.el2
t2 = self.el1 + self.el2 + self.el3
try:
self.assertEqual(t1, t2)
except AssertionError as e:
self.assertEqual(str(e), 'Layouts have mismatched path counts.')
#=============================#
# Matching tests for Overlays #
#=============================#
def test_overlay_comparison_equal(self):
t1 = self.el1 * self.el2
t2 = self.el1 * self.el2
self.assertEqual(t1, t2)
def test_overlay_comparison_equal_large(self):
t1 = self.el1 * self.el2 * self.el3 * self.el4
t2 = self.el1 * self.el2 * self.el3 * self.el4
self.assertEqual(t1, t2)
def test_overlay_comparison_unequal_data(self):
t1 = self.el1 * self.el2
t2 = self.el1 * self.el3
try:
self.assertEqual(t1, t2)
except AssertionError as e:
self.assertEqual(str(e),"'data2' != 'data3'")
def test_overlay_comparison_unequal_paths(self):
t1 = self.el1 * self.el2
t2 = self.el1 * self.el2.relabel(group='ValA')
try:
self.assertEqual(t1, t2)
except AssertionError as e:
self.assertEqual(str(e), 'Overlays have mismatched paths.')
def test_overlay_comparison_unequal_sizes(self):
t1 = self.el1 * self.el2
t2 = self.el1 * self.el2 * self.el3
try:
self.assertEqual(t1, t2)
except AssertionError as e:
self.assertEqual(str(e), 'Overlays have mismatched path counts.')
#==================================#
# Mixed composite comparison tests #
#==================================#
def test_composite_comparison_equal(self):
t1 = (self.el1 * self.el2) + (self.el1 * self.el2)
t2 = (self.el1 * self.el2) + (self.el1 * self.el2)
self.assertEqual(t1, t2)
def test_composite_unequal_data(self):
t1 = (self.el1 * self.el2) + (self.el1 * self.el2)
t2 = (self.el1 * self.el2) + (self.el1 * self.el3)
try:
self.assertEqual(t1, t2)
except AssertionError as e:
self.assertEqual(str(e), "'data2' != 'data3'")
def test_composite_unequal_paths_outer(self):
t1 = (self.el1 * self.el2) + (self.el1 * self.el2).relabel(group='ValA')
t2 = (self.el1 * self.el2) + (self.el1 * self.el3)
try:
self.assertEqual(t1, t2)
except AssertionError as e:
self.assertEqual(str(e), 'Layouts have mismatched paths.')
def test_composite_unequal_paths_inner(self):
t1 = (self.el1 * self.el2) + (self.el1 * self.el2.relabel(group='ValA'))
t2 = (self.el1 * self.el2) + (self.el1 * self.el3)
try:
self.assertEqual(t1, t2)
except AssertionError as e:
self.assertEqual(str(e), 'Overlays have mismatched paths.')
def test_composite_unequal_sizes(self):
t1 = (self.el1 * self.el2) + (self.el1 * self.el2) + self.el3
t2 = (self.el1 * self.el2) + (self.el1 * self.el2)
try:
self.assertEqual(t1, t2)
except AssertionError as e:
self.assertEqual(str(e), 'Layouts have mismatched path counts.')
|
spack/package.py | zmxdream/FlexFlow | 455 | 12667996 | <gh_stars>100-1000
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
# ----------------------------------------------------------------------------
# If you submit this package back to Spack as a pull request,
# please first remove this boilerplate and all FIXME comments.
#
# This is a template package file for Spack. We've put "FIXME"
# next to all the things you'll want to change. Once you've handled
# them, you can save this file and test your package like this:
#
# spack install flexflow
#
# You can edit this file again by typing:
#
# spack edit flexflow
#
# See the Spack documentation for more information on packaging.
# ----------------------------------------------------------------------------
import os
from spack import *
class Flexflow(CMakePackage):
"""FlexFlow is a deep learning framework that accelerates
distributed DNN training by automatically searching for
efficient parallelization strategies. FlexFlow provides
a drop-in replacement for TensorFlow Keras and PyTorch. """
homepage = "http://flexflow.ai"
git = "https://github.com/flexflow/FlexFlow.git"
maintainers = ['jiazhihao', 'eddy16112']
version('master', branch='master', submodules=True)
depends_on("[email protected]:", type='build')
depends_on('[email protected]:11.9')
depends_on('cudnn')
depends_on('nccl', when='+nccl')
depends_on('[email protected]:3.9', when='+python')
depends_on('mpi', when='network=gasnet')
depends_on('ucx', when='conduit=ucx')
depends_on('mpi', when='conduit=mpi')
variant('max_dims', values=int, default=4, description="Set max number of dimensions for logical regions.")
variant('zlib', default=True, description="Enable zlib support.")
    variant('nccl', default=False, description="Enable NCCL support.")
variant('python', default=True, description="Enable Python support.")
variant('examples', default=False, description="Build all examples.")
variant('avx2', default=False, description="Enable AVX2 support.")
variant('gasnet', default=False, description="Enable GASNet support.")
variant('conduit', default='none',
values=('aries', 'ibv', 'udp', 'mpi', 'ucx', 'none'),
description="The gasnet conduit(s) to enable.",
multi=False)
conflicts('conduit=none', when='gasnet=True',
msg="a conduit must be selected when enable GASNet")
# cuda_arch=0 means FlexFlow will automatically detect the cuda arch of the current platform
cuda_arch_list = ('0', '60', '70', '75', '80')
variant('cuda_arch', default='0',
values=cuda_arch_list,
description="GPU/CUDA architecture to build for.",
multi=False)
def cmake_args(self):
spec = self.spec
cmake_cxx_flags = []
options = ['-DCUDA_USE_STATIC_CUDA_RUNTIME=OFF']
if '+python' in spec:
options.append('-DFF_USE_PYTHON=ON')
else:
options.append('-DFF_USE_PYTHON=OFF')
if '+nccl' in spec:
options.append('-DFF_USE_NCCL=ON')
else:
options.append('-DFF_USE_NCCL=OFF')
if '+examples' in spec:
options.append('-DFF_BUILD_ALL_EXAMPLES=ON')
else:
options.append('-DFF_BUILD_ALL_EXAMPLES=OFF')
if '+avx2' in spec:
options.append('-DFF_USE_AVX2=ON')
else:
options.append('-DFF_USE_AVX2=OFF')
if '+gasnet' in spec:
options.append('-DFF_USE_GASNET=ON')
gasnet_conduit = spec.variants['conduit'].value
options.append('-DFF_GASNET_CONDUIT=%s' % gasnet_conduit)
else:
options.append('-DFF_USE_GASNET=OFF')
maxdims = int(spec.variants['max_dims'].value)
options.append('-DFF_MAX_DIM=%d' % maxdims)
cuda_arch = spec.variants['cuda_arch'].value
if cuda_arch != '0':
options.append('-DFF_CUDA_ARCH=%s' % cuda_arch)
return options |
jack/io/embeddings/__init__.py | elyase/jack | 192 | 12668009 | # -*- coding: utf-8 -*-
from jack.io.embeddings.embeddings import Embeddings, load_embeddings
from jack.io.embeddings.glove import load_glove
__all__ = [
'Embeddings',
    'load_embeddings',
'load_word2vec',
'get_word2vec_vocabulary',
'load_glove',
]
|
toad/cli.py | Padfoot-ted/toad | 325 | 12668058 | """
toad command line application
"""
import argparse
from .commands import get_plugins
def add_sub(parsers, config):
"""add sub parser by config
"""
info = config.get('info', {})
args = config.get('args', [])
defaults = config.get('defaults', None)
sub_parser = parsers.add_parser(**info)
for detail in args:
flag = detail.pop('flag')
sub_parser.add_argument(*flag, **detail)
if defaults:
sub_parser.set_defaults(**defaults)
def get_parser():
"""get parser
"""
parser = argparse.ArgumentParser(
prog = 'toad',
description = 'Detect data from a csv file',
)
subparsers = parser.add_subparsers()
plugins = get_plugins()
for plug in plugins:
add_sub(subparsers, plug.ARGS)
return parser
def main():
"""
"""
parser = get_parser()
args = parser.parse_args()
if hasattr(args, 'func'):
args.func(args)
if __name__ == '__main__':
main()
|
Geometry/HcalEventSetup/python/HcalGeometry_cfi.py | ckamtsikis/cmssw | 852 | 12668061 | import FWCore.ParameterSet.Config as cms
HcalHardcodeGeometryEP = cms.ESProducer("HcalHardcodeGeometryEP" ,
UseOldLoader = cms.bool(False)
)
|
calvin/tests/test_codegen.py | gabrielcercel/calvin-base | 334 | 12668084 | # -*- coding: utf-8 -*-
# Copyright (c) 2015 Ericsson AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from calvin.Tools.cscompiler import compile_file
import unittest
import json
import glob
import os
# import difflib
import collections
import pytest
def absolute_filename(filename):
return os.path.join(os.path.dirname(__file__), filename)
def _read_file(file):
try:
with open(file, 'r') as source:
# print source.encoding
text = str(source.read())
# print type(text), type(unicode(text))
except Exception as e:
print "Error: Could not read file: '%s'" % file
raise e
return text
def _filepath(testname, ext):
return "{}{}.{}".format(absolute_filename('codegen/'), testname, ext)
def _codegen(testname, ds, credentials):
test_file = _filepath(testname, "calvin")
# code, issuetracker = compile_file(filename, ds, ir, credentials=None)
code, it = compile_file(test_file, ds, ir=False, credentials=credentials)
code = json.loads(json.dumps(code)) # FIXME: Is there no other way of making this unicode???
ref_file = _filepath(testname, "deployjson" if ds else "ref")
ref_code = _read_file(ref_file)
# print "ref_code", type(ref_code)
ref_code = json.loads(ref_code)
ref_code.setdefault('valid', True)
# print code, ref_code
return code, it, ref_code
def cs_codegen(testname):
return _codegen(testname, ds=False, credentials=None)
def ds_codegen(testname):
return _codegen(testname, ds=True, credentials=None)
# Since the ds contains nested lists we cannot simply use == to check for equality
def compare(dut, ref):
if isinstance(ref, basestring):
# print "basestring"
# print "Comparing {} and {}".format(dut, ref)
assert dut == ref
elif isinstance(ref, collections.Mapping):
# print "mapping"
# print "Comparing {} and {}".format(dut, ref)
keys = set(ref.keys())
assert set(dut.keys()) == keys
for key in keys:
compare(dut[key], ref[key])
elif isinstance(ref, collections.Iterable):
# print "iterable"
# print "Comparing {} and {}".format(dut, ref)
assert len(dut) == len(ref)
pairs = zip(dut, ref)
for pair in pairs:
compare(*pair)
else:
# print "other"
# print "Comparing {} and {}".format(dut, ref)
assert dut == ref
# See https://stackoverflow.com/a/25851972
def ordered(obj):
if isinstance(obj, dict):
return sorted((k, ordered(v)) for k, v in obj.items())
if isinstance(obj, list):
return sorted(ordered(x) for x in obj)
else:
return obj
test_list = [os.path.basename(x)[:-7] for x in glob.glob("{}/*.calvin".format(absolute_filename('codegen')))]
@pytest.mark.parametrize("test", test_list)
def testCalvinScriptCodegen(test):
code, it, ref = cs_codegen(test)
assert it.error_count == 0
compare(ordered(code), ordered(ref))
@pytest.mark.parametrize("test", test_list)
def testCalvinScriptDeploygen(test):
code, it, ref = ds_codegen(test)
assert it.error_count == 0
compare(ordered(code), ordered(ref))
|
tensorboard/plugins/audio/audio_demo.py | Digitaltransform/tensorboard | 6,139 | 12668131 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Sample data exhibiting audio summaries, via a waveform generator."""
import inspect
import math
import os.path
from absl import app
from absl import flags
import tensorflow as tf
FLAGS = flags.FLAGS
flags.DEFINE_string(
"logdir",
"/tmp/audio_demo",
"Directory into which to write TensorBoard data.",
)
flags.DEFINE_integer(
"steps", 5, "Number of frequencies of each waveform to generate."
)
# Parameters for the audio output.
flags.DEFINE_integer("sample_rate", 44100, "Sample rate, in Hz.")
flags.DEFINE_float("duration", 2.0, "Duration of each waveform, in s.")
def _samples():
"""Compute how many samples should be included in each waveform."""
return int(FLAGS.sample_rate * FLAGS.duration)
def run(wave_name, wave_constructor, step):
"""Generate an audio waveform and write it to summaries.
Waves will be generated at frequencies ranging from A4 to A5.
Args:
wave_name: the name of the wave being generated
wave_constructor: a function that accepts a float32 frequency (in Hz) at
which to construct a wave, and returns a tensor of shape
[1, _samples(), `n`] representing audio data (for some number of
channels `n`).
step: number step
"""
# For the given step, linearly interpolate a frequency between A4 (440 Hz)
# and A5 (880 Hz) and create its waveform.
f_min = 440.0
f_max = 880.0
t = step / (FLAGS.steps - 1)
frequency = f_min * (1.0 - t) + f_max * t
waveform = wave_constructor(frequency)
# Optionally generate a description that will appear in TensorBoard
# next to the audio. This one includes the source code behind the
# waveform for context.
source = "\n".join(
" %s" % line.rstrip()
for line in inspect.getsourcelines(wave_constructor)[0]
)
description = "A wave of type `%r`, generated via:\n\n%s" % (
wave_name,
source,
)
# Write the audio waveform summary. The `waveform` is a
# [num_clips, num_frames, num_channels] shaped tensor.
tf.summary.audio(
"waveform",
waveform,
FLAGS.sample_rate,
step=step,
description=description,
)
def sine_wave(frequency):
"""Emit a sine wave at the given frequency."""
xs = tf.reshape(tf.range(_samples(), dtype=tf.float32), [1, _samples(), 1])
ts = xs / FLAGS.sample_rate
return tf.sin(2 * math.pi * frequency * ts)
def square_wave(frequency):
"""Emit a square wave at the given frequency."""
# The square is just the sign of the sine!
return tf.sign(sine_wave(frequency))
def bisine_wave(frequency):
"""Emit two sine waves, in stereo at different octaves."""
# Generate 2 sine waves, each of which is a [1, _samples(), 1] shaped tensor.
sine_hi = sine_wave(frequency)
sine_lo = sine_wave(frequency / 2.0)
# Concatenating along axis 2 produces a [1, _samples(), 2] shaped tensor, a
# stereo (2 channel) audio waveform.
sample1 = tf.concat([sine_lo, sine_hi], axis=2)
sample2 = tf.concat([sine_hi, sine_lo], axis=2)
# Return [2, _samples(), 2], representing 2 audio clips.
return tf.concat([sample1, sample2], axis=0)
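# Illustrative sketch, not part of the original demo: any additional wave only
# needs to follow the contract documented in `run` above -- accept a float
# frequency in Hz and return a [1, _samples(), n] shaped tensor. The sawtooth
# shape and its name are assumptions, and it is not registered in `run_all`.
def sawtooth_wave(frequency):
    """Emit a sawtooth wave at the given frequency (illustrative only)."""
    xs = tf.reshape(tf.range(_samples(), dtype=tf.float32), [1, _samples(), 1])
    ts = xs / FLAGS.sample_rate
    # The fractional part of f*t ramps from 0 to 1 once per period;
    # rescale it to the [-1, 1] range used by the other waveforms.
    phase = frequency * ts
    return 2.0 * (phase - tf.floor(phase)) - 1.0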
def run_all(base_logdir):
"""Generate waves of the shapes defined above.
For each wave, creates a run that contains summaries.
Arguments:
base_logdir: the directory into which to store all the runs' data
"""
waves = [
("sine_wave", sine_wave),
("square_wave", square_wave),
("bisine_wave", bisine_wave),
]
for wave_name, wave_constructor in waves:
logdir = os.path.join(base_logdir, wave_name)
writer = tf.summary.create_file_writer(logdir)
with writer.as_default():
for step in range(FLAGS.steps):
run(wave_name, wave_constructor, step)
def main(unused_argv):
print("Saving output to %s." % FLAGS.logdir)
print(
"To view results in your browser, run `tensorboard --logdir %s`"
% FLAGS.logdir
)
run_all(FLAGS.logdir)
print("Done. Output saved to %s." % FLAGS.logdir)
if __name__ == "__main__":
app.run(main)
|
src/utils_ui.py | maiki/k3x | 188 | 12668147 | <gh_stars>100-1000
# utils_ui.py
#
# MIT License
#
# Copyright (c) 2020 <NAME> <<EMAIL>>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import logging
import random
from typing import Optional, Tuple, Callable
from gi.repository import GdkPixbuf, Granite, Gtk, Notify as notify
from .config import APP_TITLE, DEFAULT_NOTIFICATION_TIMEOUT, DEFAULT_NOTIFICATION_ERROR_TIMEOUT
from .config import ApplicationSettings
from .utils import call_in_main_thread, running_on_main_thread
###############################################################################
# messages and notyfications
###############################################################################
def show_notification(msg, header: str = None, icon: str = None,
timeout: Optional[int] = None,
action: Optional[Tuple[str, Callable]] = None,
threaded: bool = True,
is_error: bool = False):
"""
Show a desktop notification
"""
# see https://lazka.github.io/pgi-docs/#Notify-0.7
# maybe we could also use https://notify2.readthedocs.io/en/latest/
if not header:
header = APP_TITLE
icon_filename = None
if not icon:
icon_filename = ApplicationSettings.get_app_icon()
if is_error:
t = timeout if timeout is not None else DEFAULT_NOTIFICATION_ERROR_TIMEOUT
logging.error(msg)
else:
t = timeout if timeout is not None else DEFAULT_NOTIFICATION_TIMEOUT
logging.debug(msg)
def do_notify():
assert running_on_main_thread()
n = notify.Notification.new(header, msg, icon)
n.set_app_name(APP_TITLE)
if icon_filename is not None:
pixbuf = GdkPixbuf.Pixbuf.new_from_file(icon_filename)
n.set_icon_from_pixbuf(pixbuf)
# Note that the timeout may be ignored by the server.
n.set_timeout(t)
if action is not None:
action_str, action_callback = action
r = random.randrange(0, 10000)
n.add_action(f"{r}-{APP_TITLE}-id", action_str, action_callback, None)
n.show()
if not threaded: # important: do not return anything if invoked with `call_in_main_thread`
return n
if threaded:
call_in_main_thread(do_notify)
return None
else:
return do_notify()
def show_error_dialog(msg: str, explanation: str, icon: str = "dialog-error",
parent=None, ok_label: str = "Ok") -> None:
"""
Show a info/warning dialog, with just a "Close" button
"""
def do_show():
error_diag = Granite.MessageDialog.with_image_from_icon_name(
msg, "\n\n" + explanation, icon, Gtk.ButtonsType.CLOSE)
if parent is not None:
error_diag.set_transient_for(parent)
error_diag.set_flags = Gtk.DialogFlags.MODAL
error_diag.connect("response", lambda widget, response_id: widget.destroy())
error_diag.show_all()
if running_on_main_thread():
do_show()
else:
call_in_main_thread(do_show)
def show_warning_dialog(msg: str, explanation: str):
"""
Show a warning dialog, with just one OK button
"""
show_error_dialog(msg, explanation, icon="dialog-warning")
###############################################################################
# settings
###############################################################################
def _link_gtk_entry_to_settings(settings: ApplicationSettings, entry: Gtk.Entry, settings_id: str):
"""
Link a Gtk.Entry to a GSettings ID, so any change in one of
them will be reflected in the other one.
"""
name = entry.get_name()
logging.debug(f"[LINK] settings::{settings_id} <-> entry {name} [str]")
curr_value = settings.get_safe_string(settings_id)
if curr_value:
entry.set_text(curr_value)
settings.connect(f"changed::{settings_id}",
lambda s, k: entry.set_text(settings.get_safe_string(settings_id)))
entry.connect("changed",
lambda e: settings.set_string(settings_id, str(entry.get_text())))
def _link_gtk_switch_to_settings(settings: ApplicationSettings, switch: Gtk.Switch, settings_id: str):
"""
Link a Gtk.Switch to a GSettings ID, so any change in one of
them will be reflected in the other one.
"""
name = switch.get_name()
logging.debug(f"[LINK] settings::{settings_id} <-> switch {name} [bool]")
curr_value = settings.get_boolean(settings_id)
if curr_value:
switch.set_state(curr_value)
settings.connect(f"changed::{settings_id}",
lambda s, k: switch.set_state(settings.get_boolean(settings_id)))
switch.connect("state-set",
lambda _sw, _state: settings.set_boolean(settings_id, _state))
def _link_gtk_spinbutton_to_settings(settings: ApplicationSettings, spin: Gtk.SpinButton, settings_id: str):
"""
Link a Gtk.SpinButton to a GSettings ID, so any change in one of
them will be reflected in the other one.
"""
name = spin.get_name()
logging.debug(f"[LINK] settings::{settings_id} <-> spinbutton {name} [int]")
curr_value = settings.get_int(settings_id)
if curr_value:
spin.set_value(settings.get_int(settings_id))
settings.connect(f"changed::{settings_id}",
lambda s, k: spin.set_value(settings.get_int(settings_id)))
spin.connect("change-value",
lambda e: settings.set_int(settings_id, spin.get_value()))
def _link_gtk_combobox_to_settings(settings: ApplicationSettings, combo: Gtk.ComboBox, settings_id: str):
def combo_changed(*args):
tree_iter = combo.get_active_iter()
if tree_iter is not None:
model = combo.get_model()
text = model[tree_iter][0]
else:
entry = combo.get_child()
text = entry.get_text()
settings.set_string(settings_id, text)
def settings_changed(*args):
value = settings.get_safe_string(settings_id)
if value is None or value == "":
combo.set_active(0)
else:
model = combo.get_model()
for i in range(0, len(model)):
text = model[i][0]
if text == value:
combo.set_active(i)
return
entry = combo.get_child()
if hasattr(entry, "set_text"):
entry.set_text(value)
name = combo.get_name()
logging.debug(f"[LINK] settings::{settings_id} <-> combo {name} [str]")
settings_changed()
settings.connect(f"changed::{settings_id}", lambda s, k: settings_changed)
combo.connect("changed", combo_changed)
def link_widget_to_settings(settings: ApplicationSettings, widget: Gtk.Widget, settings_id: str):
"""
    Link a supported Gtk widget to a GSettings ID, so any change in one of
them will be reflected in the other one.
"""
# note: take into account inheritance in these heuristics...
if isinstance(widget, Gtk.ComboBox):
_link_gtk_combobox_to_settings(settings, widget, settings_id)
elif isinstance(widget, Gtk.SpinButton):
_link_gtk_spinbutton_to_settings(settings, widget, settings_id)
elif isinstance(widget, Gtk.Switch):
_link_gtk_switch_to_settings(settings, widget, settings_id)
elif isinstance(widget, Gtk.Entry):
_link_gtk_entry_to_settings(settings, widget, settings_id)
else:
raise Exception("unsupported widget type to link")
###############################################################################
# settings UI
###############################################################################
class SettingsPage(Granite.SimpleSettingsPage):
"""
A settings page, with some convenience functions.
"""
# settings that will be reset when calling set_defaults()
_managed_settings = []
def __init__(self, settings: ApplicationSettings, **kwargs):
self._settings = settings
super().__init__(**kwargs)
self._entries = []
self._entries_area = self.get_content_area()
self._entries_area.set_halign(Gtk.Align.FILL)
self._entries_area.set_hexpand(True)
def append_entry(self, label, widget, setting=None):
# attach to the grid (see https://python-gtk-3-tutorial.readthedocs.io/en/latest/layout.html#grid)
count = len(self._entries)
self._entries_area.attach(label, 0, count, 1, 1)
self._entries_area.attach(widget, 1, count, 1, 1)
self._entries.append(widget)
if setting:
link_widget_to_settings(self._settings, widget, setting)
def append_labeled_entry(self, text, widget, setting=None):
label = Gtk.Label(text)
label.props.hexpand = False
label.props.halign = Gtk.Align.END
widget.props.halign = Gtk.Align.START
self.append_entry(label, widget, setting=setting)
def on_validate(self):
"""
Validate all the settings, raising an exception if something is wrong
"""
pass
def on_apply(self):
"""
        Apply all the settings, raising an exception if something is wrong
"""
pass
def set_defaults(self):
"""
Set all the settings to the default values.
"""
for setting in self._managed_settings:
logging.debug(f"[UI] Resetting {setting} to default value")
self._settings.reset(setting)
|
src/tensorflow2.x-python-tutorial/binary-text-classification.py | pepure/SciSharp-Stack-Examples | 197 | 12668152 | import matplotlib.pyplot as plt
import os
import re
import shutil
import string
import tensorflow as tf
from tensorflow.keras import layers
from tensorflow.keras import losses
from tensorflow.keras import preprocessing
from tensorflow.keras.layers.experimental.preprocessing import TextVectorization
import os
os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
print(tf.__version__)
# https://www.tensorflow.org/tutorials/keras/text_classification
url = "https://ai.stanford.edu/~amaas/data/sentiment/aclImdb_v1.tar.gz"
tf.debugging.set_log_device_placement(True);
dataset = tf.keras.utils.get_file("aclImdb_v1.tar.gz", url,
untar=True, cache_dir='C:/Users/haipi/AppData/Local/Temp/aclImdb_v1',
cache_subdir='')
dataset_dir = os.path.join(os.path.dirname(dataset), 'aclImdb')
train_dir = os.path.join(dataset_dir, 'train')
sample_file = os.path.join(train_dir, 'pos/0_9.txt')
with open(sample_file) as f:
print(f.read())
# remove_dir = os.path.join(train_dir, 'unsup')
# shutil.rmtree(remove_dir)
batch_size = 32
seed = 42
raw_train_ds = tf.keras.preprocessing.text_dataset_from_directory(
train_dir,
batch_size=batch_size,
validation_split=0.2,
subset='training',
seed=seed)
for text_batch, label_batch in raw_train_ds.take(1):
for i in range(3):
print("Review", text_batch.numpy()[i])
print("Label", label_batch.numpy()[i])
print("Label 0 corresponds to", raw_train_ds.class_names[0])
print("Label 1 corresponds to", raw_train_ds.class_names[1])
raw_val_ds = tf.keras.preprocessing.text_dataset_from_directory(
train_dir,
batch_size=batch_size,
validation_split=0.2,
subset='validation',
seed=seed)
test_dir = os.path.join(dataset_dir, 'test')
raw_test_ds = tf.keras.preprocessing.text_dataset_from_directory(
test_dir,
batch_size=batch_size)
def custom_standardization(input_data):
lowercase = tf.strings.lower(input_data)
stripped_html = tf.strings.regex_replace(lowercase, '<br />', ' ')
return tf.strings.regex_replace(stripped_html,
'[%s]' % re.escape(string.punctuation),
'')
max_features = 10000
sequence_length = 250
vectorize_layer = TextVectorization(
standardize=custom_standardization,
max_tokens=max_features,
output_mode='int',
output_sequence_length=sequence_length)
# Make a text-only dataset (without labels), then call adapt
train_text = raw_train_ds.map(lambda x, y: x)
vectorize_layer.adapt(train_text)
def vectorize_text(text, label):
text = tf.expand_dims(text, -1)
return vectorize_layer(text), label
# retrieve a batch (of 32 reviews and labels) from the dataset
text_batch, label_batch = next(iter(raw_train_ds))
first_review, first_label = text_batch[0], label_batch[0]
print("Review", first_review)
print("Label", raw_train_ds.class_names[first_label])
print("Vectorized review", vectorize_text(first_review, first_label))
print("1287 ---> ",vectorize_layer.get_vocabulary()[1287])
print(" 313 ---> ",vectorize_layer.get_vocabulary()[313])
print('Vocabulary size: {}'.format(len(vectorize_layer.get_vocabulary())))
train_ds = raw_train_ds.map(vectorize_text)
val_ds = raw_val_ds.map(vectorize_text)
test_ds = raw_test_ds.map(vectorize_text)
AUTOTUNE = tf.data.AUTOTUNE
train_ds = train_ds.cache().prefetch(buffer_size=AUTOTUNE)
val_ds = val_ds.cache().prefetch(buffer_size=AUTOTUNE)
test_ds = test_ds.cache().prefetch(buffer_size=AUTOTUNE)
# Create the model
embedding_dim = 16
model = tf.keras.Sequential([
layers.Embedding(max_features + 1, embedding_dim),
layers.Dropout(0.2),
layers.GlobalAveragePooling1D(),
layers.Dropout(0.2),
layers.Dense(1)])
model.summary()
model.compile(loss=losses.BinaryCrossentropy(from_logits=True),
optimizer='adam',
metrics=tf.metrics.BinaryAccuracy(threshold=0.0))
epochs = 30
history = model.fit(
train_ds,
validation_data=val_ds,
epochs=epochs)
loss, accuracy = model.evaluate(test_ds)
print("Loss: ", loss)
print("Accuracy: ", accuracy) |
codes/python/advanced/tfrecords.py | agnes-yang/TensorFlow-Course | 7,040 | 12668238 | <gh_stars>1000+
# -*- coding: utf-8 -*-
"""TFRecords.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1p-Nz6v3CyqKSc-QazX1FgvZkamt5T-uC
"""
import tensorflow as tf
from tensorflow import keras
import numpy as np
# Load MNIST data
(x_train, y_train), (x_test, y_test) = keras.datasets.mnist.load_data()
# Preprocessing
x_train = x_train / 255.0
x_test = x_test / 255.0
# Track the data type
dataType = x_train.dtype
print(f"Data type: {dataType}")
labelType = y_test.dtype
print(f"Data type: {labelType}")
im_list = []
n_samples_to_show = 16
c = 0
for i in range(n_samples_to_show):
im_list.append(x_train[i])
# Visualization
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import ImageGrid
fig = plt.figure(figsize=(4., 4.))
# Ref: https://matplotlib.org/3.1.1/gallery/axes_grid1/simple_axesgrid.html
grid = ImageGrid(fig, 111, # similar to subplot(111)
                 nrows_ncols=(4, 4), # creates 4x4 grid of axes
axes_pad=0.1, # pad between axes in inch.
)
# Show image grid
for ax, im in zip(grid, im_list):
# Iterating over the grid returns the Axes.
ax.imshow(im, 'gray')
plt.show()
# Convert values to compatible tf.Example types.
def _bytes_feature(value):
"""Returns a bytes_list from a string / byte."""
if isinstance(value, type(tf.constant(0))):
value = value.numpy() # BytesList won't unpack a string from an EagerTensor.
return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
def _float_feature(value):
"""Returns a float_list from a float / double."""
return tf.train.Feature(float_list=tf.train.FloatList(value=[value]))
def _int64_feature(value):
"""Returns an int64_list from a bool / enum / int / uint."""
return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))
# Create the features dictionary.
def image_example(image, label, dimension):
feature = {
'dimension': _int64_feature(dimension),
'label': _int64_feature(label),
'image_raw': _bytes_feature(image.tobytes()),
}
return tf.train.Example(features=tf.train.Features(feature=feature))
record_file = 'mnistTrain.tfrecords'
n_samples = x_train.shape[0]
dimension = x_train.shape[1]
with tf.io.TFRecordWriter(record_file) as writer:
for i in range(n_samples):
image = x_train[i]
label = y_train[i]
tf_example = image_example(image, label, dimension)
writer.write(tf_example.SerializeToString())
# Create the dataset object from tfrecord file(s)
dataset = tf.data.TFRecordDataset(record_file)
# Decoding function
def parse_record(record):
name_to_features = {
'dimension': tf.io.FixedLenFeature([], tf.int64),
'label': tf.io.FixedLenFeature([], tf.int64),
'image_raw': tf.io.FixedLenFeature([], tf.string),
}
return tf.io.parse_single_example(record, name_to_features)
def decode_record(record):
image = tf.io.decode_raw(
record['image_raw'], out_type=dataType, little_endian=True, fixed_length=None, name=None
)
label = record['label']
dimension = record['dimension']
image = tf.reshape(image, (dimension, dimension))
return (image, label)
im_list = []
n_samples_to_show = 16
c = 0
for record in dataset:
c+=1
if c > n_samples_to_show:
break
parsed_record = parse_record(record)
decoded_record = decode_record(parsed_record)
image, label = decoded_record
im_list.append(image)
# Visualization
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import ImageGrid
fig = plt.figure(figsize=(4., 4.))
# Ref: https://matplotlib.org/3.1.1/gallery/axes_grid1/simple_axesgrid.html
grid = ImageGrid(fig, 111, # similar to subplot(111)
                 nrows_ncols=(4, 4), # creates 4x4 grid of axes
axes_pad=0.1, # pad between axes in inch.
)
# Show image grid
for ax, im in zip(grid, im_list):
# Iterating over the grid returns the Axes.
ax.imshow(im, 'gray')
plt.show()
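# Optional sketch, not in the original notebook: the same parse/decode pair can
# also be applied lazily through the tf.data pipeline instead of the manual
# loop above. The batch size of 32 is an arbitrary choice for illustration.
decoded_dataset = dataset.map(parse_record).map(decode_record).batch(32)
for image_batch, label_batch in decoded_dataset.take(1):
    print(image_batch.shape, label_batch.shape)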
|
bin/warm_up_cache.py | thenetcircle/dino | 150 | 12668253 | import logging
import os
import sys
from dino.config import ConfigKeys
from dino.environ import env
DEFAULT_DAYS = 31
logger = logging.getLogger('warm_up_cache.py')
try:
days = env.config.get(ConfigKeys.WARMUP_DAYS, domain=ConfigKeys.CACHE_SERVICE, default=-1)
if days != -1:
try:
days = int(float(days))
except Exception as e1:
logger.error("could not parse configured days {}: {}".format(days, str(e)))
days = -1
if days < 0:
days = os.getenv('DINO_DAYS')
if days is None:
if len(sys.argv) > 1:
days = sys.argv[1]
else:
days = DEFAULT_DAYS
try:
days = int(float(days))
except ValueError as e:
logger.error("invalid days: {}: {}, using default value of {}".format(days, str(e), DEFAULT_DAYS))
days = DEFAULT_DAYS
except Exception as e:
logger.error("could not get days: {}".format(str(e)))
days = DEFAULT_DAYS
logger.info('caching all user ids...')
# not needed for wio
if 'wio' not in os.getenv('DINO_ENVIRONMENT'):
try:
all_users = env.db.get_all_user_ids()
logger.info('caching all user roles ({})...'.format(len(all_users)))
env.db.get_users_roles(all_users)
except NotImplementedError:
pass
logger.info('caching all rooms...')
try:
channels = env.db.get_channels()
logger.info('caching all rooms for channels ({})...'.format(len(channels)))
for channel_id in channels.keys():
env.db.rooms_for_channel(channel_id)
env.db.get_acls_in_channel_for_action(channel_id, 'list')
except NotImplementedError:
pass
logger.info('caching last {} days of online time...'.format(days))
try:
last_online_times = env.db.get_last_online_since(days=days)
logger.info('caching all last online time for {} users...'.format(len(last_online_times)))
env.cache.set_last_online(last_online_times)
except NotImplementedError:
pass
logger.info('done! cache warmed up')
|
notebook/scipy_sparse_convert.py | vhn0912/python-snippets | 174 | 12668338 | <reponame>vhn0912/python-snippets<filename>notebook/scipy_sparse_convert.py
from scipy.sparse import csr_matrix, lil_matrix
l = [[0, 10, 20],
[30, 0, 0],
[0, 0, 0]]
csr = csr_matrix(l)
print(csr)
# (0, 1) 10
# (0, 2) 20
# (1, 0) 30
print(type(csr))
# <class 'scipy.sparse.csr.csr_matrix'>
lil = csr.tolil()
print(lil)
# (0, 1) 10
# (0, 2) 20
# (1, 0) 30
print(type(lil))
# <class 'scipy.sparse.lil.lil_matrix'>
lil = lil_matrix(csr)
print(lil)
# (0, 1) 10
# (0, 2) 20
# (1, 0) 30
print(type(lil))
# <class 'scipy.sparse.lil.lil_matrix'>
lil[0, 0] = 100
print(lil.toarray())
# [[100 10 20]
# [ 30 0 0]
# [ 0 0 0]]
print(csr.toarray())
# [[ 0 10 20]
# [30 0 0]
# [ 0 0 0]]
lil2 = lil_matrix(lil)
print(lil2.toarray())
# [[100 10 20]
# [ 30 0 0]
# [ 0 0 0]]
lil[0, 0] = 0
print(lil2.toarray())
# [[ 0 10 20]
# [30 0 0]
# [ 0 0 0]]
lil2_copy = lil_matrix(lil, copy=True)
print(lil2_copy.toarray())
# [[ 0 10 20]
# [30 0 0]
# [ 0 0 0]]
lil[0, 0] = 100
print(lil2_copy.toarray())
# [[ 0 10 20]
# [30 0 0]
# [ 0 0 0]]
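# Converting back works the same way (sketch): tocsr() returns a CSR copy of
# the current LIL matrix.
csr2 = lil.tocsr()
print(csr2.toarray())
# [[100 10 20]
#  [30 0 0]
#  [ 0 0 0]]
print(type(csr2))
# <class 'scipy.sparse.csr.csr_matrix'>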
|
amodem/tests/test_calib.py | Matthew-MK/amodem | 766 | 12668389 | from amodem import calib
from amodem import common
from amodem import config
from io import BytesIO
import numpy as np
import random
import pytest
import mock
config = config.fastest()
class ProcessMock:
def __init__(self):
self.buf = BytesIO()
self.stdin = self
self.stdout = self
self.bytes_per_sample = 2
def write(self, data):
assert self.buf.tell() < 10e6
self.buf.write(data)
def read(self, n):
return self.buf.read(n)
def test_success():
p = ProcessMock()
calib.send(config, p, gain=0.5, limit=32)
p.buf.seek(0)
calib.recv(config, p)
def test_too_strong():
p = ProcessMock()
calib.send(config, p, gain=1.001, limit=32)
p.buf.seek(0)
for r in calib.detector(config, src=p):
assert not r['success']
assert r['msg'] == 'too strong signal'
def test_too_weak():
p = ProcessMock()
calib.send(config, p, gain=0.01, limit=32)
p.buf.seek(0)
for r in calib.detector(config, src=p):
assert not r['success']
assert r['msg'] == 'too weak signal'
def test_too_noisy():
r = random.Random(0) # generate random binary signal
signal = np.array([r.choice([-1, 1]) for i in range(int(config.Fs))])
src = BytesIO(common.dumps(signal * 0.5))
for r in calib.detector(config, src=src):
assert not r['success']
assert r['msg'] == 'too noisy signal'
def test_errors():
class WriteError(ProcessMock):
def write(self, data):
raise KeyboardInterrupt()
p = WriteError()
with pytest.raises(KeyboardInterrupt):
calib.send(config, p, limit=32)
assert p.buf.tell() == 0
class ReadError(ProcessMock):
def read(self, n):
raise KeyboardInterrupt()
p = ReadError()
with pytest.raises(KeyboardInterrupt):
calib.recv(config, p, verbose=True)
assert p.buf.tell() == 0
@pytest.fixture(params=[0] + [sign * mag for sign in (+1, -1)
for mag in (0.1, 1, 10, 100, 1e3, 2e3)])
def freq_err(request):
return request.param * 1e-6
def test_drift(freq_err):
freq = config.Fc * (1 + freq_err / 1e6)
t = np.arange(int(1.0 * config.Fs)) * config.Ts
frame_length = 100
rms = 0.5
signal = rms * np.cos(2 * np.pi * freq * t)
src = BytesIO(common.dumps(signal))
iters = 0
for r in calib.detector(config, src, frame_length=frame_length):
assert r['success'] is True
assert abs(r['rms'] - rms) < 1e-3
assert abs(r['total'] - rms) < 1e-3
iters += 1
assert iters > 0
assert iters == config.baud / frame_length
def test_volume():
with mock.patch('subprocess.check_call') as check_call:
ctl = calib.volume_controller('volume-control')
ctl(0.01)
ctl(0.421)
ctl(0.369)
ctl(1)
assert check_call.mock_calls == [
mock.call(shell=True, args='volume-control 1%'),
mock.call(shell=True, args='volume-control 42%'),
mock.call(shell=True, args='volume-control 37%'),
mock.call(shell=True, args='volume-control 100%')
]
with pytest.raises(AssertionError):
ctl(0)
with pytest.raises(AssertionError):
ctl(-0.5)
with pytest.raises(AssertionError):
ctl(12.3)
def test_send_max_volume():
with mock.patch('subprocess.check_call') as check_call:
calib.send(config, dst=BytesIO(), volume_cmd='ctl', limit=1)
assert check_call.mock_calls == [mock.call(shell=True, args='ctl 100%')]
def test_recv_binary_search():
buf = BytesIO()
gains = [0.5, 0.25, 0.38, 0.44, 0.41, 0.39, 0.40, 0.40]
for gain in gains:
calib.send(config, buf, gain=gain, limit=2)
buf.seek(0)
dump = BytesIO()
with mock.patch('subprocess.check_call') as check_call:
calib.recv(config, src=buf, volume_cmd='ctl', dump_audio=dump)
assert dump.getvalue() == buf.getvalue()
gains.append(gains[-1])
fmt = 'ctl {0:.0f}%'
expected = [mock.call(shell=True, args=fmt.format(100 * g)) for g in gains]
assert check_call.mock_calls == expected
def test_recv_freq_change():
p = ProcessMock()
calib.send(config, p, gain=0.5, limit=2)
offset = p.buf.tell() // 16
p.buf.seek(offset)
messages = [state['msg'] for state in calib.recv_iter(config, p)]
assert messages == [
'good signal', 'good signal', 'good signal',
'frequency change',
'good signal', 'good signal', 'good signal']
|
tests/data/src/compilewheel/simple/__init__.py | rsumnerz/pip | 7,089 | 12668429 | def spam(gen):
yield from gen
|
Support/Scripts/start-android-emulator.py | cclauss/ds2 | 225 | 12668438 | #!/usr/bin/env python3
##
## Copyright (c) 2014-present, Facebook, Inc.
## All rights reserved.
##
## This source code is licensed under the University of Illinois/NCSA Open
## Source License found in the LICENSE file in the root directory of this
## source tree. An additional grant of patent rights can be found in the
## PATENTS file in the same directory.
##
import os
import argparse
import subprocess
from typing import Dict, Any, Tuple
import sys
import copy
def parse_arguments() -> argparse.Namespace:
parser = argparse.ArgumentParser()
parser.add_argument("--target", help="the target emulator to run")
parser.add_argument(
"--download-path",
help="the directory where the ndk and sdk are installed",
default="/tmp",
)
args = parser.parse_args()
return args
def clean_arguments(args: argparse.Namespace) -> argparse.Namespace:
if args.target:
os.environ["CIRCLE_JOB"] = args.target
elif os.environ.get("CIRCLE_JOB", None) is not None:
args.target = os.environ.get("CIRCLE_JOB", None)
else:
print("either the CIRCLE_JOB env var or the --target flag is required")
sys.exit(1)
return args
def get_arch_from_target(target: str) -> Tuple[str, str]:
target_arch_pairs = {
"Android-ARM": ("emulator64-arm", "arm"),
"Android-ARM64": ("emulator", "arm64"),
"Android-x86": ("emulator64-x86", "x86"),
}
archs = target_arch_pairs.get(target, None)
if archs is None:
print("either the TARGET env var or the --target flag is required")
sys.exit(1)
else:
return archs
def main() -> None:
args = parse_arguments()
args = clean_arguments(args)
emulator_arch, arch = get_arch_from_target(args.target)
sdk_dir = "{}/android-sdk-{}".format(args.download_path, sys.platform)
emulator = "{}/emulator/{}".format(sdk_dir, emulator_arch)
qt_lib_path = "{}/emulator/lib64/qt/lib".format(sdk_dir)
new_environment = os.environ.copy()
new_environment["LD_LIBRARY_PATH"] = qt_lib_path
subprocess.run(
[
emulator,
"-avd",
"android-test-{}".format(arch),
"-gpu",
"off",
"-no-window",
"-no-accel",
"-no-audio",
],
env=new_environment,
)
if __name__ == "__main__":
main()
|
tools/perf/contrib/cluster_telemetry/loading_ct.py | zealoussnow/chromium | 14,668 | 12668543 | <gh_stars>1000+
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from contrib.cluster_telemetry import loading_base_ct
# pylint: disable=protected-access
class LoadingClusterTelemetry(loading_base_ct._LoadingBaseClusterTelemetry):
@classmethod
def Name(cls):
return 'loading.cluster_telemetry'
|
2018/finals/web-mitigator/app/main.py | iicarus-bit/google-ctf | 2,757 | 12668557 | #!/usr/bin/python
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from flask import Flask, redirect, request, make_response, send_from_directory
from google.cloud import tasks_v2beta3
import logging
from urllib.parse import urlencode, quote_plus
client = tasks_v2beta3.CloudTasksClient()
parent = client.queue_path('ctf-web-kuqo48d', 'europe-west1', 'xss')
SECRET_URL = '917fh3h5721'
app = Flask(__name__, static_url_path='')
app.url_map.host_matching = (__name__ != '__main__')
@app.route('/', host="bountyplz-mitigator.web.ctfcompetition.com", methods=['GET', 'POST'])
def bountyplz():
"""Return XSS and BugBounty Form (for PoCs/xss bot)"""
poc = request.form.get('poc', '')
if poc.startswith("http"):
task = {
'app_engine_http_request': {
'app_engine_routing': {
'service': 'uxssbot',
},
'http_method': 'POST',
'relative_uri': '/submit?%s'%urlencode({'service': 'mitigator', 'url': poc}, quote_via=quote_plus),
'body': urlencode({'service': 'mitigator', 'url': poc}, quote_via=quote_plus).encode(),
}
}
res = client.create_task(parent, task)
logging.info('Created uxssbot task: ' + res.name)
return """We have received your bug report."""
return """
<html>
<script src='https://www.google.com/recaptcha/api.js'></script>
<form method=post action=/>
<div class="g-recaptcha" data-sitekey="<KEY>"></div>
<input type=url name=poc placeholder="PoC URL" value=%s>
<input type=submit>
</form>
</html>"""%poc
@app.route('/' + SECRET_URL, host="mitigator.web.ctfcompetition.com")
def login():
"""Redirect the user to the SEM"""
token = request.cookies.get('flag', 'public-demo')
return redirect("/" + SECRET_URL + ".html?flag=" + token)
@app.route('/', host="mitigator.web.ctfcompetition.com")
def backtopwn():
"""Redirect the user to pwnable"""
return redirect("http://mitigator.ctfcompetition.com:1337/index.html")
@app.route('/' + SECRET_URL + '.html', host="mitigator.web.ctfcompetition.com")
def sem():
response = make_response(app.send_static_file('sem.html'))
response.headers['content-security-policy'] = "default-src 'none'; style-src 'sha256-a6K5yWfSJ1D3n7JPrbZVrFADjNGla8XNjwqREOH1FFs='; script-src 'sha256-hJezPHmyLh3996xSSmcHvy0la57OWfPoGhLKvt40LGA=' 'sha256-9TaiPuyxl5StNVGXWFGVh2SHM62NJ9KT462mtr8Jd7Q=' https://ajax.googleapis.com/ajax/libs/jquery/3.3.1/jquery.min.js; object-src 'none'; connect-src 'self'; report-uri https://bountyplz-mitigator.web.ctfcompetition.com/";
return response
@app.route('/secret/<path:path>', host="mitigator.web.ctfcompetition.com")
def proxy(path):
return app.send_static_file('secret/' + path)
if __name__ == '__main__':
app.run(host='0.0.0.0', port=8080, debug=True)
|
detector/common/gt_transformer.py | qiu9yu/Lets_OCR | 671 | 12668578 | <reponame>qiu9yu/Lets_OCR
###
# transform original gt to location gt
###
import os
def rawGT_to_locGT(in_path, out_path):
if not os.path.exists(out_path):
os.mkdir(out_path)
files_list = os.listdir(in_path)
for name in files_list:
in_file = os.path.join(in_path, name)
out_file = os.path.join(out_path, 'gt_'+name)
f1 = open(in_file, 'r')
#f1 = codecs.open(in_file, 'r', 'utf-8-sig')
lines = f1.readlines()
f1.close()
f2 = open(out_file, 'w+')
#print("img %s %s" % (in_file, lines))
for line in lines:
line.strip()
if line.split(',')[-2] == 'Arabic':
continue
loc = line.split(',')[:8]
str1 = ",".join(loc)
str1.strip()
#print("img %s raw str is %s" % (in_file, line))
#print("img %s aft str is %s" % (in_file, str1))
f2.write(str1)
f2.write('\n')
f2.close()
rawGT_to_locGT('/home/ljs/OCR_dataset/ali_ocr/train_1000/txt_1000', '/home/ljs/data_ready/ali_icpr/gt_1000')
#rawGT_to_locGT('/home/ljs/OCR_dataset/MLT/val_gt', '/home/ljs/OCR_dataset/MLT/val_loc_gt') |
Lib/test/test_compiler/testcorpus/32_func_global_nested.py | diogommartins/cinder | 1,886 | 12668640 | <gh_stars>1000+
def foo():
global bar
def bar():
pass
|
tests/catalyst/contrib/utils/test_pandas.py | gr33n-made/catalyst | 2,693 | 12668644 | # flake8: noqa
# import pandas as pd
# import pytest
#
# from catalyst.contrib.utils.pandas import (
# folds_to_list,
# split_dataframe_on_stratified_folds,
# )
#
#
# def test_folds_to_list():
# """@TODO: Docs. Contribution is welcome."""
# assert folds_to_list("1,2,1,3,4,2,4,6") == [1, 2, 3, 4, 6]
# assert folds_to_list([1, 2, 3.0, 5, 2, 1]) == [1, 2, 3, 5]
# assert folds_to_list([]) == []
#
# with pytest.raises(ValueError):
# folds_to_list([1, "True", 3.0, None, 2, 1])
#
#
# def _setup_data(num_rows=10):
# df_data = []
# for i in range(num_rows):
# if i < (num_rows / 2):
# df_data.append(["ants", "%s.jpg" % i, 0])
# else:
# df_data.append(["bees", "%s.jpg" % i, 1])
# return pd.DataFrame(df_data, columns=["tag", "filepath", "class"])
#
#
# def test_stratified_fold_split():
# """@TODO: Docs. Contribution is welcome."""
# df = _setup_data()
#
# splitted = split_dataframe_on_stratified_folds(
# dataframe=df, class_column="class"
# )
#
# assert int == splitted["fold"].dtype
# assert set(range(5)) == set(splitted["fold"].unique())
# ants_folds = set(splitted[splitted["tag"] == "ants"]["fold"])
# bees_folds = set(splitted[splitted["tag"] == "bees"]["fold"])
# assert ants_folds == bees_folds
#
#
# def test_stratified_fold_split_num_folds():
# """@TODO: Docs. Contribution is welcome."""
# df = _setup_data()
#
# splitted = split_dataframe_on_stratified_folds(df, "class", n_folds=2)
#
# assert set(range(2)) == set(splitted["fold"].unique())
|
o365spray/core/utils/defaults.py | hackedbyagirl/o365spray | 341 | 12668655 | #!/usr/bin/env python3
from datetime import datetime
# Get the current time in YYMMDDHHMM format to append
# to file names to keep each run distinct
F_TIME = datetime.now().strftime("%y%m%d%H%M")
class DefaultFiles:
"""Global output file name defaults"""
# Log and valid output files
LOG_FILE = "raw.log"
ENUM_FILE = f"enum_valid_accounts.{F_TIME}.txt"
SPRAY_FILE = f"spray_valid_credentials.{F_TIME}.txt"
# Tested files
ENUM_TESTED = f"enum_tested_accounts.{F_TIME}.txt"
SPRAY_TESTED = f"spray_tested_credentials.{F_TIME}.txt"
# Misc. files
ENUM_IDP = f"enum_found_idp_accounts.{F_TIME}.txt"
class Defaults:
"""Global default values"""
# ANSI escape code to clear line
ERASE_LINE = "\x1b[2K"
# Valid requests.request methods to work with
HTTP_METHODS = ["get", "options", "head", "post", "put", "patch", "delete"]
# HTTP Header Configuration
HTTP_HEADERS = {
"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10.14; rv:69.0) Gecko/20100101 Firefox/69.0",
"Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8",
"Accept-Language": "en-US,en;q=0.5",
"Accept-Encoding": "gzip, deflate",
"DNT": "1",
"Connection": "keep-alive",
"Upgrade-Insecure-Requests": "1",
}
# https://docs.microsoft.com/en-us/azure/active-directory/develop/reference-aadsts-error-codes
# https://gist.github.com/byt3bl33d3r/19a48fff8fdc34cc1dd1f1d2807e1b7f
# This will be used for both Autodiscover and Azure AD
AADSTS_CODES = {
"AADSTS50053": ["LOCKED", "Account locked"],
"AADSTS50055": ["EXPIRED_PASS", "Password expired"],
"AADSTS50057": ["DISABLED", "User disabled"],
"AADSTS50126": ["INVALID_CREDS", "Invalid username or password"],
"AADSTS50059": ["MISSING_TENANT", "Tenant for account doesn't exist"],
"AADSTS50128": ["INVALID_DOMAIN", "Tenant for account doesn't exist"],
"AADSTS50034": ["USER_NOT_FOUND", "User does not exist"],
"AADSTS50079": ["VALID_MFA", "Response indicates MFA (Microsoft)"],
"AADSTS50076": ["VALID_MFA", "Response indicates MFA (Microsoft)"],
"AADSTS50158": [
"SEC_CHAL",
"Response indicates conditional access (MFA: DUO or other)",
],
"AADSTS500011": ["INVALID_RESOURCE", "Invalid resource name"],
"AADSTS700016": ["INVALID_APPID", "Invalid application client ID"],
}
# List of valid AADSTS codes to check against
VALID_AADSTS_CODES = [
"AADSTS500011", # INVALID_RESOURCE
"AADSTS700016", # INVALID_APPID
"AADSTS50055", # EXPIRED_PASS
"AADSTS50079", # VALID_MFA
"AADSTS50076", # VALID_MFA
"AADSTS50158", # SEC_CHAL
]
# List of substrings that can be found when BasicAuth is blocked
BASICAUTH_ERRORS = [
"Basic Auth Blocked",
"BasicAuthBlockStatus - Deny",
"BlockBasicAuth - User blocked",
]
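# Illustrative sketch, not part of the original module: one way the AADSTS
# table above could be consulted -- scan raw error text for a known code and
# return its (code, tag, description) classification. The function name is an
# assumption.
def classify_aadsts_error(error_text: str):
    for code, (tag, description) in Defaults.AADSTS_CODES.items():
        if code in error_text:
            return code, tag, description
    return None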
|
tests/unit/mock_logs.py | senstb/aws-elastic-beanstalk-cli | 110 | 12668657 | <reponame>senstb/aws-elastic-beanstalk-cli<filename>tests/unit/mock_logs.py<gh_stars>100-1000
# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
INSTANCE_TAIL_LOGS_RESPONSE = """-------------------------------------
/var/log/awslogs.log
-------------------------------------
{'skipped_events_count': 0, 'first_event': {'timestamp': 1522962583519, 'start_position': 559799L, 'end_position': 560017L}, 'fallback_events_count': 0, 'last_event': {'timestamp': 1522962583519, 'start_position': 559799L, 'end_position': 560017L}, 'source_id': '77b026040b93055eb448bdc0b59e446f', 'num_of_events': 1, 'batch_size_in_bytes': 243}
-------------------------------------
/var/log/httpd/error_log
-------------------------------------
[Thu Apr 05 19:54:23.624780 2018] [mpm_prefork:warn] [pid 3470] AH00167: long lost child came home! (pid 3088)
-------------------------------------
/var/log/httpd/access_log
-------------------------------------
172.31.69.153 (172.16.58.3) - - [05/Apr/2018:20:57:55 +0000] "HEAD /pma/ HTTP/1.1" 404 - "-" "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.132 Safari/537.36"
-------------------------------------
/var/log/eb-activity.log
-------------------------------------
+ chown -R webapp:webapp /var/app/ondeck
[2018-04-05T19:54:21.630Z] INFO [3555] - [Application update app-180406_044630@3/AppDeployStage0/AppDeployPreHook/02_setup_envvars.sh] : Starting activity...
-------------------------------------
/tmp/sample-app.log
-------------------------------------
2018-04-05 20:52:51 Received message: \\xe2\\x96\\x88\\xe2
-------------------------------------
/var/log/eb-commandprocessor.log
-------------------------------------
[2018-04-05T19:45:05.526Z] INFO [2853] : Running 2 of 2 actions: AppDeployPostHook..."""
REQUEST_ENVIRONMENT_INFO_RESPONSE = {
"EnvironmentInfo": [
{
"InfoType": "tail",
"Ec2InstanceId": "i-024a31a441247971d",
"SampleTimestamp": "2018-04-06T01:05:43.875Z",
"Message": "https://elasticbeanstalk-us-east-1-123123123123.s3.amazonaws.com"
},
{
"InfoType": "tail",
"Ec2InstanceId": "i-0dce0f6c5e2d5fa48",
"SampleTimestamp": "2018-04-06T01:05:43.993Z",
"Message": "https://elasticbeanstalk-us-east-1-123123123123.s3.amazonaws.com"
},
{
"InfoType": "tail",
"Ec2InstanceId": "i-090689581e5afcfc6",
"SampleTimestamp": "2018-04-06T01:05:43.721Z",
"Message": "https://elasticbeanstalk-us-east-1-123123123123.s3.amazonaws.com"
},
{
"InfoType": "tail",
"Ec2InstanceId": "i-053efe7c102d0a540",
"SampleTimestamp": "2018-04-06T01:05:43.900Z",
"Message": "https://elasticbeanstalk-us-east-1-123123123123.s3.amazonaws.com"
}
]
}
|
usaspending_api/references/v1/serializers.py | g4brielvs/usaspending-api | 217 | 12668671 | from rest_framework import serializers
from usaspending_api.common.serializers import LimitableSerializer
from usaspending_api.references.models import Cfda, ObjectClass, RefProgramActivity, SubtierAgency, ToptierAgency
class ToptierAgencySerializer(LimitableSerializer):
class Meta:
model = ToptierAgency
fields = "__all__"
default_fields = ["toptier_code", "name", "abbreviation"]
class SubtierAgencySerializer(LimitableSerializer):
class Meta:
model = SubtierAgency
fields = "__all__"
default_fields = ["subtier_code", "name", "abbreviation"]
class CfdaSerializer(LimitableSerializer):
class Meta:
model = Cfda
fields = "__all__"
default_fields = ["id", "program_number", "program_title", "popular_name", "website_address", "objectives"]
class ProgramActivitySerializer(LimitableSerializer):
class Meta:
model = RefProgramActivity
fields = ("id", "program_activity_code", "program_activity_name")
class ObjectClassSerializer(LimitableSerializer):
class Meta:
model = ObjectClass
fields = ("id", "major_object_class", "major_object_class_name", "object_class", "object_class_name")
class FilterSerializer(serializers.Serializer):
hash = serializers.CharField()
class HashSerializer(serializers.Serializer):
json_str = serializers.CharField()
|
data/transcoder_evaluation_gfg/python/LEXICOGRAPHICAL_CONCATENATION_SUBSTRINGS_STRING.py | mxl1n/CodeGen | 241 | 12668688 | # Copyright (c) 2019-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
def f_gold ( s ) :
n = len ( s ) ;
sub_count = ( n * ( n + 1 ) ) // 2 ;
arr = [ 0 ] * sub_count ;
index = 0 ;
for i in range ( n ) :
for j in range ( 1 , n - i + 1 ) :
arr [ index ] = s [ i : i + j ] ;
index += 1 ;
arr.sort ( ) ;
res = "" ;
for i in range ( sub_count ) :
res += arr [ i ] ;
return res ;
#TOFILL
if __name__ == '__main__':
param = [
('sqGOi',),
('848580',),
('01001110011001',),
('ZhWXUKmeiI',),
('0917296541285',),
('01101001111100',),
('tjP kR',),
('999907',),
('011100',),
('qJPHNSJOUj',)
]
n_success = 0
for i, parameters_set in enumerate(param):
if f_filled(*parameters_set) == f_gold(*parameters_set):
n_success+=1
print("#Results: %i, %i" % (n_success, len(param))) |
tests/config.py | timgates42/tasktiger | 1,143 | 12668690 | import os
# How much to delay scheduled tasks for testing purposes.
# Note that on macOS Catalina, when using an unsigned Python version, taskgated
# (com.apple.securityd) needs to approve launching the process. We therefore
# need ample time here (> 0.3s) in order to prevent test failures.
DELAY = 0.4
# Redis database number which will be wiped and used for the tests
TEST_DB = int(os.environ.get('REDIS_DB', 7))
# Redis hostname
REDIS_HOST = os.environ.get('REDIS_HOST', 'localhost')
|
system/models/storage.py | z1pti3/jimi | 111 | 12668699 | <filename>system/models/storage.py
from pathlib import Path
import csv
import json
import jimi
class _storageTrigger(jimi.trigger._trigger):
storage_id = str()
file_type = "csv"
def doCheck(self):
self.result = { "events" : [], "var" : {}, "plugin" : {} }
storageFile = jimi.storage._storage().getAsClass(id=self.storage_id)
try:
storageFile = storageFile[0]
with open(Path(storageFile.getLocalFilePath()),'r') as f:
if self.file_type == "csv":
self.result["events"] = list(csv.DictReader(f))
elif self.file_type == "json":
self.result["events"] = json.load(f)
elif self.file_type == "txt":
self.result["events"] = f.readlines()
except:
pass
return self.result["events"]
|
e2e/cli/mount/operation/pyfs.py | ZMaratovna/cloud-pipeline | 126 | 12668703 | <reponame>ZMaratovna/cloud-pipeline
from ..utils import execute
def mkdir(folder_path, recursive=False):
execute('mkdir ' +
('-p ' if recursive else ' ') +
('"%s" ' % folder_path))
def rm(item_path, recursive=False, force=False, under=False):
execute('rm ' +
('-r ' if recursive else ' ') +
('-f ' if force else ' ') +
(('"%s" ' % item_path) if not under else ('"%s"/* ' % item_path)))
def touch(file_path):
execute('touch "%s"' % file_path)
def mv(old_path, new_path):
execute('mv "%s" "%s"' % (old_path, new_path))
def truncate(file_path, size):
execute('truncate -s "%s" "%s"' % (size, file_path))
def fallocate(file_path, size):
execute('fallocate -l "%s" "%s"' % (size, file_path))
def head(file_path, size=None, write_to=None, append_to=None):
return execute('head ' +
(('-c %s ' % size) if size else ' ') +
('"%s" ' % file_path) +
(('> "%s" ' % write_to) if write_to else ' ') +
(('>> "%s" ' % append_to) if append_to else ' '))
def tail(file_path, size=None, write_to=None, append_to=None):
return execute('tail ' +
(('-c %s ' % size) if size else ' ') +
('"%s" ' % file_path) +
(('> "%s" ' % write_to) if write_to else ' ') +
(('>> "%s" ' % append_to) if append_to else ' '))
def cp(source_file_path, destination_file_path):
execute('cp "%s" "%s"' % (source_file_path, destination_file_path))
|
ambari-common/src/main/python/resource_management/libraries/execution_command/module_configs.py | samyzh/ambari | 1,664 | 12668709 | <gh_stars>1000+
#!/usr/bin/env python
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
__all__ = ["ModuleConfigs"]
class ModuleConfigs(object):
"""
  This class maps to "/configurations" and "/configurationAttributes" in command.json, which include configuration information of a service
"""
def __init__(self, configs, configAttributes):
self.__module_configs = configs
self.__module_config_attributes = configAttributes
def get_raw_config_dict(self):
"""
Sometimes the caller needs to access to module_configs directly
:return: config dict
"""
return self.__module_configs
def get_all_attributes(self, module_name, config_type):
"""
Retrieve attributes from /configurationAttributes/config_type
:param module_name:
:param config_type:
:return:
"""
if config_type not in self.__module_config_attributes:
return {}
try:
return self.__module_config_attributes[config_type]
except:
return {}
def get_all_properties(self, module_name, config_type):
if config_type not in self.__module_configs:
return {}
try:
return self.__module_configs[config_type]
except:
return {}
def get_properties(self, module_name, config_type, property_names, default=None):
properties = {}
try:
for property_name in property_names:
properties[property_name] = self.get_property_value(module_name, config_type, property_name, default)
except:
return {}
return properties
def get_property_value(self, module_name, config_type, property_name, default=None):
if config_type not in self.__module_configs or property_name not in self.__module_configs[config_type]:
return default
try:
value = self.__module_configs[config_type][property_name]
if value == None:
value = default
return value
except:
return default |
util/config.py | urootcom/TSDK | 331 | 12668722 | # coding:utf-8
# 目的:作为统一的配置加载,由这个来控制配置文件的更新以及导出
# 第一:文件检查,首先检查有没有配置文件对象,如果没有下载配置文件然后更新文件对象
# 第二:时间判断,请求文件获取到请求的缓存时间然后比对文件的创建时间,如果比创建时间大那么启动更新
import datetime
import pathlib
import os
class FileUpdate(object):
def __init__(self):
self.time = datetime.datetime.now()
def __get__(self,instance,instance_type):
pass
def __set__(self,instance,value):
pass
class Config(object):
def __init__(self):
self.H5_configURL = ''
self.Config_manager = {}
def getConfig(self,name):
return self.Config_manager.get(name,'')
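# Hedged sketch of the update check described in the header comments (not part
# of the original stubs): compare the local file's creation time with the cache
# time reported for the remote config and decide whether to re-download.
def needs_update(local_path: str, remote_cache_time: datetime.datetime) -> bool:
    path = pathlib.Path(local_path)
    if not path.exists():
        return True  # no local config file object yet -> download it
    created = datetime.datetime.fromtimestamp(path.stat().st_ctime)
    return remote_cache_time > created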
class TSDKError(Exception):
def __init__(self,errInfo):
super().__init__(self)
self.errinfo = errInfo
def __setError(self):
pass
def __str__(self):
return self.errinfo
if __name__ == '__main__':
pass |
etc/pending_ugens/Pitch.py | butayama/supriya | 191 | 12668724 | <gh_stars>100-1000
import collections
from supriya.enums import CalculationRate
from supriya.synthdefs import MultiOutUGen
class Pitch(MultiOutUGen):
"""
::
>>> source = supriya.ugens.In.ar(bus=0)
>>> pitch = supriya.ugens.Pitch.ar(
... amp_threshold=0.01,
... clar=0,
... down_sample=1,
... exec_frequency=100,
... init_frequency=440,
... max_bins_per_octave=16,
... max_frequency=4000,
... median=1,
... min_frequency=60,
... peak_threshold=0.5,
... source=source,
... )
>>> pitch
Pitch.ar()
"""
### CLASS VARIABLES ###
    _ordered_input_names = (
'source',
'init_frequency',
'min_frequency',
'max_frequency',
'exec_frequency',
'max_bins_per_octave',
'median',
'amp_threshold',
'peak_threshold',
'down_sample',
'clar',
)
_valid_calculation_rates = None
### INITIALIZER ###
def __init__(
self,
calculation_rate=None,
amp_threshold=0.01,
clar=0,
down_sample=1,
exec_frequency=100,
init_frequency=440,
max_bins_per_octave=16,
max_frequency=4000,
median=1,
min_frequency=60,
peak_threshold=0.5,
source=None,
):
MultiOutUGen.__init__(
self,
calculation_rate=calculation_rate,
amp_threshold=amp_threshold,
clar=clar,
down_sample=down_sample,
exec_frequency=exec_frequency,
init_frequency=init_frequency,
max_bins_per_octave=max_bins_per_octave,
max_frequency=max_frequency,
median=median,
min_frequency=min_frequency,
peak_threshold=peak_threshold,
source=source,
)
### PUBLIC METHODS ###
@classmethod
def kr(
cls,
amp_threshold=0.01,
clar=0,
down_sample=1,
exec_frequency=100,
init_frequency=440,
max_bins_per_octave=16,
max_frequency=4000,
median=1,
min_frequency=60,
peak_threshold=0.5,
source=None,
):
"""
Constructs a control-rate Pitch.
::
>>> source = supriya.ugens.In.ar(bus=0)
>>> pitch = supriya.ugens.Pitch.kr(
... amp_threshold=0.01,
... clar=0,
... down_sample=1,
... exec_frequency=100,
... init_frequency=440,
... max_bins_per_octave=16,
... max_frequency=4000,
... median=1,
... min_frequency=60,
... peak_threshold=0.5,
... source=source,
... )
>>> pitch
Pitch.kr()
Returns ugen graph.
"""
import supriya.synthdefs
calculation_rate = supriya.CalculationRate.CONTROL
ugen = cls._new_expanded(
calculation_rate=calculation_rate,
amp_threshold=amp_threshold,
clar=clar,
down_sample=down_sample,
exec_frequency=exec_frequency,
init_frequency=init_frequency,
max_bins_per_octave=max_bins_per_octave,
max_frequency=max_frequency,
median=median,
min_frequency=min_frequency,
peak_threshold=peak_threshold,
source=source,
)
return ugen
# def newFromDesc(): ...
### PUBLIC PROPERTIES ###
@property
def amp_threshold(self):
"""
Gets `amp_threshold` input of Pitch.
::
>>> source = supriya.ugens.In.ar(bus=0)
>>> pitch = supriya.ugens.Pitch.ar(
... amp_threshold=0.01,
... clar=0,
... down_sample=1,
... exec_frequency=100,
... init_frequency=440,
... max_bins_per_octave=16,
... max_frequency=4000,
... median=1,
... min_frequency=60,
... peak_threshold=0.5,
... source=source,
... )
>>> pitch.amp_threshold
0.01
Returns ugen input.
"""
index = self._ordered_input_names.index('amp_threshold')
return self._inputs[index]
@property
def clar(self):
"""
Gets `clar` input of Pitch.
::
>>> source = supriya.ugens.In.ar(bus=0)
>>> pitch = supriya.ugens.Pitch.ar(
... amp_threshold=0.01,
... clar=0,
... down_sample=1,
... exec_frequency=100,
... init_frequency=440,
... max_bins_per_octave=16,
... max_frequency=4000,
... median=1,
... min_frequency=60,
... peak_threshold=0.5,
... source=source,
... )
>>> pitch.clar
0.0
Returns ugen input.
"""
index = self._ordered_input_names.index('clar')
return self._inputs[index]
@property
def down_sample(self):
"""
Gets `down_sample` input of Pitch.
::
>>> source = supriya.ugens.In.ar(bus=0)
>>> pitch = supriya.ugens.Pitch.ar(
... amp_threshold=0.01,
... clar=0,
... down_sample=1,
... exec_frequency=100,
... init_frequency=440,
... max_bins_per_octave=16,
... max_frequency=4000,
... median=1,
... min_frequency=60,
... peak_threshold=0.5,
... source=source,
... )
>>> pitch.down_sample
1.0
Returns ugen input.
"""
index = self._ordered_input_names.index('down_sample')
return self._inputs[index]
@property
def exec_frequency(self):
"""
Gets `exec_frequency` input of Pitch.
::
>>> source = supriya.ugens.In.ar(bus=0)
>>> pitch = supriya.ugens.Pitch.ar(
... amp_threshold=0.01,
... clar=0,
... down_sample=1,
... exec_frequency=100,
... init_frequency=440,
... max_bins_per_octave=16,
... max_frequency=4000,
... median=1,
... min_frequency=60,
... peak_threshold=0.5,
... source=source,
... )
>>> pitch.exec_frequency
100.0
Returns ugen input.
"""
index = self._ordered_input_names.index('exec_frequency')
return self._inputs[index]
@property
def init_frequency(self):
"""
Gets `init_frequency` input of Pitch.
::
>>> source = supriya.ugens.In.ar(bus=0)
>>> pitch = supriya.ugens.Pitch.ar(
... amp_threshold=0.01,
... clar=0,
... down_sample=1,
... exec_frequency=100,
... init_frequency=440,
... max_bins_per_octave=16,
... max_frequency=4000,
... median=1,
... min_frequency=60,
... peak_threshold=0.5,
... source=source,
... )
>>> pitch.init_frequency
440.0
Returns ugen input.
"""
index = self._ordered_input_names.index('init_frequency')
return self._inputs[index]
@property
def max_bins_per_octave(self):
"""
Gets `max_bins_per_octave` input of Pitch.
::
>>> source = supriya.ugens.In.ar(bus=0)
>>> pitch = supriya.ugens.Pitch.ar(
... amp_threshold=0.01,
... clar=0,
... down_sample=1,
... exec_frequency=100,
... init_frequency=440,
... max_bins_per_octave=16,
... max_frequency=4000,
... median=1,
... min_frequency=60,
... peak_threshold=0.5,
... source=source,
... )
>>> pitch.max_bins_per_octave
16.0
Returns ugen input.
"""
index = self._ordered_input_names.index('max_bins_per_octave')
return self._inputs[index]
@property
def max_frequency(self):
"""
Gets `max_frequency` input of Pitch.
::
>>> source = supriya.ugens.In.ar(bus=0)
>>> pitch = supriya.ugens.Pitch.ar(
... amp_threshold=0.01,
... clar=0,
... down_sample=1,
... exec_frequency=100,
... init_frequency=440,
... max_bins_per_octave=16,
... max_frequency=4000,
... median=1,
... min_frequency=60,
... peak_threshold=0.5,
... source=source,
... )
>>> pitch.max_frequency
4000.0
Returns ugen input.
"""
index = self._ordered_input_names.index('max_frequency')
return self._inputs[index]
@property
def median(self):
"""
Gets `median` input of Pitch.
::
>>> source = supriya.ugens.In.ar(bus=0)
>>> pitch = supriya.ugens.Pitch.ar(
... amp_threshold=0.01,
... clar=0,
... down_sample=1,
... exec_frequency=100,
... init_frequency=440,
... max_bins_per_octave=16,
... max_frequency=4000,
... median=1,
... min_frequency=60,
... peak_threshold=0.5,
... source=source,
... )
>>> pitch.median
1.0
Returns ugen input.
"""
index = self._ordered_input_names.index('median')
return self._inputs[index]
@property
def min_frequency(self):
"""
Gets `min_frequency` input of Pitch.
::
>>> source = supriya.ugens.In.ar(bus=0)
>>> pitch = supriya.ugens.Pitch.ar(
... amp_threshold=0.01,
... clar=0,
... down_sample=1,
... exec_frequency=100,
... init_frequency=440,
... max_bins_per_octave=16,
... max_frequency=4000,
... median=1,
... min_frequency=60,
... peak_threshold=0.5,
... source=source,
... )
>>> pitch.min_frequency
60.0
Returns ugen input.
"""
index = self._ordered_input_names.index('min_frequency')
return self._inputs[index]
@property
def peak_threshold(self):
"""
Gets `peak_threshold` input of Pitch.
::
>>> source = supriya.ugens.In.ar(bus=0)
>>> pitch = supriya.ugens.Pitch.ar(
... amp_threshold=0.01,
... clar=0,
... down_sample=1,
... exec_frequency=100,
... init_frequency=440,
... max_bins_per_octave=16,
... max_frequency=4000,
... median=1,
... min_frequency=60,
... peak_threshold=0.5,
... source=source,
... )
>>> pitch.peak_threshold
0.5
Returns ugen input.
"""
index = self._ordered_input_names.index('peak_threshold')
return self._inputs[index]
@property
def source(self):
"""
Gets `source` input of Pitch.
::
>>> source = supriya.ugens.In.ar(bus=0)
>>> pitch = supriya.ugens.Pitch.ar(
... amp_threshold=0.01,
... clar=0,
... down_sample=1,
... exec_frequency=100,
... init_frequency=440,
... max_bins_per_octave=16,
... max_frequency=4000,
... median=1,
... min_frequency=60,
... peak_threshold=0.5,
... source=source,
... )
>>> pitch.source
OutputProxy(
source=In(
bus=0.0,
calculation_rate=CalculationRate.AUDIO,
channel_count=1
),
output_index=0
)
Returns ugen input.
"""
index = self._ordered_input_names.index('source')
return self._inputs[index]
|
paas-ce/paas/esb/components/bk/apis/fta/fta_component.py | renmcc/bk-PaaS | 767 | 12668757 | # -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making 蓝鲸智云PaaS平台社区版 (BlueKing PaaS Community Edition) available.
Copyright (C) 2017-2018 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at
http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
""" # noqa
import json
from esb.utils.base import has_path_vars
from common.errors import error_codes
from components.component import BaseComponent, SetupConfMixin
from .toolkit import configs
class FtaComponent(BaseComponent, SetupConfMixin):
sys_name = configs.SYSTEM_NAME
def handle(self):
        # Replace variable templates in the destination path
path = self.dest_path
if has_path_vars(self.dest_path):
path_vars = self.request.path_vars and self.request.path_vars.val_dict or {}
try:
path = self.dest_path.format(**path_vars)
        except KeyError as e:
raise error_codes.BUFFET_CANNOT_FORMAT_PATH.format_prompt('{%s}' % e.args[0])
        # Request parameters
params, data = None, None
if self.dest_http_method == 'GET':
params = self.request.kwargs
headers = {'Content-Type': 'application/x-www-form-urlencoded'}
elif self.dest_http_method == 'POST':
data = json.dumps(self.request.kwargs)
headers = {'Content-Type': 'application/json'}
if 'X-Secret' in self.request.headers:
headers.update({'X-Secret': self.request.headers['X-Secret']})
        # Call the remote API
response = self.outgoing.http_client.request(
self.dest_http_method,
configs.host,
path,
params=params,
data=data,
headers=headers,
timeout=60,
)
self.response.payload = response
|
bravado/_equality_util.py | educatedguessing/bravado | 600 | 12668760 | # -*- coding: utf-8 -*-
from itertools import chain
import typing
from six import iterkeys
def are_objects_equal(obj1, obj2, attributes_to_ignore=None):
# type: (typing.Any, typing.Any, typing.Optional[typing.Set[typing.Text]]) -> bool
"""
Helper method that checks if two objects are the same.
This is very generic and basically ensures that all the attributes
of both objects are defined and are the same.
    NOTE: Sometimes some attributes create recursive references to the same objects
and/or some objects do not define equality checks (ie. missing __eq__ method).
Those attributes might be ignored via attributes_to_ignore parameter
"""
if id(obj1) == id(obj2):
return True
if not isinstance(obj2, obj1.__class__):
return False
if attributes_to_ignore is None:
attributes_to_ignore = set()
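    # Compare the union of attribute names from both objects, so an attribute
    # present on only one of them also makes the comparison fail.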
for attr_name in set(
chain(
iterkeys(obj1.__dict__),
iterkeys(obj2.__dict__),
),
):
if attr_name in attributes_to_ignore:
continue
try:
if not (getattr(obj1, attr_name) == getattr(obj2, attr_name)):
return False
except AttributeError:
return False
return True
|
kicost/currency_converter/currency_tables.py | karlp/KiCost | 338 | 12668783 | <reponame>karlp/KiCost<gh_stars>100-1000
#!/usr/bin/python3
# -*- coding: utf-8 -*-
currency_symbols = {'AUD': 'A$',
'BGN': 'BGN',
'BRL': 'R$',
'CAD': 'CA$',
'CHF': 'CHF',
'CNY': 'CN¥',
'CZK': 'CZK',
'DKK': 'DKK',
'EUR': '€',
'GBP': '£',
'HKD': 'HK$',
'HRK': 'HRK',
'HUF': 'HUF',
'IDR': 'IDR',
'ILS': '₪',
'INR': '₹',
'ISK': 'ISK',
'JPY': '¥',
'KRW': '₩',
'MXN': 'MX$',
'MYR': 'MYR',
'NOK': 'NOK',
'NZD': 'NZ$',
'PHP': 'PHP',
'PLN': 'PLN',
'RON': 'RON',
'RUB': 'RUB',
'SEK': 'SEK',
'SGD': 'SGD',
'THB': 'THB',
'TRY': 'TRY',
'USD': '$',
'ZAR': 'ZAR',
}
currency_names = {'AUD': 'Australian Dollar',
'BGN': 'Bulgarian Lev',
'BRL': 'Brazilian Real',
'CAD': 'Canadian Dollar',
'CHF': 'Swiss Franc',
'CNY': 'Chinese Yuan',
'CZK': 'Czech Koruna',
'DKK': 'Danish Krone',
'EUR': 'Euro',
'GBP': 'British Pound',
'HKD': 'Hong Kong Dollar',
'HRK': 'Croatian Kuna',
'HUF': 'Hungarian Forint',
'IDR': 'Indonesian Rupiah',
'ILS': 'Israeli New Shekel',
'INR': 'Indian Rupee',
'ISK': 'Icelandic Króna',
'JPY': 'Japanese Yen',
'KRW': 'South Korean Won',
'MXN': 'Mexican Peso',
'MYR': 'Malaysian Ringgit',
'NOK': 'Norwegian Krone',
'NZD': 'New Zealand Dollar',
'PHP': 'Philippine Piso',
'PLN': 'Polish Zloty',
'RON': 'Romanian Leu',
'RUB': 'Russian Ruble',
'SEK': 'Swedish Krona',
'SGD': 'Singapore Dollar',
'THB': 'Thai Baht',
'TRY': 'Turkish Lira',
'USD': 'US Dollar',
'ZAR': 'South African Rand',
}
|
compiler/bitcells/replica_bitcell_1port.py | im-world/OpenRAM | 335 | 12668805 | <reponame>im-world/OpenRAM<filename>compiler/bitcells/replica_bitcell_1port.py
# See LICENSE for licensing information.
#
# Copyright (c) 2016-2021 Regents of the University of California and The Board
# of Regents for the Oklahoma Agricultural and Mechanical College
# (acting for and on behalf of Oklahoma State University)
# All rights reserved.
#
import debug
import bitcell_base
from tech import cell_properties as props
from tech import parameter, drc
import logical_effort
class replica_bitcell_1port(bitcell_base.bitcell_base):
"""
A single bit cell (6T, 8T, etc.)
This module implements the single memory cell used in the design. It
is a hand-made cell, so the layout and netlist should be available in
the technology library. """
def __init__(self, name):
super().__init__(name, prop=props.bitcell_1port)
debug.info(2, "Create replica bitcell object")
def get_stage_effort(self, load):
parasitic_delay = 1
        size = 0.5 # This accounts for bitline being drained through the access TX and internal node
cin = 3 # Assumes always a minimum sizes inverter. Could be specified in the tech.py file.
read_port_load = 0.5 # min size NMOS gate load
return logical_effort.logical_effort('bitline', size, cin, load + read_port_load, parasitic_delay, False)
def input_load(self):
"""Return the relative capacitance of the access transistor gates"""
# FIXME: This applies to bitline capacitances as well.
access_tx_cin = parameter["6T_access_size"] / drc["minwidth_tx"]
return 2 * access_tx_cin
def analytical_power(self, corner, load):
"""Bitcell power in nW. Only characterizes leakage."""
from tech import spice
leakage = spice["bitcell_leakage"]
dynamic = 0 # FIXME
total_power = self.return_power(dynamic, leakage)
return total_power
def build_graph(self, graph, inst_name, port_nets):
"""Adds edges based on inputs/outputs. Overrides base class function."""
self.add_graph_edges(graph, port_nets)
def is_non_inverting(self):
"""Return input to output polarity for module"""
return False |
scripts/type_extractor/optimize_jsons.py | mehrdad-shokri/retdec | 4,816 | 12668807 | #!/usr/bin/env python3
"""1. Substitutes SHA1 hash in JSONs with natural numbers.
C types in different files will have different keys, therefore you should not
try to merge them!
2. Removes qualifier types.
"""
import multiprocessing
import sys
from type_extractor.arg_parser import get_arg_parser_for_optimize_jsons
from type_extractor.io import load_json_file
from type_extractor.io import print_json_file
from type_extractor.remove_json_types import remove_qualifier_json_types
from type_extractor.substitute_json_keys import substitute_json_keys_with_natural_numbers
from type_extractor.utils import get_files_with_suffix_from_all_paths
def parse_args():
"""Parses script arguments and returns them."""
parser = get_arg_parser_for_optimize_jsons(__doc__)
return parser.parse_args()
def optimize_json(json_file):
content = load_json_file(json_file)
substitute_json_keys_with_natural_numbers(content)
remove_qualifier_json_types(content)
with open(json_file, 'w') as out:
print_json_file(out, content, args.json_indent)
def main(args):
with multiprocessing.Pool() as pool:
pool.map(
optimize_json,
get_files_with_suffix_from_all_paths(args.path, '.json')
)
# We have to parse arguments and setup logging here because of the way the
# multiprocessing module works on Windows.
args = parse_args()
if __name__ == '__main__':
sys.exit(main(args))
|
panoptic_mapping_utils/src/rio/rio_player.py | YuePanEdward/panoptic_mapping | 101 | 12668832 | <filename>panoptic_mapping_utils/src/rio/rio_player.py
#!/usr/bin/env python3
import os
from struct import pack, unpack
import json
import numpy as np
import rospy
from cv_bridge import CvBridge
import tf
import tf2_ros
import cv2
from sensor_msgs.msg import Image, PointCloud2, PointField
from geometry_msgs.msg import Pose, TransformStamped
from panoptic_mapping_msgs.msg import DetectronLabel, DetectronLabels
# These are the standard data-hashes used. Can be replaced via ROS-param.
DATA_IDS = [[
'0cac7578-8d6f-2d13-8c2d-bfa7a04f8af3',
'2451c041-fae8-24f6-9213-b8b6af8d86c1',
'ddc73793-765b-241a-9ecd-b0cebb7cf916',
'ddc73795-765b-241a-9c5d-b97744afe077'
],
[
'20c9939d-698f-29c5-85c6-3c618e00061f',
'f62fd5f8-9a3f-2f44-8b1e-1289a3a61e26'
]]
class CameraIntrinsics(object):
def __init__(self):
self.width = 0
self.height = 0
self.center_x = 0
self.center_y = 0
self.fx = 0
self.fy = 0
class RioPlayer(object):
def __init__(self):
""" Initialize ros node and read params """
# params
self.base_path = rospy.get_param(
'~base_path', '/home/lukas/Documents/Datasets/3RScan')
self.data_ids = rospy.get_param('~data_ids', DATA_IDS)
self.scene_id = rospy.get_param('~scene_id', 0)
self.scan_id = rospy.get_param('~scan_id', 0)
self.rate = float(rospy.get_param('~play_rate', 5)) # Hz
self.use_detectron = rospy.get_param('~use_detectron', 0)
self.frame_name = rospy.get_param('~frame_name', 'rio')
self.global_frame_name = rospy.get_param('~global_frame_name', 'world')
self.use_rendered_data = rospy.get_param('~use_rendered_data', False)
self.adjust_image_size = rospy.get_param('~adjust_image_size', True)
self.wait_time = rospy.get_param('~wait_time', 1.0) # s
# ROS
self.color_pub = rospy.Publisher("~color_image", Image, queue_size=10)
self.depth_pub = rospy.Publisher("~depth_image", Image, queue_size=10)
self.seg_pub = rospy.Publisher("~segmentation_image",
Image,
queue_size=10)
self.pose_pub = rospy.Publisher("~pose", Pose, queue_size=10)
self.pcl_pub = rospy.Publisher("~pointcloud",
PointCloud2,
queue_size=10)
if self.use_detectron:
self.label_pub = rospy.Publisher("~labels",
DetectronLabels,
queue_size=100)
# Get target path
self.data_id = self.data_ids[self.scene_id][self.scan_id]
# Read intrinsics
info_file = os.path.join(self.base_path, self.data_id, "sequence",
"_info.txt")
if not os.path.isfile(info_file):
rospy.logerr("[RIO Player] Info file '%s' does not exist." %
info_file)
else:
lines = open(info_file, 'r').readlines()
self.color_cam = CameraIntrinsics()
self.depth_cam = CameraIntrinsics()
            # Currently hard coded since the data format hopefully doesn't change.
self.color_cam.width = int(lines[2][15:])
self.color_cam.height = int(lines[3][16:])
self.color_cam.center_x = float(lines[7][30:].split()[2])
self.color_cam.center_y = float(lines[7][30:].split()[6])
self.color_cam.fx = float(lines[7][30:].split()[0])
self.color_cam.fy = float(lines[7][30:].split()[5])
self.depth_cam.width = int(lines[4][15:])
self.depth_cam.height = int(lines[5][16:])
self.depth_cam.center_x = float(lines[9][30:].split()[2])
self.depth_cam.center_y = float(lines[9][30:].split()[6])
self.depth_cam.fx = float(lines[9][30:].split()[0])
self.depth_cam.fy = float(lines[9][30:].split()[5])
# Get transform to reference
ref_file = os.path.join(self.base_path, "3RScan.json")
self.T_ref = np.eye(4)
if not os.path.isfile(ref_file):
rospy.logerr("[RIO Player] Meta data file '%s' does not exist." %
ref_file)
else:
with open(ref_file) as json_file:
index = json.load(json_file)
transform_found = False
transform_str = ""
for i in range(478):
info = index[i]
if transform_found:
break
if info['reference'] == self.data_id:
# It's a reference scan
transform_found = True
for scan in info['scans']:
if scan['reference'] == self.data_id:
transform = scan['transform']
for r in range(4):
for c in range(4):
self.T_ref[r, c] = transform[c * 4 + r]
transform_str += "%f " % transform[c * 4 +
r]
transform_found = True
break
if transform_found:
rospy.loginfo(
"[RIO Player] Initialized reference transform:")
self.static_tf = tf2_ros.StaticTransformBroadcaster()
msg = TransformStamped()
msg.header.stamp = rospy.Time.now()
msg.header.frame_id = self.global_frame_name
msg.child_frame_id = self.frame_name + "_ref"
msg.transform.translation.x = self.T_ref[0, 3]
msg.transform.translation.y = self.T_ref[1, 3]
msg.transform.translation.z = self.T_ref[2, 3]
rotation = tf.transformations.quaternion_from_matrix(
self.T_ref)
msg.transform.rotation.x = rotation[0]
msg.transform.rotation.y = rotation[1]
msg.transform.rotation.z = rotation[2]
msg.transform.rotation.w = rotation[3]
self.static_tf.sendTransform(msg)
# setup
self.cv_bridge = CvBridge()
self.tf_broadcaster = tf.TransformBroadcaster()
self.frame_no = 0
rospy.sleep(self.wait_time)
self.timer = rospy.Timer(rospy.Duration(1. / self.rate), self.timer_cb)
def timer_cb(self, _):
frame_name = "frame-%06d" % self.frame_no
time_stamp = rospy.Time.now()
# use color to check for existence.
color_file = os.path.join(self.base_path, self.data_id, "sequence",
frame_name + ".color.jpg")
if not os.path.isfile(color_file):
rospy.logwarn("[RIO Player] No more frames found (published %i)." %
self.frame_no)
rospy.signal_shutdown("No more frames found.")
return
# transformation
pose_file = os.path.join(self.base_path, self.data_id, "sequence",
frame_name + ".pose.txt")
pose_data = [float(x) for x in open(pose_file, 'r').read().split()]
transform = np.eye(4)
for row in range(4):
for col in range(4):
transform[row, col] = pose_data[row * 4 + col]
rotation = tf.transformations.quaternion_from_matrix(transform)
self.tf_broadcaster.sendTransform(
(transform[0, 3], transform[1, 3], transform[2, 3]), rotation,
time_stamp, self.frame_name, self.frame_name + "_ref")
pose_msg = Pose()
pose_msg.position.x = pose_data[3]
pose_msg.position.y = pose_data[7]
pose_msg.position.z = pose_data[11]
pose_msg.orientation.x = rotation[0]
pose_msg.orientation.y = rotation[1]
pose_msg.orientation.z = rotation[2]
pose_msg.orientation.w = rotation[3]
self.pose_pub.publish(pose_msg)
# depth image
if self.use_rendered_data:
depth_file = os.path.join(self.base_path, self.data_id, "sequence",
frame_name + ".rendered.depth.png")
cv_depth = cv2.imread(depth_file, -1)
cv_depth = cv2.rotate(cv_depth, cv2.ROTATE_90_COUNTERCLOCKWISE)
cv_depth = np.array(cv_depth, dtype=np.float32) / 1000
# Compute 3d cloud because it's needed anyway later
cols, rows = np.meshgrid(
np.linspace(0,
self.color_cam.width - 1,
num=self.color_cam.width),
np.linspace(0,
self.color_cam.height - 1,
num=self.color_cam.height))
im_x = (cols - self.color_cam.center_x) / self.color_cam.fx
im_y = (rows - self.color_cam.center_y) / self.color_cam.fy
else:
depth_file = os.path.join(self.base_path, self.data_id, "sequence",
frame_name + ".depth.pgm")
cv_depth = cv2.imread(depth_file, -1)
cv_depth = np.array(cv_depth, dtype=np.float32) / 1000
# Compute 3d cloud because it's needed anyway later
cols, rows = np.meshgrid(
np.linspace(0,
self.depth_cam.width - 1,
num=self.depth_cam.width),
np.linspace(0,
self.depth_cam.height - 1,
num=self.depth_cam.height))
im_x = (cols - self.depth_cam.center_x) / self.depth_cam.fx
im_y = (rows - self.depth_cam.center_y) / self.depth_cam.fy
depth_msg = self.cv_bridge.cv2_to_imgmsg(cv_depth, "32FC1")
depth_msg.header.stamp = time_stamp
depth_msg.header.frame_id = self.frame_name
self.depth_pub.publish(depth_msg)
# color image
if self.use_rendered_data:
color_file = os.path.join(self.base_path, self.data_id, "sequence",
frame_name + ".rendered.color.jpg")
cv_img = cv2.imread(color_file)
cv_img = cv2.rotate(cv_img, cv2.ROTATE_90_COUNTERCLOCKWISE)
else:
cv_img = cv2.imread(color_file)
if self.adjust_image_size:
color_img = np.zeros(
(self.depth_cam.height, self.depth_cam.width, 3),
dtype=np.uint8)
im_u = im_x * self.color_cam.fx + self.color_cam.center_x
im_v = im_y * self.color_cam.fy + self.color_cam.center_y
im_u = np.array(np.clip(np.round(im_u), 0,
self.color_cam.width - 1),
dtype=int)
im_v = np.array(np.clip(np.round(im_v), 0,
self.color_cam.height - 1),
dtype=int)
for u in range(self.depth_cam.width):
for v in range(self.depth_cam.height):
color_img[v, u, :] = cv_img[im_v[v, u], im_u[v, u], :]
cv_img = color_img
color_msg = self.cv_bridge.cv2_to_imgmsg(cv_img, "bgr8")
color_msg.header.stamp = time_stamp
color_msg.header.frame_id = self.frame_name
self.color_pub.publish(color_msg)
# segmentation image
if self.use_detectron:
seg_file = os.path.join(self.base_path, self.data_id, "sequence",
frame_name + ".predicted.png")
cv_seg = cv2.imread(seg_file)
# Load and publish labels.
labels_file = os.path.join(self.base_path, self.data_id,
"sequence",
frame_name + ".detectronlabels.json")
label_msg = DetectronLabels()
label_msg.header.stamp = time_stamp
with open(labels_file) as json_file:
data = json.load(json_file)
for d in data:
if 'instance_id' not in d:
d['instance_id'] = 0
if 'score' not in d:
d['score'] = 0
label = DetectronLabel()
label.id = d['id']
label.instance_id = d['instance_id']
label.is_thing = d['isthing']
label.category_id = d['category_id']
label.score = d['score']
label_msg.labels.append(label)
self.label_pub.publish(label_msg)
else:
seg_file = os.path.join(self.base_path, self.data_id, "sequence",
frame_name + ".panlabels.png")
cv_seg = cv2.imread(seg_file)
cv_seg = cv2.rotate(cv_seg, cv2.ROTATE_90_COUNTERCLOCKWISE)
cv_seg = cv_seg[:, :, 0]
seg_img = np.zeros((self.depth_cam.height, self.depth_cam.width),
dtype=np.uint8)
for u in range(self.depth_cam.width):
for v in range(self.depth_cam.height):
seg_img[v, u] = cv_seg[im_v[v, u], im_u[v, u]]
cv_seg = seg_img
seg_msg = self.cv_bridge.cv2_to_imgmsg(cv_seg, "mono8")
seg_msg.header.stamp = time_stamp
seg_msg.header.frame_id = self.frame_name
self.seg_pub.publish(seg_msg)
# Create pointcloud
# get 3d cloud
points_x = im_x.reshape(-1)
points_y = im_y.reshape(-1)
points_z = cv_depth.reshape(-1)
# pack color image
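        # PointCloud2 expects the 'rgb' field as a float32 whose bit pattern
        # encodes 0x00RRGGBB, so pack the shifted integers to bytes and
        # reinterpret those bytes as floats.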
r = np.ravel(cv_img[:, :, 2]).astype(int)
g = np.ravel(cv_img[:, :, 1]).astype(int)
b = np.ravel(cv_img[:, :, 0]).astype(int)
color = np.left_shift(r, 16) + np.left_shift(g, 8) + b
packed = pack('%di' % len(color), *color)
unpacked = unpack('%df' % len(color), packed)
rgb = np.array(unpacked)
# filter invalid points
# publish result
data = np.transpose(np.vstack((points_x, points_y, points_z, rgb)))
msg = PointCloud2()
msg.header.stamp = time_stamp
msg.header.frame_id = self.frame_name
msg.width = data.shape[0]
msg.height = 1
msg.fields = [
PointField('x', 0, PointField.FLOAT32, 1),
PointField('y', 4, PointField.FLOAT32, 1),
PointField('z', 8, PointField.FLOAT32, 1),
PointField('rgb', 12, PointField.FLOAT32, 1)
]
msg.is_bigendian = False
msg.point_step = 16
msg.row_step = msg.point_step * msg.width
msg.is_dense = True
msg.data = np.float32(data).tostring()
self.pcl_pub.publish(msg)
# finish
self.frame_no = self.frame_no + 1
if __name__ == '__main__':
rospy.init_node('rio_player', anonymous=True)
rio_player = RioPlayer()
rospy.spin()
|
ObjectOrientedProgramming/IdeFiles/3b_answer_python_package/distributions/__init__.py | vmukund100/dsnd_vm | 1,030 | 12668842 | from .Gaussiandistribution import Gaussian |
test_python_toolbox/test_dict_tools/test_remove_keys.py | hboshnak/python_toolbox | 119 | 12668873 | <filename>test_python_toolbox/test_dict_tools/test_remove_keys.py
# Copyright 2009-2017 <NAME>.
# This program is distributed under the MIT license.
import numbers
from python_toolbox.dict_tools import remove_keys
def test():
'''Test the basic workings of `sum_dicts`.'''
origin_dict = {1: 2, 3: 4, 5: 6, 7: 8, 9: 10, 11: 12, 13: 14, 15: 16,}
not_divide_by_three_dict = dict(origin_dict)
remove_keys(not_divide_by_three_dict, range(0, 50, 3))
assert not_divide_by_three_dict == {1: 2, 5: 6, 7: 8, 11: 12, 13: 14}
below_ten_dict = dict(origin_dict)
remove_keys(below_ten_dict, lambda value: value >= 10)
assert below_ten_dict == {1: 2, 3: 4, 5: 6, 7: 8, 9: 10}
class HoledNumbersContainer:
'''Contains only numbers that have a digit with a hole in it.'''
def __contains__(self, number):
if not isinstance(number, numbers.Integral):
return False
return bool(set(str(number)).intersection({'0', '4', '6', '8', '9'}))
non_holed_numbers_dict = dict(origin_dict)
remove_keys(non_holed_numbers_dict, HoledNumbersContainer())
assert non_holed_numbers_dict == {1: 2, 3: 4, 5: 6, 7: 8, 11: 12, 13: 14,
15: 16,}
|
docs/components_page/components/spinner/size.py | glsdown/dash-bootstrap-components | 776 | 12668889 | import dash_bootstrap_components as dbc
from dash import html
spinners = html.Div(
[
dbc.Spinner(size="sm"),
html.Hr(),
dbc.Spinner(spinner_style={"width": "3rem", "height": "3rem"}),
]
)
|
algorithms/countingsort1.py | gajubadge11/HackerRank-1 | 340 | 12668923 | #!/bin/python3
import sys
def countingSort(arr):
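    # Build a frequency table: output[v] is how many times value v occurs in
    # arr (this HackerRank task returns the counts, not the sorted array).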
output = [0] * (max(arr) + 1)
for el in arr:
output[el] += 1
return output
if __name__ == "__main__":
n = int(input().strip())
arr = list(map(int, input().strip().split(' ')))
result = countingSort(arr)
print (" ".join(map(str, result)))
|
python/example_code/lookoutvision/datasets.py | cfuerst/aws-doc-sdk-examples | 5,166 | 12669000 | <gh_stars>1000+
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
"""
Amazon Lookout for Vision dataset code examples used in the service documentation:
https://docs.aws.amazon.com/lookout-for-vision/latest/developer-guide/model-create-dataset.html
Shows how to create and manage datasets. Also, how to create a manifest file and
upload to an S3 bucket.
"""
import logging
import time
from datetime import datetime
import os
import json
import boto3
from botocore.exceptions import ClientError
logger = logging.getLogger(__name__)
class Datasets:
"""
Provides example functions for creating, listing and deleting Amazon Lookout
for Vision datasets. Also shows how to create a manifest file in an S3 bucket.
"""
@staticmethod
def create_dataset(lookoutvision_client, project_name, manifest_file, dataset_type):
"""
Creates a new Amazon Lookout for Vision dataset
:param lookoutvision_client: The Amazon Lookout for Vision Boto3 client.
:param project_name: The name of the project in which you want to
create a dataset.
        :param manifest_file: The S3 URI (s3://bucket/key) of the manifest file.
:param dataset_type: The type of the dataset (train or test).
"""
try:
bucket, key = manifest_file.replace("s3://", "").split("/", 1)
# Create a dataset
logger.info("Creating %s dataset type...", dataset_type)
dataset = {
"GroundTruthManifest": {"S3Object": {"Bucket": bucket, "Key": key}}
}
response = lookoutvision_client.create_dataset(
ProjectName=project_name,
DatasetType=dataset_type,
DatasetSource=dataset,
)
logger.info("Dataset Status: %s", response["DatasetMetadata"]["Status"])
logger.info(
"Dataset Status Message: %s",
response["DatasetMetadata"]["StatusMessage"],
)
logger.info("Dataset Type: %s", response["DatasetMetadata"]["DatasetType"])
# Wait until either created or failed.
finished = False
status = ""
while finished is False:
dataset_description = lookoutvision_client.describe_dataset(
ProjectName=project_name, DatasetType=dataset_type
)
status = dataset_description["DatasetDescription"]["Status"]
if status == "CREATE_IN_PROGRESS":
logger.info("Dataset creation in progress...")
time.sleep(2)
continue
if status == "CREATE_COMPLETE":
logger.info("Dataset created.")
finished = True
continue
logger.info(
"Dataset creation failed: %s",
dataset_description["DatasetDescription"]["StatusMessage"],
)
finished = True
if status != "CREATE_COMPLETE":
message = dataset_description["DatasetDescription"]["StatusMessage"]
logger.exception("Couldn't create dataset: %s", message)
raise Exception(f"Couldn't create dataset: {message}")
except ClientError as err:
logger.exception(
"Service error: Couldn't create dataset: %s", err.response["Message"]
)
raise
@staticmethod
def create_manifest_file_s3(s3_resource, image_s3_path, manifest_s3_path):
"""
Creates a manifest file and uploads to S3.
        :param s3_resource: A Boto3 Amazon S3 resource.
        :param image_s3_path: The S3 path to the images referenced by the manifest file.
The images must be in an S3 bucket with the following folder structure.
s3://my-bucket/<train or test>/
normal/
anomaly/
Place normal images in the normal folder. Anomalous images in the anomaly
folder.
https://docs.aws.amazon.com/lookout-for-vision/latest/developer-guide/create-dataset-s3.html
:param manifest_s3_path: The S3 location in which to store the created
manifest file.
"""
try:
output_manifest_file = "temp.manifest"
# Current date and time in manifest file format
dttm = datetime.now().strftime("%Y-%m-%dT%H:%M:%S.%f")
# get bucket and folder from image and manifest file paths
bucket, prefix = image_s3_path.replace("s3://", "").split("/", 1)
manifest_bucket, manifest_prefix = manifest_s3_path.replace(
"s3://", ""
).split("/", 1)
# create local temp manifest file
with open(output_manifest_file, "w") as mfile:
logger.info("Creating manifest file")
# create JSON lines for anomalous images
src_bucket = s3_resource.Bucket(bucket)
# create json lines for abnormal images.
for obj in src_bucket.objects.filter(
Prefix=prefix + "anomaly/", Delimiter="/"
):
image_path = f"s3://{src_bucket.name}/{obj.key}"
manifest = Datasets.create_json_line(image_path, "anomaly", dttm)
mfile.write(json.dumps(manifest) + "\n")
# create json lines for normal images
for obj in src_bucket.objects.filter(
Prefix=prefix + "normal/", Delimiter="/"
):
image_path = f"s3://{src_bucket.name}/{obj.key}"
manifest = Datasets.create_json_line(image_path, "normal", dttm)
mfile.write(json.dumps(manifest) + "\n")
# copy local manifest to target S3 location
logger.info("Uploading manifest file to %s", manifest_s3_path)
s3_resource.Bucket(manifest_bucket).upload_file(
output_manifest_file, manifest_prefix
)
# delete local manifest file
os.remove(output_manifest_file)
except ClientError as err:
logger.exception("S3 Service Error: %s", format(err))
raise
except Exception as err:
logger.exception(format(err))
raise
else:
logger.info("Completed manifest file creation and upload.")
@staticmethod
def create_json_line(image, class_name, dttm):
"""
Creates a single JSON line for an image.
:param image: The S3 location for the image.
        :param class_name: The class of the image (normal or anomaly).
:param dttm: The date and time that the JSON is created.
"""
label = 0
if class_name == "normal":
label = 0
elif class_name == "anomaly":
label = 1
else:
logger.exception("Unexpected label value: %s for %s", str(label), image)
raise Exception(
"Unexpected label value: {} for {}".format(str(label), image)
)
manifest = {
"source-ref": image,
"anomaly-label": label,
"anomaly-label-metadata": {
"confidence": 1,
"job-name": "labeling-job/anomaly-label",
"class-name": class_name,
"human-annotated": "yes",
"creation-date": dttm,
"type": "groundtruth/image-classification",
},
}
return manifest
@staticmethod
def delete_dataset(lookoutvision_client, project_name, dataset_type):
"""
Deletes an Amazon Lookout for Vision dataset
:param lookoutvision_client: The Amazon Lookout for Vision Boto3 client.
:param project_name: The name of the project that contains the dataset that
you want to delete.
:param dataset_type: The type (train or test) of the dataset that you
want to delete.
"""
try:
# Delete the dataset
logger.info(
"Deleting the %s dataset for project %s.", dataset_type, project_name
)
lookoutvision_client.delete_dataset(
ProjectName=project_name, DatasetType=dataset_type
)
logger.info("Dataset deleted.")
except ClientError as err:
logger.exception(
"Service error: Couldn't delete dataset: %s.", err.response["Message"]
)
raise
@staticmethod
def describe_dataset(lookoutvision_client, project_name, dataset_type):
"""
Gets information about an Amazon Lookout for Vision dataset.
:param lookoutvision_client: The Amazon Lookout for Vision Boto3 client.
:param project_name: The name of the project that contains the dataset that
you want to describe.
:param dataset_type: The type (train or test) of the dataset that you want
to describe.
"""
try:
# Describe a dataset
response = lookoutvision_client.describe_dataset(
ProjectName=project_name, DatasetType=dataset_type
)
print(f"Name: {response['DatasetDescription']['ProjectName']}")
print(f"Type: {response['DatasetDescription']['DatasetType']}")
print(f"Status: {response['DatasetDescription']['Status']}")
print(f"Message: {response['DatasetDescription']['StatusMessage']}")
print(
f"Images: {str(response['DatasetDescription']['ImageStats']['Total'])}"
)
print(
f"Labeled: {str(response['DatasetDescription']['ImageStats']['Labeled'])}"
)
print(
f"Normal: {str(response['DatasetDescription']['ImageStats']['Normal'])}"
)
print(
f"Anomaly: {str(response['DatasetDescription']['ImageStats']['Anomaly'])}"
)
print("Done...")
except ClientError as err:
logger.exception(
"Service error: problem list datasets: %s", err.response["Message"]
)
print("Done")
|
server/ai/src/test_ttldict.py | doneva593/wifisystem | 4,349 | 12669010 | import time
from ttldict import TTLDict
def test_ttldict():
d=TTLDict(ttl=1)
d['foo']='bar'
assert 'foo' in d
assert d['foo'] == 'bar'
time.sleep(1)
assert 'foo' not in d |
Noise Reduction Script/Noise Reduction Script.py | avinashkranjan/PraticalPythonProjects | 930 | 12669050 | <gh_stars>100-1000
# Spectral Subtraction: Method used for noise reduction
import scipy.io.wavfile as wav
import numpy as np
import matplotlib.pyplot as plt
import os
file = input("Enter the file path: ")
sr, data = wav.read(file)
fl = 400 #frame_length
frames = [] #empty list
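#split the signal into 50%-overlapping frames (hop size = fl/2) so the
#windowed frames can later be recombined by overlap-add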
for i in range(0,int(len(data)/(int(fl/2))-1)):
arr = data[int(i*int(fl/2)):int(i*int(fl/2)+fl)]
frames.append(arr) #appending each array data into the frames list
frames = np.array(frames) #converting the frames list into an array
ham_window = np.hamming(fl) #Hamming window of length fl
windowed_frames = frames*ham_window #multiplying frames array with ham_window
dft = [] #empty list containing fft of windowed_frames
for i in windowed_frames:
dft.append(np.fft.fft(i)) #now taking the first fourier transform of each window
dft = np.array(dft) #converting dft into array
dft_mag_spec = np.abs(dft) #converting dft into absolute values
dft_phase_spec = np.angle(dft) #finding dft angle
noise_estimate = np.mean(dft_mag_spec,axis=0) #mean
noise_estimate_mag = np.abs(noise_estimate) #absolute value
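#spectral subtraction: subtract a scaled noise magnitude estimate from each
#frame's magnitude spectrum and clamp negative values to zero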
estimate_mag = (dft_mag_spec-2*noise_estimate_mag) #subtraction method
estimate_mag[estimate_mag<0]=0
estimate = estimate_mag*np.exp(1j*dft_phase_spec) #calculating the final estimate
ift = [] #list that will hold the inverse Fourier transform of each estimated frame
for i in estimate:
ift.append(np.fft.ifft(i)) #appending in ift list
clean_data = []
clean_data.extend(ift[0][:int(fl/2)]) #extending clean_data containg ift list
for i in range(len(ift)-1):
clean_data.extend(ift[i][int(fl/2):]+ift[i+1][:int(fl/2)])
clean_data.extend(ift[-1][int(fl/2):]) #extending clean_data containing ift list
clean_data = np.array(clean_data) #converting it into array
#finally plotting the graph showing the diffrence in the noise
fig = plt.figure(figsize=(8,5))
ax = plt.subplot(1,1,1)
ax.plot(np.linspace(0,64000,64000),data,label='Original',color="orange")
ax.plot(np.linspace(0,64000,64000),clean_data,label='Filtered',color="purple")
ax.legend(fontsize=12)
ax.set_title('Spectral Subtraction Method', fontsize=15)
filename = os.path.basename(file)
cleaned_file = "(Filtered_Audio)"+filename #final filtered audio
wav.write(cleaned_file,rate=sr, data = clean_data.astype(np.int16))
plt.savefig(filename+"(Spectral Subtraction graph).jpg") #saved file name as audio.wav(Spectral Subtraction graph).jpg
|
examples/salttiger.py | alphardex/looter | 145 | 12669052 | <reponame>alphardex/looter
"""
Free foreign programming e-books on salttiger
"""
import os
from pprint import pprint
import looter as lt
domain = 'https://salttiger.com'
def crawl(url):
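    # Scrape every entry in the monthly listing: book title, link and the
    # number of comments shown in the trailing <span>.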
tree = lt.fetch(url)
items = tree.css('ul.car-monthlisting li')
total = []
for item in items:
data = {}
data['name'] = item.css('a::text').extract_first()
data['url'] = item.css('a::attr(href)').extract_first()
data['comments'] = int(item.css('span::text').re_first(r'(\d+)'))
pprint(data)
total.append(data)
return total
if __name__ == '__main__':
task = f'{domain}/archives/'
result = crawl(task)
lt.save(result, name='salttiger.csv', sort_by='comments', order='desc')
|
core/python/src/nao/cli.py | tensorlang/nao | 332 | 12669065 | <reponame>tensorlang/nao<filename>core/python/src/nao/cli.py<gh_stars>100-1000
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
import datetime
import argparse
import json
import pprint
import re
import sys
import traceback
import gc
from os import path
from nao.compiler.compiler import Compiler
from nao.compiler.asset import graph_assets
from nao.structure import graph_io
from nao.structure import graph_query
from nao.structure import graph_xform
from nao.run import graph_execution
from nao.tool import graph_repl
import tensorflow as tf
from tensorflow.python.framework import meta_graph
from tensorflow.python.framework import graph_util
from tensorflow.core.framework import variable_pb2
from tensorflow.core.protobuf import control_flow_pb2
import subprocess
def eprint(*args, **kwargs):
print(*args, file=sys.stderr, **kwargs)
pp = pprint.PrettyPrinter(indent=2, stream=sys.stderr).pprint
def main():
parser = argparse.ArgumentParser()
parser.add_argument("package_names", type=str, nargs='*')
parser.add_argument("--root", metavar='DIR', type=str,
help="""Specify root directory to search for imports from""")
parser.add_argument("--source", type=str,
help="""Specify source code instead of reading from file""")
parser.add_argument("--reopen-stderr", metavar='FILE', type=argparse.FileType('a', encoding='UTF-8'))
parser.add_argument("--reopen-stdout", metavar='FILE', type=argparse.FileType('a', encoding='UTF-8'))
parser.add_argument("--assets-fetch", default=False, action='store_const', const=True,
help="""Fetch any assets we don't already have.""")
parser.add_argument("--assets-root", metavar='DIR', type=str,
help="""Specify root directory for assets.""")
parser.add_argument("--metagraphdef", metavar='FILE', type=str,
help="""Graph file to load.""")
parser.add_argument("--binary-metagraphdef", default=False, action='store_const', const=True,
help="""Whether or not input is binary.""")
parser.add_argument("--feed-constants", metavar='FILE', type=str,
help="""Path to GraphDef protobuf with constants to feed""")
parser.add_argument("--feed-constants-strip", metavar='PREFIX', type=str, default="",
help="""Prefix to filter for (and strip from) constants""")
parser.add_argument("--feed-constants-prefix", metavar='PREFIX', type=str,
help="""Prefix to add to constant names in feed""")
parser.add_argument("--feed-constants-binary", default=False, action='store_const', const=True,
help="""Whether or not feed constant protobuf is binary""")
parser.add_argument("--run", default=False, action='store_const', const=True,
help="""Run the graph with given (or default) --result* and --feed-* options""")
parser.add_argument("--run-result-pattern", metavar='PATTERN', type=str, default="^(${package}/Main)/outputs/(.*)$",
help="""Pattern to discover run results.""")
parser.add_argument("--result-binary", default=False, action='store_const', const=True,
help="""Whether or not to result in binary.""")
parser.add_argument("--result", metavar='FILE', type=str, default="/dev/stdout")
parser.add_argument("--test", default=False, action='store_const', const=True,
help="""Run the tests graphs with given (or default) --test-* options""")
parser.add_argument("--test-result-pattern", metavar='PATTERN', type=str, default="^(${package}/Test[^/]*)/outputs/(.*)$",
help="""Pattern to discover test graph results.""")
parser.add_argument("--repl", default=False, action='store_const', const=True,
help="""Start REPL""")
parser.add_argument("--tensorboard", nargs='?', default="", metavar="IP:PORT",
help="""Start tensorboard server on the given address, with the given --log-root or --log-dir""")
parser.add_argument("--jupyter-kernel", nargs='?', default="", metavar="CONFIG_FILE",
help="""Start Jupyter kernel with the given configuration file""")
parser.add_argument("--train", default=False, action='store_const', const=True,
help="""Run train graphs with given (or default) --train-* options""")
parser.add_argument("--train-result-pattern", metavar='PATTERN', type=str, default="^(${package}/Train[^/]*)/outputs/(.*)$",
help="""Pattern to discover train graph results.""")
parser.add_argument("--workspace", metavar='DIR', type=str,
help="""Default value for workspace""")
parser.add_argument("--log-root", metavar='DIR', type=str,
help="""Which directory to calculate default log dir from.""")
parser.add_argument("--log-dir", metavar='DIR', type=str,
help="""Which directory to put logs in.""")
parser.add_argument("--output", default=False, action='store_const', const=True,
help="""Output graph""")
parser.add_argument("--output-root", metavar='DIR', type=str,
help="""When automatically constructing output path, use this as base""")
parser.add_argument("--output-name", metavar='NAME', type=str,
help="""Base name to use for output file name. Defaults to ${package} if there's only one.""")
parser.add_argument("--output-result-pattern", metavar='PATTERN', type=str, default="^(${package}/[^/]*)(/outputs/[^/]*)?$",
help="""Pattern to discover outputs of graph to output.""")
parser.add_argument("--output-format", metavar='FORMAT', type=str, default="metagraph",
help="""Defaults to metagraph""")
parser.add_argument("--output-binary", default=False, action='store_const', const=True,
help="""Whether or not to output in binary.""")
parser.add_argument("--output-file", metavar='FILE', type=str,
help="""Path to write output to. Defaults to ${output-name}.${output-format}""")
FLAGS = parser.parse_args()
if FLAGS.reopen_stderr:
os.close(sys.stderr.fileno())
os.dup2(FLAGS.reopen_stderr.fileno(), sys.stderr.fileno())
if FLAGS.reopen_stdout:
os.close(sys.stdout.fileno())
os.dup2(FLAGS.reopen_stdout.fileno(), sys.stdout.fileno())
package_names = FLAGS.package_names
should_parse = len(package_names) > 0 or FLAGS.source
if not (should_parse or FLAGS.run or FLAGS.test or FLAGS.output):
if os.isatty(1):
FLAGS.repl = True
if should_parse and not (FLAGS.repl or FLAGS.run or FLAGS.test or FLAGS.output):
FLAGS.output = True
def search_upwards(startdir, filename):
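        # Walk up the directory tree from startdir until a directory that
        # contains `filename` is found; return None once the root is reached.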
curdir = startdir
while True:
if path.exists(path.join(curdir, filename)):
return curdir
lastdir = curdir
curdir = path.dirname(curdir)
if curdir == lastdir:
return None
if not FLAGS.workspace:
FLAGS.workspace = os.environ.get("NAOPATH", "")
if not FLAGS.workspace:
FLAGS.workspace = search_upwards(os.getcwd(), ".naoconfig")
if not FLAGS.workspace:
FLAGS.workspace = "."
if FLAGS.assets_root is None:
FLAGS.assets_root = path.join(FLAGS.workspace, "assets")
if FLAGS.output_root is None:
FLAGS.output_root = path.join(FLAGS.workspace, "pkg")
if FLAGS.root is None:
FLAGS.root = path.join(FLAGS.workspace, "src")
if FLAGS.log_root is None:
FLAGS.log_root = path.join(FLAGS.workspace, "log")
if FLAGS.tensorboard is None:
FLAGS.tensorboard = "127.0.0.1:6006"
def log_dir_fn_fn(pkg_names):
if FLAGS.log_dir:
return lambda: FLAGS.log_dir
session_name = datetime.datetime.utcnow().strftime("%F_%H-%M-%S")
base_log_dir = path.join(FLAGS.log_root, pkg_names[0], session_name)
def log_dir_fn(run_id=None):
log_dir = base_log_dir
if run_id is not None:
log_dir = path.join(log_dir, "%04d" % run_id)
return log_dir
return log_dir_fn
def new_compiler():
return Compiler(
FLAGS.root,
FLAGS.output_root,
FLAGS.assets_root)
meta_graph_def = None
output_package_names = None
if should_parse:
p = new_compiler()
if FLAGS.source:
package_name = "main"
package_names = [package_name]
p.put_source(package_name + ".nao", FLAGS.source)
p.resolve_import_path(package_name)
else:
# Look for matching packages _train
if FLAGS.train:
output_package_names = package_names[:]
package_names.extend([pkg + "_train" for pkg in package_names])
for package_name in package_names:
p.resolve_import_path(package_name)
meta_graph_def = p.meta_graph_def()
p = None
# print("parsed", expressions)
# We need to do this so we clean up references to py_funcs. LAME.
gc.collect()
# Sometimes we want to output different packages than we're testing, training, etc.
if output_package_names == None:
output_package_names = package_names
if not FLAGS.output_name and len(output_package_names) == 1:
FLAGS.output_name = output_package_names[0]
if FLAGS.train:
FLAGS.output_name += "_trained"
if FLAGS.metagraphdef:
package_names = ["[^/]+"]
meta_graph_def = graph_io.read_meta_graph_def(
FLAGS.metagraphdef,
FLAGS.binary_metagraphdef)
if FLAGS.output and FLAGS.output_name and not FLAGS.output_file:
output_suffix = "." + FLAGS.output_format + ".pb"
if not FLAGS.output_binary:
output_suffix += "txt"
FLAGS.output_file = FLAGS.output_root + "/" + FLAGS.output_name + output_suffix
# Now that we know our package names, use them to target the proper results.
package_pattern = "(?:" + str.join("|", package_names) + ")"
FLAGS.test_result_pattern = FLAGS.test_result_pattern.replace("${package}", package_pattern)
FLAGS.train_result_pattern = FLAGS.train_result_pattern.replace("${package}", package_pattern)
FLAGS.run_result_pattern = FLAGS.run_result_pattern.replace("${package}", package_pattern)
output_package_pattern = "(?:" + str.join("|", output_package_names) + ")"
FLAGS.output_result_pattern = FLAGS.output_result_pattern.replace("${package}", output_package_pattern)
eprint("FLAGS", FLAGS)
eprint("package_names", package_names)
if FLAGS.tensorboard != "":
tb_host, tb_port = FLAGS.tensorboard.split(':', 1)
tb_logdir = FLAGS.log_dir or FLAGS.log_root
if tb_port is not None:
tb_port = int(tb_port)
from nao.tool import tensorboard_server
sys.exit(tensorboard_server.main(tb_logdir, tb_host=tb_host, tb_port=tb_port))
if FLAGS.jupyter_kernel != "":
jupyter_config_file = FLAGS.jupyter_kernel
from nao.tool import jupyter_kernel, jupyter_kernel_driver
if jupyter_config_file:
eprint("Reading jupyter_config file '%s'..." % jupyter_config_file)
jupyter_config = json.loads("".join(open(jupyter_config_file).readlines()))
else:
import uuid
jupyter_config = {
'control_port' : 0,
'hb_port' : 0,
'iopub_port' : 0,
'ip' : '127.0.0.1',
'key' : str(<KEY>()),
'shell_port' : 0,
'signature_scheme' : 'hmac-sha256',
'stdin_port' : 0,
'transport' : 'tcp'
}
pallet_parser = new_compiler()
repl_session = graph_repl.ReplSession(pallet_parser, log_dir_fn_fn(["jupyter"]))
driver = jupyter_kernel_driver.Driver(repl_session)
sys.exit(jupyter_kernel.Kernel(jupyter_config, driver.info(), driver.do).run())
def feed_dict_fn():
feed_dict = {}
# Properly find and strip prefix of constants, loading them with given prefix to feed_dict
if FLAGS.feed_constants:
feed_graph_def = graph_io.read_graph_def(FLAGS.feed_constants, FLAGS.feed_constants_binary)
constants = graph_query.find_nodes_with_prefix(feed_graph_def, FLAGS.feed_constants_strip)
constants_dict = graph_xform.constants_as_dict(constants)
strip_prefix = FLAGS.feed_constants_strip
add_prefix = FLAGS.feed_constants_prefix
for name, value in constants_dict.items():
if strip_prefix != None:
if name.startswith(strip_prefix):
name = name[len(strip_prefix):]
else:
continue
feed_dict[add_prefix + name + ":0"] = value
asset_map = graph_assets.load_asset_map(tf.get_default_graph())
eprint("asset_map", asset_map)
assets_by_path = {}
missing_assets = {}
for asset_name, asset in asset_map.items():
asset_path = path.join(FLAGS.assets_root, asset_name)
assets_by_path[asset_path] = asset
feed_dict[asset["placeholder"]] = asset_path
if not os.path.exists(asset_path):
missing_assets[asset_path] = asset
if len(missing_assets) > 0:
if not FLAGS.assets_fetch:
raise Exception("Missing assets: %s" % missing_assets)
for asset_path, asset in missing_assets.items():
graph_assets.maybe_download(asset_path, asset["url"])
eprint("feed_dict", feed_dict)
return feed_dict
if FLAGS.train:
def post_train(session, result_scope_prefixes):
graph = session.graph
trained_var_name_bs = set()
for result_scope_prefix in result_scope_prefixes:
collection_name = "%s:variable_names" % result_scope_prefix
eprint("collection_name", collection_name)
for var_name_b in graph.get_collection_ref(collection_name):
trained_var_name_bs.add(var_name_b)
var_names = [b.decode('utf-8') for b in trained_var_name_bs]
vars = graph_query.find_variables_by_name(
graph.get_collection_ref("variables"),
var_names)
eprint("saving vars", var_names, vars)
graph_xform.replace_variable_initializers_with_current_values(
graph,
vars,
"Trained")
graph_execution.import_and_run_meta_graph(
meta_graph_def=meta_graph_def,
feed_dict_fn=feed_dict_fn,
result_pattern=re.compile(FLAGS.train_result_pattern),
finish_session_fn=post_train,
log_dir_fn=lambda x: log_dir_fn_fn(x)(),
)
meta_graph_def, _ = meta_graph.export_scoped_meta_graph()
if FLAGS.test:
graph_execution.import_and_run_meta_graph(
meta_graph_def=meta_graph_def,
feed_dict_fn=feed_dict_fn,
log_dir_fn=lambda x: log_dir_fn_fn(x)(),
result_pattern=re.compile(FLAGS.test_result_pattern),
)
if meta_graph_def and FLAGS.output_file:
eprint("meta_graph_def", [n.name for n in meta_graph_def.graph_def.node])
graph_def = meta_graph_def.graph_def
output_re = re.compile(FLAGS.output_result_pattern)
output_node_names = ['py_funcs_json'] # HACK(adamb) So that pyfuncs still work.
var_names = set()
for n in graph_def.node:
m = output_re.match(n.name)
if not m:
continue
output_node_names.append(n.name)
# If this isn't a function, then we're covered. Otherwise pick up needed
# variables.
if not m.group(2):
continue
# Look for collection of variable names referenced by this function.
collection_name = "%s:variable_names" % m.group(1)
eprint("collection_name", collection_name)
function_var_name_bs = meta_graph_def.collection_def[collection_name].bytes_list.value
for var_name_b in function_var_name_bs:
# Remember the name of each variable referenced.
var_names.add(var_name_b.decode('utf-8'))
eprint("var_names", var_names)
eprint("output_node_names", output_node_names)
graph_xform.strip_meta_graph(meta_graph_def, output_node_names, var_names)
if FLAGS.output_file:
output_dirname = os.path.dirname(FLAGS.output_file)
if not os.path.exists(output_dirname):
os.makedirs(output_dirname)
if FLAGS.output_format == "metagraph":
graph_io.write_meta_graph_def(
meta_graph_def=meta_graph_def,
file=FLAGS.output_file,
binary=FLAGS.output_binary)
elif FLAGS.output_format == "graph":
# If we trained and we're outputting a graph_def, we'll need to modify it.
# We'll need to replace all the trained variables with the *constants* that
# their initializers refer to.
if FLAGS.train:
pass
graph_io.write_graph_def(
graph_def=meta_graph_def.graph_def,
file=FLAGS.output_file,
binary=FLAGS.output_binary)
if FLAGS.run:
results = graph_execution.import_and_run_meta_graph(
meta_graph_def=meta_graph_def,
feed_dict_fn=feed_dict_fn,
log_dir_fn=lambda x: log_dir_fn_fn(x)(),
result_pattern=re.compile(FLAGS.run_result_pattern),
)
graph_def = graph_xform.dict_as_graph_def(results)
graph_io.write_graph_def(
graph_def,
file=FLAGS.result,
binary=FLAGS.result_binary,
)
if FLAGS.repl:
graph_repl.run(new_compiler(), log_dir_fn_fn(["repl"]))
if __name__ == '__main__':
try:
main()
# with tf.device("/cpu:0"):
# main()
except Exception as ex:
# TODO(adamb) Should do *real* error printing.
# NOTE(adamb) Need to correlate expressions with line and character numbers!
traceback.print_exc(file=sys.stderr)
sys.exit(1)
|
questions/55366046/main.py | sesu089/stackoverflow | 302 | 12669092 | <gh_stars>100-1000
from PyQt5 import QtCore, QtGui, QtWidgets, QtSvg
class PianoKey(QtWidgets.QGraphicsRectItem):
def __init__(self, black=False, rect = QtCore.QRectF(), parent=None):
super(PianoKey, self).__init__(rect, parent)
self.m_pressed = False
self.m_selectedBrush = QtGui.QBrush()
self.m_brush = QtGui.QBrush(QtCore.Qt.black) if black else QtGui.QBrush(QtCore.Qt.white)
self.m_black = black
def setPressedBrush(self, brush):
self.m_selectedBrush = brush
def paint(self, painter, option, widget):
rendered = QtSvg.QSvgRenderer("key.svg")
black_pen = QtGui.QPen(QtCore.Qt.black, 1)
gray_pen = QtGui.QPen(QtGui.QBrush(QtCore.Qt.gray), 1,
QtCore.Qt.SolidLine, QtCore.Qt.RoundCap, QtCore.Qt.RoundJoin)
if self.m_pressed:
if self.m_selectedBrush.style() != QtCore.Qt.NoBrush:
painter.setBrush(self.m_selectedBrush)
else:
painter.setBrush(QtWidgets.QApplication.palette().highlight())
else:
painter.setBrush(self.m_brush);
painter.setPen(black_pen)
painter.drawRoundedRect(self.rect(), 15, 15, QtCore.Qt.RelativeSize)
if self.m_black:
rendered.render(painter, self.rect())
else:
points = [
QtCore.QPointF(self.rect().left()+1.5, self.rect().bottom()-1),
QtCore.QPointF(self.rect().right()-1, self.rect().bottom()-1),
QtCore.QPointF(self.rect().right()-1, self.rect().top()+1)
]
painter.setPen(gray_pen)
painter.drawPolyline(QtGui.QPolygonF(points))
def mousePressEvent(self, event):
self.m_pressed = True
self.update()
super(PianoKey, self).mousePressEvent(event)
event.accept()
def mouseReleaseEvent(self, event):
self.m_pressed = False
self.update()
super(PianoKey, self).mouseReleaseEvent(event)
KEYWIDTH, KEYHEIGHT = 18, 72
class PianoKeyBoard(QtWidgets.QGraphicsView):
def __init__(self, num_octaves=2, parent=None):
super(PianoKeyBoard, self).__init__(parent)
self.initialize()
self.m_numOctaves = num_octaves
scene = QtWidgets.QGraphicsScene(QtCore.QRectF(0, 0, KEYWIDTH * self.m_numOctaves * 7, KEYHEIGHT), self)
self.setScene(scene)
numkeys = self.m_numOctaves * 12
for i in range(numkeys):
octave = i//12*7
j = i % 12
if j >= 5: j += 1
if j % 2 == 0:
x = (octave + j/2)*KEYWIDTH
key = PianoKey(rect=QtCore.QRectF(x, 0, KEYWIDTH, KEYHEIGHT), black=False)
else:
x = (octave + j//2) * KEYWIDTH + KEYWIDTH * 6//10 + 1
key = PianoKey(rect=QtCore.QRectF(x, 0, KEYWIDTH * 8//10 - 1, KEYHEIGHT * 6//10 ), black=True)
key.setZValue(1)
key.setPressedBrush(QtWidgets.QApplication.palette().highlight())
self.scene().addItem(key)
def initialize(self):
self.setAttribute(QtCore.Qt.WA_InputMethodEnabled, False)
self.setHorizontalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)
self.setVerticalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)
self.setCacheMode(QtWidgets.QGraphicsView.CacheBackground)
self.setViewportUpdateMode(QtWidgets.QGraphicsView.MinimalViewportUpdate)
self.setRenderHints(QtGui.QPainter.Antialiasing|
QtGui.QPainter.TextAntialiasing |
QtGui.QPainter.SmoothPixmapTransform)
self.setOptimizationFlag(QtWidgets.QGraphicsView.DontClipPainter, True)
self.setOptimizationFlag(QtWidgets.QGraphicsView.DontSavePainterState, True)
self.setOptimizationFlag(QtWidgets.QGraphicsView.DontAdjustForAntialiasing, True)
self.setBackgroundBrush(QtWidgets.QApplication.palette().base())
def resizeEvent(self, event):
super(PianoKeyBoard, self).resizeEvent(event)
self.fitInView(self.scene().sceneRect(), QtCore.Qt.KeepAspectRatio)
def sizeHint(self):
return self.mapFromScene(self.sceneRect()).boundingRect().size()
if __name__ == '__main__':
import sys
app = QtWidgets.QApplication(sys.argv)
app.setStyle('fusion')
w = QtWidgets.QWidget()
lay = QtWidgets.QVBoxLayout(w)
lay.addWidget(QtWidgets.QLabel("Piano Keyboard", alignment=QtCore.Qt.AlignCenter))
lay.addWidget(PianoKeyBoard())
w.resize(640, 480)
w.show()
sys.exit(app.exec_()) |
fewshots/data/load.py | bertinetto/r2d2 | 111 | 12669140 | import os
import numpy as np
import torch
from functools import partial
from torchnet.transform import compose
from torchnet.dataset import ListDataset, TransformDataset
from fewshots.data.base import convert_dict, CudaTransform, EpisodicBatchSampler
from fewshots.data.setup import setup_images
from fewshots.data.cache import Cache
from fewshots.utils import filter_opt
from fewshots.data.SetupEpisode import SetupEpisode
root_dir = ''
def extract_episode(setup_episode, augm_opt, d):
# data: N x C x H x W
n_max_examples = d[0]['data'].size(0)
n_way, n_shot, n_query = setup_episode.get_current_setup()
example_inds = torch.randperm(n_max_examples)[:(n_shot + n_query)]
support_inds = example_inds[:n_shot]
query_inds = example_inds[n_shot:]
xs_list = [d[i]['data'][support_inds] for i in range(augm_opt['n_augment'])]
# concatenate as shots into xs
xs = torch.cat(xs_list, dim=0)
# extract queries from a single cache entry
xq = d[np.random.randint(augm_opt['n_augment'])]['data'][query_inds]
out_dict = {'class': d[0]['class'], 'xs': xs, 'xq': xq, 'n_way': n_way, 'n_shot': n_shot, 'n_query': n_query}
return out_dict
def load_data(opt, splits):
global root_dir
root_dir = opt['data.root_dir']
augm_opt = filter_opt(opt, 'augm')
dataset = opt['data.dataset']
split_dir = os.path.join(opt['data.root_dir'], opt['data.dataset'], 'splits', opt['data.split'])
ret = {}
# cache = {}
cache = Cache()
for split in splits:
if split in ['val1', 'val5', 'test']:
n_way = opt['data.test_way']
else:
n_way = opt['data.way']
if split in ['train', 'trainval']:
# random shots
SE = SetupEpisode(batch_size=opt['data.batch_size'], shot_max=opt['data.shot_max'],
fixed_shot=opt['data.shot'], way_min=opt['data.way_min'], fixed_way=n_way)
elif split == 'val1':
SE = SetupEpisode(batch_size=opt['data.batch_size'], shot_max=opt['data.shot_max'], fixed_shot=1,
way_min=opt['data.way_min'], fixed_way=n_way)
elif split == 'val5':
SE = SetupEpisode(batch_size=opt['data.batch_size'], shot_max=opt['data.shot_max'], fixed_shot=5,
way_min=opt['data.way_min'], fixed_way=n_way)
else:
SE = SetupEpisode(batch_size=opt['data.batch_size'], shot_max=opt['data.shot_max'],
fixed_shot=opt['data.test_shot'], way_min=opt['data.way_min'], fixed_way=n_way)
if split in ['val1', 'val5', 'test']:
n_episodes = opt['data.test_episodes']
else:
n_episodes = opt['data.train_episodes']
transforms = [partial(convert_dict, 'class'),
partial(load_class_images, split, dataset, cache, augm_opt),
partial(extract_episode, SE, augm_opt)]
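        # The transform pipeline runs once per sampled class name:
        # 1) wrap the name into a dict, 2) load (and possibly augment) its cached
        # images, 3) carve the loaded tensors into a support/query episode.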
if opt['data.cuda']:
transforms.append(CudaTransform())
transforms = compose(transforms)
class_names = []
split_file = 'val.txt' if split in ['val1', 'val5'] else "{:s}.txt".format(split)
with open(os.path.join(split_dir, split_file), 'r') as f:
for class_name in f.readlines():
class_names.append(class_name.rstrip('\n'))
ds = TransformDataset(ListDataset(class_names), transforms)
sampler = EpisodicBatchSampler(SE, len(ds), n_episodes)
# use num_workers=0, otherwise may receive duplicate episodes
ret[split] = torch.utils.data.DataLoader(ds, batch_sampler=sampler, num_workers=0)
return ret
def load_class_images(split, dataset, cache, augm_opt, d):
if d['class'] in cache.data.keys():
if len(cache.data[d['class']]) < augm_opt['cache_size']:
init_entry = False
setup_images(split, d, cache, dataset, init_entry, root_dir, augm_opt)
else:
init_entry = True
setup_images(split, d, cache, dataset, init_entry, root_dir, augm_opt)
cache_len = len(cache.data[d['class']])
    # if the cache does not hold enough augmented entries yet, sample with replacement
if cache_len < augm_opt['n_augment']:
rand_ids = np.random.choice(cache_len, size=augm_opt['n_augment'], replace=True)
else:
rand_ids = np.random.choice(cache_len, size=augm_opt['n_augment'], replace=False)
out_dicts = [{'class': d['class'], 'data': cache.data[d['class']][rand_ids[i]]} for i in
range(augm_opt['n_augment'])]
return out_dicts
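

# --- Illustrative sketch (not part of the original module) ---
# Minimal demo of the episode layout produced by extract_episode(): `xs`
# stacks the support shots drawn from every augmented cache entry, while `xq`
# takes the queries from a single entry. The _FixedEpisode helper is a
# stand-in for SetupEpisode and exists only for this example; running it
# assumes torch and the module's other imports resolve.
if __name__ == "__main__":
    class _FixedEpisode(object):
        def get_current_setup(self):
            return 5, 1, 3  # n_way, n_shot, n_query

    fake_cache_entries = [{'class': 'demo', 'data': torch.randn(20, 3, 84, 84)}
                          for _ in range(2)]  # pretend n_augment == 2
    episode = extract_episode(_FixedEpisode(), {'n_augment': 2}, fake_cache_entries)
    print(episode['xs'].shape)  # torch.Size([2, 3, 84, 84]) -> n_shot * n_augment shots
    print(episode['xq'].shape)  # torch.Size([3, 3, 84, 84]) -> n_query queries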
|
floss/render/default.py | mandiant/flare-floss | 145 | 12669220 | <gh_stars>100-1000
# Copyright (C) 2022 Mandiant, Inc. All Rights Reserved.
import io
import textwrap
import collections
from typing import List, Tuple, Union
import tabulate
import floss.utils as util
import floss.logging_
from floss.render import Verbosity
from floss.results import AddressType, StackString, TightString, DecodedString, ResultDocument, StringEncoding
from floss.render.sanitize import sanitize
MIN_WIDTH_LEFT_COL = 22
MIN_WIDTH_RIGHT_COL = 82
DISABLED = "Disabled"
tabulate.PRESERVE_WHITESPACE = True
logger = floss.logging_.getLogger(__name__)
class StringIO(io.StringIO):
def writeln(self, s):
self.write(s)
self.write("\n")
def width(s: str, character_count: int) -> str:
"""pad the given string to at least `character_count`"""
if len(s) < character_count:
return s + " " * (character_count - len(s))
else:
return s
def render_meta(results: ResultDocument, ostream, verbose):
rows: List[Tuple[str, str]] = list()
if verbose == Verbosity.DEFAULT:
rows.append((width("file path", MIN_WIDTH_LEFT_COL), width(results.metadata.file_path, MIN_WIDTH_RIGHT_COL)))
else:
rows.extend(
[
(width("file path", MIN_WIDTH_LEFT_COL), width(results.metadata.file_path, MIN_WIDTH_RIGHT_COL)),
("start date", results.metadata.runtime.start_date.strftime("%Y-%m-%d %H:%M:%S")),
("runtime", strtime(results.metadata.runtime.total)),
("version", results.metadata.version),
("imagebase", f"0x{results.metadata.imagebase:x}"),
("min string length", f"{results.metadata.min_length}"),
]
)
rows.append(("extracted strings", ""))
rows.extend(render_string_type_rows(results))
if verbose > Verbosity.DEFAULT:
rows.extend(render_function_analysis_rows(results))
ostream.write(tabulate.tabulate(rows, tablefmt="psql"))
ostream.write("\n")
def render_string_type_rows(results: ResultDocument) -> List[Tuple[str, str]]:
return [
(
" static strings",
str(len(results.strings.static_strings)) if results.analysis.enable_static_strings else DISABLED,
),
(
" stack strings",
str(len(results.strings.stack_strings)) if results.analysis.enable_stack_strings else DISABLED,
),
(
" tight strings",
str(len(results.strings.tight_strings)) if results.analysis.enable_tight_strings else DISABLED,
),
(
" decoded strings",
str(len(results.strings.decoded_strings)) if results.analysis.enable_decoded_strings else DISABLED,
),
]
def render_function_analysis_rows(results) -> List[Tuple[str, str]]:
if results.metadata.runtime.vivisect == 0:
return [("analyzed functions", DISABLED)]
rows = [
("analyzed functions", ""),
(" discovered", results.analysis.functions.discovered),
(" library", results.analysis.functions.library),
]
if results.analysis.enable_stack_strings:
rows.append((" stack strings", str(results.analysis.functions.analyzed_stack_strings)))
if results.analysis.enable_tight_strings:
rows.append((" tight strings", str(results.analysis.functions.analyzed_tight_strings)))
if results.analysis.enable_decoded_strings:
rows.append((" decoded strings", str(results.analysis.functions.analyzed_decoded_strings)))
if results.analysis.functions.decoding_function_scores:
rows.append(
(
" identified decoding functions\n (offset and score)",
textwrap.fill(
", ".join(
[
f"0x{fva:x} ({d:.3f})"
for fva, d in results.analysis.functions.decoding_function_scores.items()
]
),
max(len(results.metadata.file_path), MIN_WIDTH_RIGHT_COL),
),
)
)
return rows
def strtime(seconds):
m, s = divmod(seconds, 60)
return f"{m:02.0f}:{s:02.0f}"
def render_staticstrings(strings, ostream, verbose, disable_headers):
render_heading("FLOSS STATIC STRINGS", len(strings), ostream, disable_headers)
ascii_strings = list(filter(lambda s: s.encoding == StringEncoding.ASCII, strings))
unicode_strings = list(filter(lambda s: s.encoding == StringEncoding.UTF16LE, strings))
ascii_offset_len = 0
unicode_offset_len = 0
if ascii_strings:
ascii_offset_len = len(f"{ascii_strings[-1].offset}")
if unicode_strings:
unicode_offset_len = len(f"{unicode_strings[-1].offset}")
offset_len = max(ascii_offset_len, unicode_offset_len)
render_heading("FLOSS ASCII STRINGS", len(ascii_strings), ostream, disable_headers)
for s in ascii_strings:
if verbose == Verbosity.DEFAULT:
ostream.writeln(s.string)
else:
ostream.writeln(f"0x{s.offset:>0{offset_len}x} {s.string}")
ostream.writeln("")
render_heading("FLOSS UTF-16LE STRINGS", len(unicode_strings), ostream, disable_headers)
for s in unicode_strings:
if verbose == Verbosity.DEFAULT:
ostream.writeln(s.string)
else:
ostream.writeln(f"0x{s.offset:>0{offset_len}x} {s.string}")
def render_stackstrings(
strings: Union[List[StackString], List[TightString]], ostream, verbose: bool, disable_headers: bool
):
if verbose == Verbosity.DEFAULT:
for s in strings:
ostream.writeln(sanitize(s.string))
else:
if strings:
ostream.write(
tabulate.tabulate(
[
(
util.hex(s.function),
util.hex(s.program_counter),
util.hex(s.frame_offset),
sanitize(s.string),
)
for s in strings
],
headers=("Function", "Function Offset", "Frame Offset", "String") if not disable_headers else (),
)
)
ostream.write("\n")
def render_decoded_strings(decoded_strings: List[DecodedString], ostream, verbose, disable_headers):
"""
Render results of string decoding phase.
"""
if verbose == Verbosity.DEFAULT:
for ds in decoded_strings:
ostream.writeln(sanitize(ds.string))
else:
strings_by_functions = collections.defaultdict(list)
for ds in decoded_strings:
strings_by_functions[ds.decoding_routine].append(ds)
for fva, data in strings_by_functions.items():
render_heading(f" FUNCTION at 0x{fva:x}", len(data), ostream, disable_headers)
rows = []
for ds in data:
if ds.address_type in (AddressType.HEAP, AddressType.STACK):
offset_string = f"({ds.address_type})"
else:
offset_string = hex(ds.address or 0)
rows.append((offset_string, hex(ds.decoded_at), sanitize(ds.string)))
if rows:
ostream.write(
tabulate.tabulate(rows, headers=("Offset", "Called At", "String") if not disable_headers else ())
)
ostream.writeln("\n")
def render_heading(heading, n, ostream, disable_headers):
"""
example::
-------------------------------
| FLOSS STATIC STRINGS (1337) |
-------------------------------
"""
if disable_headers:
return
heading = f"| {heading} ({n}) |"
ostream.write(tabulate.tabulate([[heading]]))
ostream.write("\n")
def render(results, verbose, disable_headers):
ostream = StringIO()
if not disable_headers:
ostream.writeln("")
ostream.write(f"FLARE FLOSS RESULTS (version {results.metadata.version})\n")
render_meta(results, ostream, verbose)
ostream.writeln("")
if results.analysis.enable_static_strings:
render_staticstrings(results.strings.static_strings, ostream, verbose, disable_headers)
ostream.writeln("")
if results.analysis.enable_stack_strings:
render_heading("FLOSS STACK STRINGS", len(results.strings.stack_strings), ostream, disable_headers)
render_stackstrings(results.strings.stack_strings, ostream, verbose, disable_headers)
ostream.writeln("")
if results.analysis.enable_tight_strings:
render_heading("FLOSS TIGHT STRINGS", len(results.strings.tight_strings), ostream, disable_headers)
render_stackstrings(results.strings.tight_strings, ostream, verbose, disable_headers)
ostream.writeln("")
if results.analysis.enable_decoded_strings:
render_heading("FLOSS DECODED STRINGS", len(results.strings.decoded_strings), ostream, disable_headers)
render_decoded_strings(results.strings.decoded_strings, ostream, verbose, disable_headers)
return ostream.getvalue()
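

# --- Illustrative sketch (not part of the original module) ---
# Tiny demo of the formatting helpers above, assuming the floss package and
# tabulate are importable; it does not require a real ResultDocument.
if __name__ == "__main__":
    demo_stream = StringIO()
    render_heading("FLOSS STATIC STRINGS", 1337, demo_stream, disable_headers=False)
    print(demo_stream.getvalue())                         # boxed heading, as in the docstring example
    print(repr(width("file path", MIN_WIDTH_LEFT_COL)))   # right-padded to 22 characters
    print(strtime(83))                                    # -> 01:23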
|
src/django-nonrel/tests/regressiontests/urlpatterns_reverse/views.py | adamjmcgrath/glancydesign | 790 | 12669245 | from django.http import HttpResponse
def empty_view(request, *args, **kwargs):
return HttpResponse('')
def kwargs_view(request, arg1=1, arg2=2):
return HttpResponse('')
def absolute_kwargs_view(request, arg1=1, arg2=2):
return HttpResponse('')
class ViewClass(object):
def __call__(self, request, *args, **kwargs):
return HttpResponse('')
view_class_instance = ViewClass()
def bad_view(request, *args, **kwargs):
raise ValueError("I don't think I'm getting good value for this view")
|
rls/nn/learningrates.py | StepNeverStop/RLs | 371 | 12669291 | <reponame>StepNeverStop/RLs
#!/usr/bin/env python3
# encoding: utf-8
from torch.optim import lr_scheduler
LR_REGISTER = {}
LR_REGISTER['lambda'] = lambda optimizer, max_step: lr_scheduler.LambdaLR(
optimizer, lr_lambda=lambda step: max(0, 1 - step / max_step))
LR_REGISTER['step'] = lr_scheduler.StepLR
LR_REGISTER['exp'] = lr_scheduler.ExponentialLR
LR_REGISTER['cos'] = lr_scheduler.CosineAnnealingLR
class ConsistentLR(lr_scheduler._LRScheduler):
def get_lr(self):
return [group['lr'] for group in self.optimizer.param_groups]
LR_REGISTER['default'] = ConsistentLR
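

# --- Illustrative sketch (not part of the original module) ---
# Example of driving a scheduler taken from LR_REGISTER; the small linear
# model and Adam optimizer below are assumptions made for the demo only.
if __name__ == "__main__":
    import torch

    model = torch.nn.Linear(4, 2)
    optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)
    scheduler = LR_REGISTER['lambda'](optimizer, max_step=100)  # linear decay to 0
    for step in range(3):
        optimizer.step()       # normally preceded by a backward pass
        scheduler.step()
        print(step, scheduler.get_last_lr())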
|
main.py | karan306/aml | 303 | 12669316 | <filename>main.py
import numpy as np
import tensorflow as tf
from trainer import *
from trainer256 import *
from config import get_config
from utils import prepare_dirs_and_logger, save_config
import pdb, os
def main(config):
prepare_dirs_and_logger(config)
if config.gpu>-1:
os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID" # see issue #152
os.environ["CUDA_VISIBLE_DEVICES"]=str(config.gpu)
config.data_format = 'NHWC'
if 1==config.model:
trainer = PG2(config)
trainer.init_net()
elif 11==config.model:
trainer = PG2_256(config)
trainer.init_net()
if config.is_train:
save_config(config)
trainer.train()
else:
# if not config.load_path:
# raise Exception("[!] You should specify `load_path` to load a pretrained model")
trainer.test()
if __name__ == "__main__":
config, unparsed = get_config()
main(config)
|
manage.py | billboggs/elasticsearch-HQ | 2,026 | 12669320 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = 'royrusso'
import os
import sys
from flask_migrate import MigrateCommand
from flask_script import Command, Manager, Option, Server as _Server
from elastichq import create_app
from elastichq.globals import db, socketio
manager = Manager(create_app)
class Server(_Server):
"""
From https://github.com/miguelgrinberg/flack/blob/master/manage.py
This is the only way to call ./manage.py runserver, as flask-socketio blocks the call otherwise.
"""
help = description = 'Runs the Socket.IO web server'
host = '0.0.0.0'
port = 5000
use_debugger = False
use_reloader = False
default_url = 'http://localhost:9200'
def get_options(self):
options = (
Option('-H', '--host',
dest='host',
default=self.host),
Option('-P', '--port',
dest='port',
type=int,
default=self.port),
Option('-d', '--debug',
action='store_true',
dest='use_debugger',
help=('enable the Werkzeug debugger (DO NOT use in '
'production code)'),
default=self.use_debugger),
Option('-r', '--reload',
action='store_true',
dest='use_reloader',
help=('monitor Python files for changes (not 100%% safe '
'for production use)'),
default=self.use_reloader),
Option('-R', '--no-reload',
action='store_false',
dest='use_reloader',
help='do not monitor Python files for changes',
default=self.use_reloader),
Option('-u', '--url',
dest='url',
help='Default url for initial display screen',
default=self.default_url)
)
return options
def __call__(self, app, host, port, use_debugger, use_reloader, url):
# override the default runserver command to start a Socket.IO server
if use_debugger is None:
use_debugger = app.debug
if use_debugger is None:
use_debugger = True
if use_reloader is None:
use_reloader = app.debug
socketio.run(app,
host=host,
port=port,
debug=use_debugger,
use_reloader=use_reloader,
**self.server_options)
class CleanDB(Command):
"""
    Drops the database. A new database will be populated with tables on the next application start.
"""
def run(self):
app = create_app()
db.drop_all(app=app)
class PyTest(Command):
"""
    Runs all unittests. You MUST make sure that all configured clusters are running for the tests to pass!
"""
version = None
def get_options(self):
options = (
Option('-E', '--esv',
dest='version',
default=self.version),
)
return options
def run(self, version):
import pytest
tests_path = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'tests')
sys.path.append(os.path.abspath(tests_path))
default_test_args = [
tests_path,
'--ignore=node_modules',
'--verbose',
'--color=yes',
'-c=' + tests_path + '/pytest.ini',
'--docker-compose-remove-volumes',
# '-s', # WARNING: Turning this on, breaks the tests on a Mac.
'--cov=' + tests_path,
'--cov-report=html:' + tests_path + '/htmlcov',
'--self-contained-html'
]
sig = None
if version is None:
hq_args = [
'--docker-compose=' + tests_path + '/hq_docker-compose.yml',
'--html=' + tests_path + '/htmlout/hq_ops.html',
'-m=hq_ops'
]
sig = pytest.main(default_test_args + hq_args)
if version is not None:
if "2" in version:
v2_args = ['--docker-compose=' + tests_path + '/v2_docker-compose.yml', '-m=es_versions',
'--html=' + tests_path + '/htmlout/es_2.html']
sig = pytest.main(default_test_args + v2_args)
if "5" in version:
v5_args = ['--docker-compose=' + tests_path + '/v5_docker-compose.yml', '-m=es_versions',
'--html=' + tests_path + '/htmlout/es_5.html']
sig = pytest.main(default_test_args + v5_args)
if "6" in version:
v6_args = ['--docker-compose=' + tests_path + '/v6_docker-compose.yml', '-m=es_versions',
'--html=' + tests_path + '/htmlout/es_6.html']
sig = pytest.main(default_test_args + v6_args)
if "7" in version:
v7_args = ['--docker-compose=' + tests_path + '/v7_docker-compose.yml', '-m=es_versions',
'--html=' + tests_path + '/htmlout/es_7.html']
sig = pytest.main(default_test_args + v7_args)
return sig
manager.add_command("runserver", Server())
manager.add_command('clean-db', CleanDB)
manager.add_command('run-tests', PyTest)
"""
This draws on https://github.com/miguelgrinberg/Flask-Migrate
To upgrade DB:
1. python manage.py db migrate (GENERATE the migration scripts)
2. python manage.py db upgrade (PERFORMS the update ddl)
"""
manager.add_command('db', MigrateCommand)
if __name__ == '__main__':
manager.run()
|
projects/Python/tests/test_send_receive.py | kokizzu/FastBinaryEncoding | 563 | 12669325 | import proto
from proto import proto
from unittest import TestCase
class MySender(proto.Sender):
def on_send(self, buffer, offset, size):
# Send nothing...
return 0
class MyReceiver(proto.Receiver):
def __init__(self):
super().__init__()
self._order = False
self._balance = False
self._account = False
def check(self):
return self._order and self._balance and self._account
def on_receive_ordermessage(self, value):
self._order = True
def on_receive_balancemessage(self, value):
self._balance = True
def on_receive_accountmessage(self, value):
self._account = True
class TestSendReceive(TestCase):
@staticmethod
def send_and_receive(index1, index2):
sender = MySender()
# Create and send a new order
order = proto.Order(1, "EURUSD", proto.OrderSide.buy, proto.OrderType.market, 1.23456, 1000.0)
sender.send(proto.OrderMessage(order))
# Create and send a new balance wallet
balance = proto.Balance("USD", 1000.0)
sender.send(proto.BalanceMessage(balance))
# Create and send a new account with some orders
account = proto.Account(1, "Test", proto.State.good, proto.Balance("USD", 1000.0), proto.Balance("EUR", 100.0))
account.orders.append(proto.Order(1, "EURUSD", proto.OrderSide.buy, proto.OrderType.market, 1.23456, 1000.0))
account.orders.append(proto.Order(2, "EURUSD", proto.OrderSide.sell, proto.OrderType.limit, 1.0, 100.0))
account.orders.append(proto.Order(3, "EURUSD", proto.OrderSide.buy, proto.OrderType.stop, 1.5, 10.0))
sender.send(proto.AccountMessage(account))
receiver = MyReceiver()
# Receive data from the sender
index1 %= sender.buffer.size
index2 %= sender.buffer.size
index2 = max(index1, index2)
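        # Feed the serialized stream back in three arbitrary chunks so the test
        # exercises partial-message reassembly inside the receiver.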
receiver.receive(sender.buffer, 0, index1)
receiver.receive(sender.buffer, index1, index2 - index1)
receiver.receive(sender.buffer, index2, sender.buffer.size - index2)
return receiver.check()
def test_send_and_receive(self):
for i in range(100):
for j in range(100):
self.assertTrue(self.send_and_receive(i, j))
|
tools/copy_recursive.py | cdavis5e/wine-mono | 179 | 12669338 | <gh_stars>100-1000
# Copy a directory recursively WITHOUT the race condition in cp -r.
# This also ignores symbolic links.
import errno
import os
import shutil
import sys
if sys.version_info.major >= 3:
file_exists_error = FileExistsError
def is_file_exists_error(e):
return True
else:
file_exists_error = OSError
def is_file_exists_error(e):
return e.errno == errno.EEXIST
def copy_recursive(src, destdir):
dest = os.path.join(destdir, os.path.basename(src))
if os.path.isdir(src):
try:
os.mkdir(dest)
except file_exists_error as e:
if not is_file_exists_error(e):
raise
for filename in os.listdir(src):
path = os.path.join(src, filename)
copy_recursive(path, dest)
elif os.path.islink(src):
pass
elif os.path.isfile(src):
shutil.copy(src, dest)
else:
raise Exception('unknown file type for: '+src)
if len(sys.argv) < 3:
    print('Usage: copy_recursive.py FILE DIRECTORY')
    sys.exit(1)
copy_recursive(sys.argv[1], sys.argv[2])
|
self_driving/ml_training/util.py | cclauss/self_driving_pi_car | 724 | 12669400 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import unittest
import tensorflow as tf
import numpy as np
command2int = {"up": 0, "left": 1, "right": 2}
int2command = {i[1]: i[0] for i in command2int.items()}
def run_test(testClass):
"""
Function to run all the tests from a class of tests.
:param testClass: class for testing
    :type testClass: unittest.TestCase
"""
suite = unittest.TestLoader().loadTestsFromTestCase(testClass)
unittest.TextTestRunner(verbosity=2).run(suite)
def reconstruct_from_record(record_path, bound=1000):
"""
    Function to transform a tf record into a tuple of
    np arrays. The size is controlled by the param "bound".
:param record_path: path to tf_record
:type record_path: str
:param bound: number of examples to be read
:type bound: int
:return: images, labels, shape
:rtype: np.array, np.array, tuple
"""
reconstructed_images = []
reconstructed_labels = []
record_iterator = tf.python_io.tf_record_iterator(path=record_path)
for i, string_record in enumerate(record_iterator):
if i <= bound:
example = tf.train.Example()
example.ParseFromString(string_record)
height = int(example.features.feature['height'].int64_list.value[0]) # noqa
width = int(example.features.feature['width'].int64_list.value[0]) # noqa
channels = int(example.features.feature['channels'].int64_list.value[0]) # noqa
img_string = (example.features.feature['image_raw']
.bytes_list
.value[0])
annotation_string = (example.features.feature['labels_raw']
.bytes_list
.value[0])
reconstructed_img = np.fromstring(img_string, dtype=np.uint8)
reconstructed_annotation = np.fromstring(annotation_string,
dtype=np.uint8)
reconstructed_images.append(reconstructed_img)
reconstructed_labels.append(reconstructed_annotation)
else:
break
shape = (height, width, channels)
reconstructed_images = np.array(reconstructed_images)
reconstructed_labels = np.array(reconstructed_labels)
return reconstructed_images, reconstructed_labels, shape
def accuracy_per_category(pred, label, categories):
"""
Function to give the model's accuracy for each category.
:param pred: model's prediction
:type pred: np.array
:param label: true labels
:type label: np.array
:param categories: number of categories
:type categories: int
:return: accuracy's list
:rtype: list of float
"""
pred, label = list(pred), list(label)
results = []
for cat in range(categories):
vfunc = np.vectorize(lambda x: 1 if x == cat else 0)
mapped_pred = vfunc(pred)
mapped_labels = vfunc(label)
right = float(np.dot(mapped_pred, mapped_labels))
total = np.sum(mapped_labels)
if total == 0:
results.append(0.0)
else:
results.append((right / total))
return results
def get_random_architecture_and_activations(network_sizes,
categories=3,
upper_bound=6000):
"""
Creates a random architecture list and activations list
using a list of sizes for different networks.
:param network_sizes: list of network's size
:type network_sizes: list of int
:param categories: number of categories
:type categories: int
:param upper_bound: max number of nodes per layer
:type upper_bound: int
:return: list of hidden layer sizes, list of activation functions
:rtype: list of int, list of function tensorflow
"""
activations_dict = {0: tf.nn.relu,
1: tf.nn.sigmoid,
2: tf.nn.tanh}
hidden_layers = []
activations = []
lower_bound = categories
for size in network_sizes:
hidden_sizes = []
last = upper_bound
for _ in range(size):
if lower_bound < last / 2:
new_size = np.random.randint(lower_bound, last / 2)
else:
new_size = np.random.randint(lower_bound, lower_bound + 1)
hidden_sizes.append(new_size)
last = new_size
hidden_layers.append(hidden_sizes)
for hidden in hidden_layers:
activ = np.random.randint(0, 3, len(hidden))
activ = list(map(lambda x: activations_dict[x], activ))
activations.append(activ)
for hidden in hidden_layers:
hidden.append(categories)
return hidden_layers, activations
def parser_with_normalization(tfrecord):
"""
Parser function, transforming string into
a tuple of tensors.
:param tfrecord: a single binary serialized
:type tfrecord: tf.Tensor(shape=(), dype=tf.string)
:return: image, label
:rtype: tf.Tensor(shape=(1, height*width*channels),
dtype=tf.float32),
            tf.Tensor(shape=(1,), dtype=tf.int32)
"""
features = {'height': tf.FixedLenFeature([], tf.int64),
'width': tf.FixedLenFeature([], tf.int64),
'channels': tf.FixedLenFeature([], tf.int64),
'image_raw': tf.FixedLenFeature([], tf.string),
'labels_raw': tf.FixedLenFeature([], tf.string)}
tfrecord_parsed = tf.parse_single_sequence_example(
tfrecord, features)
image = tf.decode_raw(tfrecord_parsed[0]['image_raw'], tf.uint8)
image = tf.cast(image, tf.float32) / 255
label = tf.decode_raw(tfrecord_parsed[0]['labels_raw'], tf.uint8)
label = tf.cast(label, tf.int32)
return image, label
def get_iterator(filename, batch_size, parser):
"""
    Function to get an iterator.
:param filename: path to tfrecord dataset
:type filename: str
:param batch_size: size of the batch
:type batch_size: int
:param parser: function to parse a string
into a tensor
:type parser: tf.Tensor(shape=(), dype=tf.string)
->
tf.Tensor(shape=(1, height*width*channels),
dtype=tf.float32),
            tf.Tensor(shape=(1,), dtype=tf.int32)
:return: data iterator
:rtype: tf.contrib.data.Iterator
"""
dataset = tf.contrib.data.TFRecordDataset(filename)
dataset = dataset.map(parser)
dataset = dataset.repeat()
dataset = dataset.batch(batch_size)
dataset = dataset.shuffle(batch_size * 2)
iterator = dataset.make_initializable_iterator()
return iterator
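

# --- Illustrative sketch (not part of the original module) ---
# Small demo of accuracy_per_category() and the command mappings above; the
# prediction/label vectors are made up for the example. Category 0 is matched
# perfectly, categories 1 and 2 only half of the time.
if __name__ == "__main__":
    pred = np.array([0, 0, 1, 2, 1, 2])
    label = np.array([0, 0, 1, 1, 2, 2])
    print(accuracy_per_category(pred, label, categories=3))  # [1.0, 0.5, 0.5]
    print(command2int["left"], int2command[2])               # 1 right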
|