max_stars_repo_path (string, 4–245 chars) | max_stars_repo_name (string, 7–115 chars) | max_stars_count (int64, 101–368k) | id (string, 2–8 chars) | content (string, 6–1.03M chars) |
---|---|---|---|---|
setup.py | idex-biometrics/fusesoc | 829 | 27816 | <gh_stars>100-1000
# Copyright FuseSoC contributors
# Licensed under the 2-Clause BSD License, see LICENSE for details.
# SPDX-License-Identifier: BSD-2-Clause
import os
from setuptools import setup
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
setup(
name="fusesoc",
packages=["fusesoc", "fusesoc.capi2", "fusesoc.provider"],
use_scm_version={
"relative_to": __file__,
"write_to": "fusesoc/version.py",
},
author="<NAME>",
author_email="<EMAIL>",
description=(
"FuseSoC is a package manager and a set of build tools for HDL "
"(Hardware Description Language) code."
),
license="BSD-2-Clause",
keywords=[
"VHDL",
"verilog",
"hdl",
"rtl",
"synthesis",
"FPGA",
"simulation",
"Xilinx",
"Altera",
],
url="https://github.com/olofk/fusesoc",
long_description=read("README.md"),
long_description_content_type="text/markdown",
classifiers=[
"Development Status :: 5 - Production/Stable",
"Topic :: Utilities",
"Topic :: Software Development :: Build Tools",
"License :: OSI Approved :: BSD License",
],
entry_points={"console_scripts": ["fusesoc = fusesoc.main:main"]},
setup_requires=[
"setuptools_scm",
],
install_requires=[
"edalize>=0.2.3",
"pyparsing",
"pyyaml",
"simplesat>=0.8.0",
],
# Supported Python versions: 3.6+
python_requires=">=3.6, <4",
)
|
amadeus/shopping/availability/__init__.py | minjikarin/amadeus-python | 125 | 27833 | from ._flight_availabilities import FlightAvailabilities
__all__ = ['FlightAvailabilities']
|
crits/comments/urls.py | dutrow/crits | 738 | 27851 | <reponame>dutrow/crits<filename>crits/comments/urls.py
from django.conf.urls import url
from . import views
urlpatterns = [
url(r'^remove/(?P<obj_type>\S+)/(?P<obj_id>\S+)/$', views.remove_comment, name='crits-comments-views-remove_comment'),
url(r'^(?P<method>\S+)/(?P<obj_type>\S+)/(?P<obj_id>\S+)/$', views.add_update_comment, name='crits-comments-views-add_update_comment'),
url(r'^activity/$', views.activity, name='crits-comments-views-activity'),
url(r'^activity/(?P<atype>\S+)/(?P<value>\S+)/$', views.activity, name='crits-comments-views-activity'),
url(r'^activity/get_new_comments/$', views.get_new_comments, name='crits-comments-views-get_new_comments'),
url(r'^search/(?P<stype>[A-Za-z0-9\-\._]+)/(?P<sterm>.+?)/$', views.comment_search, name='crits-comments-views-comment_search'),
url(r'^list/$', views.comments_listing, name='crits-comments-views-comments_listing'),
url(r'^list/(?P<option>\S+)/$', views.comments_listing, name='crits-comments-views-comments_listing'),
]
|
tests/test_bitshares.py | silverchen0402/python-bitshares | 102 | 27853 | <filename>tests/test_bitshares.py
# -*- coding: utf-8 -*-
import mock
import string
import unittest
import random
from pprint import pprint
from bitshares import BitShares
from bitshares.account import Account
from bitsharesbase.operationids import getOperationNameForId
from bitshares.amount import Amount
from bitsharesbase.account import PrivateKey
from bitsharesbase.asset_permissions import todict
from bitshares.instance import set_shared_bitshares_instance
from .fixtures import fixture_data, bitshares
class Testcases(unittest.TestCase):
def setUp(self):
fixture_data()
def test_connect(self):
bitshares.connect()
def test_set_default_account(self):
bitshares.set_default_account("init0")
def test_info(self):
info = bitshares.info()
for key in [
"current_witness",
"head_block_id",
"head_block_number",
"id",
"last_irreversible_block_num",
"next_maintenance_time",
"recently_missed_count",
"time",
]:
self.assertTrue(key in info)
def test_finalizeOps(self):
tx1 = bitshares.new_tx()
tx2 = bitshares.new_tx()
bitshares.transfer("init1", 1, "BTS", append_to=tx1)
bitshares.transfer("init1", 2, "BTS", append_to=tx2)
bitshares.transfer("init1", 3, "BTS", append_to=tx1)
tx1 = tx1.json()
tx2 = tx2.json()
ops1 = tx1["operations"]
ops2 = tx2["operations"]
self.assertEqual(len(ops1), 2)
self.assertEqual(len(ops2), 1)
def test_transfer(self):
tx = bitshares.transfer("1.2.101", 1.33, "BTS", memo="Foobar", account="init0")
self.assertEqual(getOperationNameForId(tx["operations"][0][0]), "transfer")
op = tx["operations"][0][1]
self.assertIn("memo", op)
self.assertEqual(op["from"], "1.2.100")
self.assertEqual(op["to"], "1.2.101")
amount = Amount(op["amount"])
self.assertEqual(float(amount), 1.33)
def test_create_account(self):
name = "".join(random.choice(string.ascii_lowercase) for _ in range(12))
key1 = PrivateKey()
key2 = PrivateKey()
key3 = PrivateKey()
key4 = PrivateKey()
tx = bitshares.create_account(
name,
registrar="init0", # 1.2.100
referrer="init1", # 1.2.101
referrer_percent=33,
owner_key=format(key1.pubkey, "BTS"),
active_key=format(key2.pubkey, "BTS"),
memo_key=format(key3.pubkey, "BTS"),
additional_owner_keys=[format(key4.pubkey, "BTS")],
additional_active_keys=[format(key4.pubkey, "BTS")],
additional_owner_accounts=["committee-account"], # 1.2.0
additional_active_accounts=["committee-account"],
proxy_account="init0",
storekeys=False,
)
self.assertEqual(
getOperationNameForId(tx["operations"][0][0]), "account_create"
)
op = tx["operations"][0][1]
role = "active"
self.assertIn(format(key4.pubkey, "BTS"), [x[0] for x in op[role]["key_auths"]])
self.assertIn(format(key4.pubkey, "BTS"), [x[0] for x in op[role]["key_auths"]])
self.assertIn("1.2.0", [x[0] for x in op[role]["account_auths"]])
role = "owner"
self.assertIn(format(key4.pubkey, "BTS"), [x[0] for x in op[role]["key_auths"]])
self.assertIn(format(key4.pubkey, "BTS"), [x[0] for x in op[role]["key_auths"]])
self.assertIn("1.2.0", [x[0] for x in op[role]["account_auths"]])
self.assertEqual(op["options"]["voting_account"], "1.2.100")
self.assertEqual(op["registrar"], "1.2.100")
self.assertEqual(op["referrer"], "1.2.101")
self.assertEqual(op["referrer_percent"], 33 * 100)
def test_create_asset(self):
symbol = "FOOBAR"
precision = 7
max_supply = 100000
description = "Test asset"
is_bitasset = True
market_fee_percent = 0.1
max_market_fee = 10
blacklist_authorities = ["init1"]
blacklist_authorities_ids = [Account(a)["id"] for a in blacklist_authorities]
blacklist_markets = ["BTS"]
blacklist_markets_ids = ["1.3.0"]
permissions = {
"charge_market_fee": True,
"white_list": True,
"override_authority": True,
"transfer_restricted": True,
"disable_force_settle": True,
"global_settle": True,
"disable_confidential": True,
"witness_fed_asset": True,
"committee_fed_asset": True,
}
flags = {
"charge_market_fee": False,
"white_list": False,
"override_authority": False,
"transfer_restricted": False,
"disable_force_settle": False,
"global_settle": False,
"disable_confidential": False,
"witness_fed_asset": False,
"committee_fed_asset": False,
}
tx = bitshares.create_asset(
symbol,
precision,
max_supply,
market_fee_percent=market_fee_percent,
max_market_fee=max_market_fee,
description=description,
is_bitasset=is_bitasset,
blacklist_authorities=blacklist_authorities,
blacklist_markets=blacklist_markets,
permissions=permissions,
flags=flags,
)
self.assertEqual(getOperationNameForId(tx["operations"][0][0]), "asset_create")
op = tx["operations"][0][1]
self.assertEqual(op["issuer"], "1.2.100")
self.assertEqual(op["symbol"], symbol)
self.assertEqual(op["precision"], precision)
self.assertEqual(
op["common_options"]["max_supply"], int(max_supply * 10 ** precision)
)
self.assertEqual(
op["common_options"]["market_fee_percent"], int(market_fee_percent * 100)
)
self.assertEqual(
op["common_options"]["max_market_fee"],
int(max_market_fee * 10 ** precision),
)
self.assertEqual(op["common_options"]["description"], description)
self.assertEqual(
op["common_options"]["blacklist_authorities"], blacklist_authorities_ids
)
self.assertEqual(
op["common_options"]["blacklist_markets"], blacklist_markets_ids
)
self.assertEqual(
todict(op["common_options"]["issuer_permissions"]), permissions
)
self.assertEqual(todict(op["common_options"]["flags"]), flags)
def test_weight_threshold(self):
auth = {
"account_auths": [["1.2.0", "1"]],
"extensions": [],
"key_auths": [
["<KEY>", 1],
["<KEY>", 1],
],
"weight_threshold": 3,
} # threshold fine
bitshares._test_weights_treshold(auth)
auth = {
"account_auths": [["1.2.0", "1"]],
"extensions": [],
"key_auths": [
["<KEY>", 1],
["BTS7GM9YXcsoAJAgKbqW2oVj7bnNXFNL4pk9NugqKWPmuhoEDbkDv", 1],
],
"weight_threshold": 4,
} # too high
with self.assertRaises(ValueError):
bitshares._test_weights_treshold(auth)
def test_allow(self):
tx = bitshares.allow(
"BTS<KEY>",
weight=1,
threshold=1,
permission="owner",
)
self.assertEqual(
getOperationNameForId(tx["operations"][0][0]), "account_update"
)
op = tx["operations"][0][1]
self.assertIn("owner", op)
self.assertIn(
["B<KEY>", "1"],
op["owner"]["key_auths"],
)
self.assertEqual(op["owner"]["weight_threshold"], 1)
def test_disallow(self):
with self.assertRaisesRegex(ValueError, ".*Changes nothing.*"):
bitshares.disallow(
"BTS<KEY>",
weight=1,
threshold=1,
permission="owner",
)
with self.assertRaisesRegex(ValueError, "Cannot have threshold of 0"):
bitshares.disallow(
"BTS6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8B<KEY>",
weight=1,
threshold=1,
permission="owner",
)
bitshares.disallow(
"BTS5i8bEmtnN4fP4jAsBe17z9CCuQcHLkRyTuRZXYZeN2kVCL1sXa",
weight=1,
threshold=1,
permission="active",
)
def test_update_memo_key(self):
tx = bitshares.update_memo_key(
"<KEY>"
)
self.assertEqual(
getOperationNameForId(tx["operations"][0][0]), "account_update"
)
op = tx["operations"][0][1]
self.assertEqual(
op["new_options"]["memo_key"],
"<KEY>",
)
def test_approvewitness(self):
tx = bitshares.approvewitness("1.6.1")
self.assertEqual(
getOperationNameForId(tx["operations"][0][0]), "account_update"
)
op = tx["operations"][0][1]
self.assertIn("1:0", op["new_options"]["votes"])
def test_approvecommittee(self):
tx = bitshares.approvecommittee("1.5.0")
self.assertEqual(
getOperationNameForId(tx["operations"][0][0]), "account_update"
)
op = tx["operations"][0][1]
self.assertIn("0:11", op["new_options"]["votes"])
|
kivy/tests/pyinstaller/simple_widget/project/widget.py | Galland/kivy | 13,889 | 27864 | <filename>kivy/tests/pyinstaller/simple_widget/project/widget.py<gh_stars>1000+
from kivy.uix.widget import Widget
class MyWidget(Widget):
def __init__(self, **kwargs):
super(MyWidget, self).__init__(**kwargs)
def callback(*l):
self.x = self.y
self.fbind('y', callback)
callback()
|
api/guids/urls.py | gaybro8777/osf.io | 628 | 27880 | <gh_stars>100-1000
from django.conf.urls import url
from api.guids import views
app_name = 'osf'
urlpatterns = [
url(r'^(?P<guids>\w+)/$', views.GuidDetail.as_view(), name=views.GuidDetail.view_name),
]
|
cogdl/models/emb/rotate.py | cenyk1230/cogdl | 1,072 | 27893 | import torch
from torch import Tensor
import torch.nn as nn
import torch.nn.functional as F
from .. import BaseModel, register_model
from .knowledge_base import KGEModel
@register_model("rotate")
class RotatE(KGEModel):
r"""
Implementation of RotatE model from the paper `"RotatE: Knowledge Graph Embedding by Relational Rotation in Complex Space"
<https://openreview.net/forum?id=HkgEQnRqYQ>`.
borrowed from `KnowledgeGraphEmbedding<https://github.com/DeepGraphLearning/KnowledgeGraphEmbedding>`
"""
def __init__(
self, nentity, nrelation, hidden_dim, gamma, double_entity_embedding=False, double_relation_embedding=False
):
super(RotatE, self).__init__(nentity, nrelation, hidden_dim, gamma, True, double_relation_embedding)
def score(self, head, relation, tail, mode):
pi = 3.14159265358979323846
re_head, im_head = torch.chunk(head, 2, dim=2)
re_tail, im_tail = torch.chunk(tail, 2, dim=2)
# Make phases of relations uniformly distributed in [-pi, pi]
phase_relation = relation / (self.embedding_range.item() / pi)
re_relation = torch.cos(phase_relation)
im_relation = torch.sin(phase_relation)
if mode == "head-batch":
re_score = re_relation * re_tail + im_relation * im_tail
im_score = re_relation * im_tail - im_relation * re_tail
re_score = re_score - re_head
im_score = im_score - im_head
else:
re_score = re_head * re_relation - im_head * im_relation
im_score = re_head * im_relation + im_head * re_relation
re_score = re_score - re_tail
im_score = im_score - im_tail
score = torch.stack([re_score, im_score], dim=0)
score = score.norm(dim=0)
score = self.gamma.item() - score.sum(dim=2)
return score
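# Clarifying note (added; not part of the upstream file): with unit-modulus
# relation embeddings, the two branches above compute the complex product
# h * r (tail-batch) or conj(r) * t (head-batch) and subtract the remaining
# entity. Stacking the real/imaginary parts and taking norm(dim=0) yields the
# per-dimension complex modulus, so the method returns
#     gamma - sum_i |h_i o r_i - t_i|
# i.e. the RotatE distance score from the paper.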
|
Lib/fontTools/ttLib/tables/T_S_I_B_.py | twardoch/fonttools-py27 | 240 | 27902 | <reponame>twardoch/fonttools-py27
from __future__ import print_function, division, absolute_import
from fontTools.misc.py23 import *
from .T_S_I_V_ import table_T_S_I_V_
class table_T_S_I_B_(table_T_S_I_V_):
pass
|
calico/datadog_checks/calico/check.py | davidlrosenblum/integrations-extras | 158 | 27950 | <reponame>davidlrosenblum/integrations-extras
from datadog_checks.base import OpenMetricsBaseCheckV2
from .metrics import METRIC_MAP
class CalicoCheck(OpenMetricsBaseCheckV2):
def __init__(self, name, init_config, instances=None):
super(CalicoCheck, self).__init__(
name,
init_config,
instances,
)
def get_default_config(self):
return {'namespace': 'calico', 'metrics': [METRIC_MAP]}
|
dojo/unittests/tools/test_cloudsploit_parser.py | art-tykh/django-DefectDojo | 1,772 | 27956 | from django.test import TestCase
from dojo.models import Test
from dojo.tools.cloudsploit.parser import CloudsploitParser
class TestCloudsploitParser(TestCase):
def test_cloudsploit_parser_with_no_vuln_has_no_findings(self):
testfile = open("dojo/unittests/scans/cloudsploit/cloudsploit_zero_vul.json")
parser = CloudsploitParser()
findings = parser.get_findings(testfile, Test())
testfile.close()
self.assertEqual(0, len(findings))
def test_cloudsploit_parser_with_one_criticle_vuln_has_one_findings(self):
testfile = open("dojo/unittests/scans/cloudsploit/cloudsploit_one_vul.json")
parser = CloudsploitParser()
findings = parser.get_findings(testfile, Test())
testfile.close()
self.assertEqual(1, len(findings))
def test_cloudsploit_parser_with_many_vuln_has_many_findings(self):
testfile = open("dojo/unittests/scans/cloudsploit/cloudsploit_many_vul.json")
parser = CloudsploitParser()
findings = parser.get_findings(testfile, Test())
testfile.close()
self.assertEqual(6, len(findings))
|
corehq/ex-submodules/phonelog/management/commands/migrate_device_entry.py | dimagilg/commcare-hq | 471 | 27959 | <reponame>dimagilg/commcare-hq
from datetime import datetime, timedelta
from django.conf import settings
from django.core.management.base import BaseCommand
from django.db import connection
from phonelog.models import OldDeviceReportEntry, DeviceReportEntry
COLUMNS = (
"xform_id", "i", "msg", "type", "date", "server_date", "domain",
"device_id", "app_version", "username", "user_id"
)
class Command(BaseCommand):
help = "Migrate device reports to partitioned table"
def handle(self, *args, **options):
partitioned_table = DeviceReportEntry._meta.db_table
old_table = OldDeviceReportEntry._meta.db_table
now = datetime.utcnow()
oldest_date = now - timedelta(days=settings.DAYS_TO_KEEP_DEVICE_LOGS)
current = now
while current > oldest_date:
hour_ago = current - timedelta(hours=1)
with connection.cursor() as cursor:
cursor.execute(
"INSERT INTO " + partitioned_table +
" (" + ','.join(COLUMNS) + ") " +
"SELECT " +
','.join(COLUMNS) + " " +
"FROM " + old_table + " " +
"WHERE server_date > %s AND server_date <= %s",
[hour_ago, current]
)
print("Inserted device logs from %s to %s" % (hour_ago, current))
current = hour_ago
|
tensorflow/standard/reinforcement_learning/rl_on_gcp_demo/trainer/ddpg_agent.py | VanessaDo/cloudml-samples | 1,552 | 27971 | # Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implementation of a DDPG agent.
Implementation of DDPG - Deep Deterministic Policy Gradient
Algorithm and hyperparameter details can be found here:
http://arxiv.org/pdf/1509.02971v2.pdf
"""
import agent
from common import replay_buffer
from common.actor_critic import ActorNetwork
from common.actor_critic import CriticNetwork
import numpy as np
class DDPG(agent.Agent):
"""DDPG agent."""
def __init__(self, env, sess, config):
"""Initialize members."""
state_dim = env.observation_space.shape[0]
self.env = env
self.action_dim = env.action_space.shape[0]
self.action_high = env.action_space.high
self.action_low = env.action_space.low
self.batch_size = config.batch_size
self.warmup_size = config.warmup_size
self.gamma = config.gamma
self.sigma = config.sigma
self.noise_cap = config.c
self.actor = ActorNetwork(sess=sess,
state_dim=state_dim,
action_dim=self.action_dim,
action_high=self.action_high,
action_low=self.action_low,
learning_rate=config.actor_lr,
grad_norm_clip=config.grad_norm_clip,
tau=config.tau,
batch_size=config.batch_size)
self.critic = CriticNetwork(sess=sess,
state_dim=state_dim,
action_dim=self.action_dim,
learning_rate=config.critic_lr,
tau=config.tau,
gamma=config.gamma)
self.replay_buffer = replay_buffer.ReplayBuffer(
buffer_size=config.buffer_size)
def random_action(self, observation):
"""Return a random action."""
return self.env.action_space.sample()
def action(self, observation):
"""Return an action according to the agent's policy."""
return self.actor.get_action(observation)
def action_with_noise(self, observation):
"""Return a noisy action."""
if self.replay_buffer.size > self.warmup_size:
action = self.action(observation)
else:
action = self.random_action(observation)
noise = np.clip(np.random.randn(self.action_dim) * self.sigma,
-self.noise_cap, self.noise_cap)
action_with_noise = action + noise
return (np.clip(action_with_noise, self.action_low, self.action_high),
action, noise)
def store_experience(self, s, a, r, t, s2):
"""Save experience to replay buffer."""
self.replay_buffer.add(s, a, r, t, s2)
def train(self, global_step):
"""Train the agent's policy for 1 iteration."""
if self.replay_buffer.size > self.warmup_size:
s0, a, r, t, s1 = self.replay_buffer.sample_batch(self.batch_size)
target_actions = self.actor.get_target_action(s1)
target_qval = self.get_target_qval(s1, target_actions)
t = t.astype(dtype=int)
y = r + self.gamma * target_qval * (1 - t)
self.critic.train(s0, a, y)
actions = self.actor.get_action(s0)
grads = self.critic.get_action_gradients(s0, actions)
self.actor.train(s0, grads[0])
self.update_targets()
def update_targets(self):
"""Update all target networks."""
self.actor.update_target_network()
self.critic.update_target_network()
def get_target_qval(self, observation, action):
"""Get target Q-val."""
return self.critic.get_target_qval(observation, action)
def get_qval(self, observation, action):
"""Get Q-val."""
return self.critic.get_qval(observation, action)
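# Usage sketch (illustrative only; `env`, `sess` and `config` are assumed to be
# a gym-style environment, a tf.Session and a hyperparameter namespace -- none
# of them are created in this file):
#
#   ddpg = DDPG(env, sess, config)
#   s = env.reset()
#   for step in range(total_steps):
#       a, _, _ = ddpg.action_with_noise(s)
#       s2, r, done, _ = env.step(a)
#       ddpg.store_experience(s, a, r, done, s2)
#       ddpg.train(step)
#       s = env.reset() if done else s2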
|
observations/r/unemp_dur.py | hajime9652/observations | 199 | 27998 | # -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import csv
import numpy as np
import os
import sys
from observations.util import maybe_download_and_extract
def unemp_dur(path):
"""Unemployment Duration
Journal of Business Economics and Statistics web site :
http://amstat.tandfonline.com/loi/ubes20
*number of observations* : 3343
A time series containing:
spell
length of spell in number of two-week intervals
censor1
= 1 if re-employed at full-time job
censor2
= 1 if re-employed at part-time job
censor3
1 if re-employed but left job: pt-ft status unknown
censor4
1 if still jobless
age
age
ui
= 1 if filed UI claim
reprate
eligible replacement rate
disrate
eligible disregard rate
logwage
log weekly earnings in lost job (1985\\$)
tenure
years tenure in lost job
<NAME>. (1996) “Unemployment Insurance Rules, Joblessness, and
Part-time Work”, *Econometrica*, **64**, 647–682.
Args:
path: str.
Path to directory which either stores file or otherwise file will
be downloaded and extracted there.
Filename is `unemp_dur.csv`.
Returns:
Tuple of np.ndarray `x_train` with 3343 rows and 11 columns and
dictionary `metadata` of column headers (feature names).
"""
import pandas as pd
path = os.path.expanduser(path)
filename = 'unemp_dur.csv'
if not os.path.exists(os.path.join(path, filename)):
url = 'http://dustintran.com/data/r/Ecdat/UnempDur.csv'
maybe_download_and_extract(path, url,
save_file_name='unemp_dur.csv',
resume=False)
data = pd.read_csv(os.path.join(path, filename), index_col=0,
parse_dates=True)
x_train = data.values
metadata = {'columns': data.columns}
return x_train, metadata
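# Minimal usage sketch (illustrative; the data directory below is an
# assumption -- any writable path works, the CSV is downloaded on first call):
#
#   x_train, metadata = unemp_dur('~/data')
#   print(x_train.shape)          # (3343, 11)
#   print(metadata['columns'])    # the 11 column names listed above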
|
vissl/models/heads/__init__.py | blazejdolicki/vissl | 2,512 | 28004 | <gh_stars>1000+
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from pathlib import Path
from typing import Callable
from classy_vision.generic.registry_utils import import_all_modules
FILE_ROOT = Path(__file__).parent
MODEL_HEADS_REGISTRY = {}
MODEL_HEADS_NAMES = set()
def register_model_head(name: str):
"""Registers Self-Supervision Model Heads.
This decorator allows VISSL to add custom model heads, even if the
model head itself is not part of VISSL. To use it, apply this decorator
to a model head class, like this:
.. code-block:: python
@register_model_head('my_model_head_name')
def my_model_head():
...
To get a model head from a configuration file, see :func:`get_model_head`."""
def register_model_head_cls(cls: Callable[..., Callable]):
if name in MODEL_HEADS_REGISTRY:
raise ValueError("Cannot register duplicate model head ({})".format(name))
if cls.__name__ in MODEL_HEADS_NAMES:
raise ValueError(
"Cannot register task with duplicate model head name ({})".format(
cls.__name__
)
)
MODEL_HEADS_REGISTRY[name] = cls
MODEL_HEADS_NAMES.add(cls.__name__)
return cls
return register_model_head_cls
def get_model_head(name: str):
"""
Given the model head name, construct the head if it's registered
with VISSL.
"""
assert name in MODEL_HEADS_REGISTRY, "Unknown model head"
return MODEL_HEADS_REGISTRY[name]
# automatically import any Python files in the heads/ directory
import_all_modules(FILE_ROOT, "vissl.models.heads")
from vissl.models.heads.linear_eval_mlp import LinearEvalMLP # isort:skip # noqa
from vissl.models.heads.mlp import MLP # isort:skip # noqa
from vissl.models.heads.siamese_concat_view import ( # isort:skip # noqa
SiameseConcatView,
)
from vissl.models.heads.swav_prototypes_head import ( # isort:skip # noqa
SwAVPrototypesHead,
)
__all__ = [
"get_model_head",
"LinearEvalMLP",
"MLP",
"SiameseConcatView",
"SwAVPrototypesHead",
]
|
homeassistant/components/ridwell/const.py | MrDelik/core | 30,023 | 28026 | """Constants for the Ridwell integration."""
import logging
DOMAIN = "ridwell"
LOGGER = logging.getLogger(__package__)
DATA_ACCOUNT = "account"
DATA_COORDINATOR = "coordinator"
SENSOR_TYPE_NEXT_PICKUP = "next_pickup"
|
models/__init__.py | dudtjakdl/OpenNMT-Korean-To-English | 1,491 | 28036 | from .EncoderRNN import EncoderRNN
from .DecoderRNN import DecoderRNN
from .TopKDecoder import TopKDecoder
from .seq2seq import Seq2seq
|
panel/layout/spacer.py | sthagen/holoviz-panel | 601 | 28037 | """
Spacer components to add horizontal or vertical space to a layout.
"""
import param
from bokeh.models import Div as BkDiv, Spacer as BkSpacer
from ..reactive import Reactive
class Spacer(Reactive):
"""
The `Spacer` layout is a very versatile component which makes it easy to
put fixed or responsive spacing between objects.
Like all other components spacers support both absolute and responsive
sizing modes.
Reference: https://panel.holoviz.org/user_guide/Customization.html#spacers
:Example:
>>> pn.Row(
... 1, pn.Spacer(width=200),
... 2, pn.Spacer(width=100),
... 3
... )
"""
_bokeh_model = BkSpacer
def _get_model(self, doc, root=None, parent=None, comm=None):
properties = self._process_param_change(self._init_params())
model = self._bokeh_model(**properties)
if root is None:
root = model
self._models[root.ref['id']] = (model, parent)
return model
class VSpacer(Spacer):
"""
The `VSpacer` layout provides responsive vertical spacing.
Using this component we can space objects equidistantly in a layout and
allow the empty space to shrink when the browser is resized.
Reference: https://panel.holoviz.org/user_guide/Customization.html#spacers
:Example:
>>> pn.Column(
... pn.layout.VSpacer(), 'Item 1',
... pn.layout.VSpacer(), 'Item 2',
... pn.layout.VSpacer()
... )
"""
sizing_mode = param.Parameter(default='stretch_height', readonly=True)
class HSpacer(Spacer):
"""
The `HSpacer` layout provides responsive horizontal spacing.
Using this component we can space objects equidistantly in a layout and
allow the empty space to shrink when the browser is resized.
Reference: https://panel.holoviz.org/user_guide/Customization.html#spacers
:Example:
>>> pn.Row(
... pn.layout.HSpacer(), 'Item 1',
... pn.layout.HSpacer(), 'Item 2',
... pn.layout.HSpacer()
... )
"""
sizing_mode = param.Parameter(default='stretch_width', readonly=True)
class Divider(Reactive):
"""
A `Divider` draws a horizontal rule (a `<hr>` tag in HTML) to separate
multiple components in a layout. It automatically spans the full width of
the container.
Reference: https://panel.holoviz.org/reference/layouts/Divider.html
:Example:
>>> pn.Column(
... '# Lorem Ipsum',
... pn.layout.Divider(),
... 'A very long text... '
>>> )
"""
width_policy = param.ObjectSelector(default="fit", readonly=True)
_bokeh_model = BkDiv
def _get_model(self, doc, root=None, parent=None, comm=None):
properties = self._process_param_change(self._init_params())
properties['style'] = {'width': '100%', 'height': '100%'}
model = self._bokeh_model(text='<hr style="margin: 0px">', **properties)
if root is None:
root = model
self._models[root.ref['id']] = (model, parent)
return model
|
reddit2telegram/channels/r_gentlemanboners/app.py | mainyordle/reddit2telegram | 187 | 28038 | <gh_stars>100-1000
#encoding:utf-8
from utils import weighted_random_subreddit
subreddit = weighted_random_subreddit({
'BeautifulFemales': 0.25,
'cutegirlgifs': 0.25,
'gentlemanboners': 0.25,
'gentlemanbonersgifs': 0.25
})
t_channel = '@r_gentlemanboners'
def send_post(submission, r2t):
return r2t.send_simple(submission,
text=False,
gif=True,
img=True,
album=False,
other=False
)
|
pytorch_ares/third_party/free_adv_train/multi_restart_pgd_attack.py | thu-ml/realsafe | 107 | 28064 | <reponame>thu-ml/realsafe
"""
Implementation of attack methods. Running this file as a program will
evaluate the model and get the validation accuracy and then
apply the attack to the model specified by the config file and store
the examples in an .npy file.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
import numpy as np
import sys
import cifar10_input
import cifar100_input
import config
from tqdm import tqdm
import os
config = config.get_args()
_NUM_RESTARTS = config.num_restarts
class LinfPGDAttack:
def __init__(self, model, epsilon, num_steps, step_size, loss_func):
"""Attack parameter initialization. The attack performs k steps of
size a, while always staying within epsilon from the initial
point."""
self.model = model
self.epsilon = epsilon
self.num_steps = num_steps
self.step_size = step_size
if loss_func == 'xent':
loss = model.xent
elif loss_func == 'cw':
label_mask = tf.one_hot(model.y_input,
10,
on_value=1.0,
off_value=0.0,
dtype=tf.float32)
correct_logit = tf.reduce_sum(label_mask * model.pre_softmax, axis=1)
wrong_logit = tf.reduce_max((1 - label_mask) * model.pre_softmax - 1e4 * label_mask, axis=1)
loss = -tf.nn.relu(correct_logit - wrong_logit + 0)
else:
print('Unknown loss function. Defaulting to cross-entropy')
loss = model.xent
self.grad = tf.gradients(loss, model.x_input)[0]
def perturb(self, x_nat, y, sess):
"""Given a set of examples (x_nat, y), returns a set of adversarial
examples within epsilon of x_nat in l_infinity norm."""
x = x_nat + np.random.uniform(-self.epsilon, self.epsilon, x_nat.shape)
x = np.clip(x, 0, 255)
for i in range(self.num_steps):
grad = sess.run(self.grad, feed_dict={self.model.x_input: x,
self.model.y_input: y})
x = np.add(x, self.step_size * np.sign(grad), out=x, casting='unsafe')
x = np.clip(x, x_nat - self.epsilon, x_nat + self.epsilon)
x = np.clip(x, 0, 255) # ensure valid pixel range
return x
def get_path_dir(data_dir, dataset, **_):
path = os.path.join(data_dir, dataset)
if os.path.islink(path):
path = os.readlink(path)
return path
if __name__ == '__main__':
import sys
import math
from free_model import Model
model_file = tf.train.latest_checkpoint(config.model_dir)
if model_file is None:
print('No model found')
sys.exit()
dataset = config.dataset
data_dir = config.data_dir
data_path = get_path_dir(data_dir, dataset)
model = Model(mode='eval', dataset=dataset)
attack = LinfPGDAttack(model,
config.epsilon,
config.pgd_steps,
config.step_size,
config.loss_func)
saver = tf.train.Saver()
if dataset == 'cifar10':
cifar = cifar10_input.CIFAR10Data(data_path)
else:
cifar = cifar100_input.CIFAR100Data(data_path)
with tf.Session() as sess:
# Restore the checkpoint
saver.restore(sess, model_file)
# Iterate over the samples batch-by-batch
num_eval_examples = config.eval_examples
eval_batch_size = config.eval_size
num_batches = int(math.ceil(num_eval_examples / eval_batch_size))
x_adv = [] # adv accumulator
print('getting clean validation accuracy')
total_corr = 0
for ibatch in tqdm(range(num_batches)):
bstart = ibatch * eval_batch_size
bend = min(bstart + eval_batch_size, num_eval_examples)
x_batch = cifar.eval_data.xs[bstart:bend, :].astype(np.float32)
y_batch = cifar.eval_data.ys[bstart:bend]
dict_val = {model.x_input: x_batch, model.y_input: y_batch}
cur_corr = sess.run(model.num_correct, feed_dict=dict_val)
total_corr += cur_corr
print('** validation accuracy: %.3f **\n\n' % (total_corr / float(num_eval_examples) * 100))
print('Iterating over {} batches'.format(num_batches))
total_corr, total_num = 0, 0
for ibatch in range(num_batches):
bstart = ibatch * eval_batch_size
bend = min(bstart + eval_batch_size, num_eval_examples)
curr_num = bend - bstart
total_num += curr_num
print('mini batch: {}/{} -- batch size: {}'.format(ibatch + 1, num_batches, curr_num))
sys.stdout.flush()
x_batch = cifar.eval_data.xs[bstart:bend, :].astype(np.float32)
y_batch = cifar.eval_data.ys[bstart:bend]
best_batch_adv = np.copy(x_batch)
dict_adv = {model.x_input: best_batch_adv, model.y_input: y_batch}
cur_corr, y_pred_batch, best_loss = sess.run([model.num_correct, model.predictions, model.y_xent],
feed_dict=dict_adv)
for ri in range(_NUM_RESTARTS):
x_batch_adv = attack.perturb(x_batch, y_batch, sess)
dict_adv = {model.x_input: x_batch_adv, model.y_input: y_batch}
cur_corr, y_pred_batch, this_loss = sess.run([model.num_correct, model.predictions, model.y_xent],
feed_dict=dict_adv)
bb = best_loss >= this_loss
bw = best_loss < this_loss
best_batch_adv[bw, :, :, :] = x_batch_adv[bw, :, :, :]
best_corr, y_pred_batch, best_loss = sess.run([model.num_correct, model.predictions, model.y_xent],
feed_dict={model.x_input: best_batch_adv,
model.y_input: y_batch})
print('restart %d: num correct: %d -- loss:%.4f' % (ri, best_corr, np.mean(best_loss)))
total_corr += best_corr
print('accuracy till now {:4}% \n\n'.format(float(total_corr) / total_num * 100))
x_adv.append(best_batch_adv)
x_adv = np.concatenate(x_adv, axis=0)
|
ch17/yunqiCrawl/yunqiCrawl/scrapy_redis/connection.py | AaronZhengkk/SpiderBook | 990 | 28069 | <gh_stars>100-1000
import redis
# Default values.
REDIS_URL = None
REDIS_HOST = 'localhost'
REDIS_PORT = 6379
FILTER_URL = None
FILTER_HOST = 'localhost'
FILTER_PORT = 6379
FILTER_DB = 0
def from_settings(settings):
url = settings.get('REDIS_URL', REDIS_URL)
host = settings.get('REDIS_HOST', REDIS_HOST)
port = settings.get('REDIS_PORT', REDIS_PORT)
# REDIS_URL takes precedence over host/port specification.
if url:
return redis.from_url(url)
else:
return redis.Redis(host=host, port=port)
def from_settings_filter(settings):
url = settings.get('FILTER_URL', FILTER_URL)
host = settings.get('FILTER_HOST', FILTER_HOST)
port = settings.get('FILTER_PORT', FILTER_PORT)
db = settings.get('FILTER_DB', FILTER_DB)
if url:
return redis.from_url(url)
else:
return redis.Redis(host=host, port=port, db=db)
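# Illustrative example (hypothetical settings dict; not part of the module):
# REDIS_URL takes precedence over host/port, mirroring from_settings() above.
#
#   client = from_settings({'REDIS_URL': 'redis://localhost:6379/0',
#                           'REDIS_HOST': 'ignored-when-url-is-set'})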
|
modules/tools/open_space_visualization/open_space_roi_visualizer.py | jzjonah/apollo | 22,688 | 28115 | #!/usr/bin/env python3
###############################################################################
# Copyright 2018 The Apollo Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
# @file to run it, change the modules/common/configs/config_gflags.cc to use sunnyvale_with_two_offices
from open_space_roi_interface import *
import matplotlib.pyplot as plt
# initialize object
open_space_roi = open_space_roi()
lane_id = "11564dup1_1_-1"
parking_id = "11543"
num_output_buffer = 50
unrotated_roi_boundary_x = (c_double * num_output_buffer)()
roi_boundary_x = (c_double * num_output_buffer)()
parking_spot_x = (c_double * num_output_buffer)()
unrotated_roi_boundary_y = (c_double * num_output_buffer)()
roi_boundary_y = (c_double * num_output_buffer)()
parking_spot_y = (c_double * num_output_buffer)()
end_pose = (c_double * num_output_buffer)()
xy_boundary = (c_double * num_output_buffer)()
origin_pose = (c_double * num_output_buffer)()
if not open_space_roi.ROITest(lane_id, parking_id,
unrotated_roi_boundary_x, unrotated_roi_boundary_y, roi_boundary_x, roi_boundary_y,
parking_spot_x, parking_spot_y, end_pose,
xy_boundary, origin_pose):
print("open_space_roi fail")
result_unrotated_roi_boundary_x = []
result_unrotated_roi_boundary_y = []
result_roi_boundary_x = []
result_roi_boundary_y = []
result_parking_spot_x = []
result_parking_spot_y = []
result_end_pose = []
result_xy_boundary = []
result_origin_pose = []
print("vertices of obstacles")
for i in range(0, 10):
result_unrotated_roi_boundary_x.append(float(unrotated_roi_boundary_x[i]))
result_unrotated_roi_boundary_y.append(float(unrotated_roi_boundary_y[i]))
result_roi_boundary_x.append(float(roi_boundary_x[i]))
result_roi_boundary_y.append(float(roi_boundary_y[i]))
print(str(float(roi_boundary_x[i])))
print(str(float(roi_boundary_y[i])))
print("parking spot")
for i in range(0, 4):
result_parking_spot_x.append(float(parking_spot_x[i]))
result_parking_spot_y.append(float(parking_spot_y[i]))
print("end_pose in x,y,phi,v")
for i in range(0, 4):
print(str(float(end_pose[i])))
print("xy_boundary in xmin xmax ymin ymax")
for i in range(0, 4):
print(str(float(xy_boundary[i])))
print("origin_pose")
for i in range(0, 2):
print(str(float(origin_pose[i])))
fig = plt.figure()
ax1 = fig.add_subplot(211)
ax1.scatter(result_unrotated_roi_boundary_x, result_unrotated_roi_boundary_y)
ax1.scatter(result_parking_spot_x, result_parking_spot_y)
ax2 = fig.add_subplot(212)
ax2.scatter(result_roi_boundary_x, result_roi_boundary_y)
plt.gca().set_aspect('equal', adjustable='box')
plt.show()
|
gfauto/gfauto/test_util.py | KishkinJ10/graphicsfuzz | 519 | 28130 | # -*- coding: utf-8 -*-
# Copyright 2019 The GraphicsFuzz Project Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test utility module.
A test directory contains a Test proto stored in "source/test.json", the reference and variant shader jobs, and various
other files, including results.
This module is used to read Test proto files and get various paths that exist in test directories.
"""
from pathlib import Path
from gfauto import proto_util, util
from gfauto.test_pb2 import Test
TEST_METADATA = "test.json"
REFERENCE_DIR = "reference"
VARIANT_DIR = "variant"
SHADER_JOB = "shader.json"
SHADER_JOB_RESULT = "shader.info.json"
def get_source_dir(test_dir: Path) -> Path:
return test_dir / "source"
def get_metadata_path(test_dir: Path) -> Path:
return get_metadata_path_from_source_dir(get_source_dir(test_dir))
def get_metadata_path_from_source_dir(source_dir: Path) -> Path:
return source_dir / TEST_METADATA
def metadata_write(metadata: Test, test_dir: Path) -> Path:
metadata_write_to_path(metadata, get_metadata_path(test_dir))
return test_dir
def metadata_read(test_dir: Path) -> Test:
return metadata_read_from_path(get_metadata_path(test_dir))
def metadata_read_from_source_dir(source_dir: Path) -> Test:
return metadata_read_from_path(get_metadata_path_from_source_dir(source_dir))
def metadata_read_from_path(test_metadata_path: Path) -> Test:
text = util.file_read_text(test_metadata_path)
result = Test()
proto_util.json_to_message(text, result)
return result
def metadata_write_to_path(metadata: Test, test_metadata_path: Path) -> Path:
text = proto_util.message_to_json(metadata)
util.file_write_text(test_metadata_path, text)
return test_metadata_path
def get_shader_job_path(test_dir: Path, shader_name: str) -> Path:
return test_dir / "source" / shader_name / SHADER_JOB
def get_device_directory(test_dir: Path, device_name: str) -> Path:
return test_dir / "results" / device_name
def get_results_directory(test_dir: Path, device_name: str) -> Path:
return get_device_directory(test_dir, device_name) / "result"
def get_reductions_dir(test_dir: Path, device_name: str) -> Path:
return get_device_directory(test_dir, device_name) / "reductions"
def get_reduced_test_dir(test_dir: Path, device_name: str, reduction_name: str) -> Path:
return get_reductions_dir(test_dir, device_name) / reduction_name
def get_reduction_work_directory(reduced_test_dir: Path, name_of_shader: str) -> Path:
return reduced_test_dir / "reduction_work" / name_of_shader
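# Layout sketch (illustrative; "pixel3" and the directory name are hypothetical):
# for test_dir = Path("work/my_test") the helpers above resolve to
#   get_metadata_path(test_dir)               -> work/my_test/source/test.json
#   get_shader_job_path(test_dir, "variant")  -> work/my_test/source/variant/shader.json
#   get_results_directory(test_dir, "pixel3") -> work/my_test/results/pixel3/result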
|
core/management_utils.py | crydotsnake/djangogirls | 446 | 28144 | import djclick as click
from django.conf import settings
from django.utils.translation import gettext_lazy as _
from .forms import AddOrganizerForm
from .slack_client import slack
# "Get organizers info" functions used in 'new_event' and 'copy_event' management commands.
def get_main_organizer():
"""
We're asking the user for the name and e-mail address of the main organizer,
and return a list containing one dictionary.
"""
team = []
click.echo(_("Let's talk about the team. First the main organizer:"))
main_name = click.prompt(click.style(
"First and last name", bold=True, fg='yellow'
))
main_email = click.prompt(click.style(
"E-mail address", bold=True, fg='yellow'
))
team.append({'name': main_name, 'email': main_email})
click.echo("All right, the main organizer is {0} ({1})".format(main_name, main_email))
return team
def get_team(team):
"""
We're asking the user for the names and e-mail addresses of the rest of the
team, and append them to the list we got from get_main_organizer.
"""
add_team = click.confirm(click.style(
"Do you want to add additional team members?", bold=True, fg='yellow'
), default=False)
i = 1
while add_team:
i += 1
name = click.prompt(click.style(
f"First and last name of #{i} member", bold=True, fg='yellow'
))
email = click.prompt(click.style(
f"E-mail address of #{i} member", bold=True, fg='yellow'
))
if len(name) > 0:
team.append({'name': name, 'email': email})
click.echo(
f"All right, the #{i} team member of Django Girls is {name} ({email})"
)
add_team = click.confirm(click.style(
"Do you want to add additional team members?", bold=True, fg='yellow'
), default=False)
return team
def create_users(team, event):
"""
Create or get User objects based on team list
"""
members = []
for member in team:
member['event'] = event.pk
form = AddOrganizerForm(member)
user = form.save()
members.append(user)
return members
def brag_on_slack_bang(city, country, team):
"""
This posts a message about a new Django Girls event to the #general channel on Slack!
"""
if settings.ENABLE_SLACK_NOTIFICATIONS:
text = f":django_pony: :zap: Woohoo! :tada: New Django Girls alert! " \
f"Welcome Django Girls {city}, {country}. " \
f"Congrats {', '.join(['{} {}'.format(x.first_name, x.last_name) for x in team])}!"
slack.chat.post_message(
channel='#general',
text=text,
username='Django Girls',
icon_emoji=':django_heart:'
)
|
tools/Vitis-AI-Quantizer/vai_q_tensorflow2.x/setup.py | hito0512/Vitis-AI | 848 | 28149 | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Install tensorflow_model_optimization."""
import datetime
import os
import sys
from setuptools import find_packages
from setuptools import setup
from setuptools.command.install import install as InstallCommandBase
from setuptools.dist import Distribution
# To enable importing version.py directly, we add its path to sys.path.
version_path = os.path.join(
os.path.dirname(__file__), 'tensorflow_model_optimization', 'python/core')
sys.path.append(version_path)
from version import __version__ # pylint: disable=g-import-not-at-top
# TODO(alanchiao): add explicit Tensorflow requirement once Tensorflow
# moves from a tf and tf-gpu packaging approach (where a user installs
# one of the two) to one where a user installs the tf package and then
# also installs the gpu package if they need gpu support. The latter allows
# us (and our dependents) to maintain a single package instead of two.
REQUIRED_PACKAGES = [
'numpy~=1.14',
'six~=1.10',
'enum34~=1.1;python_version<"3.4"',
'dm-tree~=0.1.1',
]
if '--release' in sys.argv:
release = True
sys.argv.remove('--release')
else:
# Build a nightly package by default.
release = False
if release:
project_name = 'vai-q-tensorflow2'
else:
# Nightly releases use date-based versioning of the form
# '0.0.1.dev20180305'
project_name = 'vai-q-tensorflow2-nightly'
datestring = datetime.datetime.now().strftime('%Y%m%d')
__version__ += datestring
class BinaryDistribution(Distribution):
"""This class is needed in order to create OS specific wheels."""
def has_ext_modules(self):
return False
setup(
name=project_name,
version=__version__,
description='Xilinx Vitis AI Quantizer for Tensorflow 2.x. '
'This is customized based on tensorflow-model-optimization('
'https://github.com/tensorflow/model-optimization)'
'A suite of tools that users, both novice and advanced'
' can use to optimize machine learning models for deployment'
' and execution.',
author='<NAME>',
author_email='<EMAIL>',
license='Apache 2.0',
packages=find_packages(),
install_requires=REQUIRED_PACKAGES,
# Add in any packaged data.
include_package_data=True,
package_data={'': ['*.so', '*.json']},
exclude_package_data={'': ['BUILD', '*.h', '*.cc']},
zip_safe=False,
distclass=BinaryDistribution,
cmdclass={
'pip_pkg': InstallCommandBase,
},
classifiers=[
'Intended Audience :: Developers',
'Intended Audience :: Education',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: Apache Software License',
'Topic :: Scientific/Engineering',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
],
keywords='tensorflow model optimization machine learning',
)
|
src/django_perf_rec/settings.py | adamchainz/django-perf-rec | 147 | 28167 | import sys
from typing import Any
from django.conf import settings
if sys.version_info >= (3, 8):
from typing import Literal
ModeType = Literal["once", "none", "all"]
else:
ModeType = str
class Settings:
defaults = {"HIDE_COLUMNS": True, "MODE": "once"}
def get_setting(self, key: str) -> Any:
try:
return settings.PERF_REC[key]
except (AttributeError, KeyError):
return self.defaults.get(key, None)
@property
def HIDE_COLUMNS(self) -> bool:
return self.get_setting("HIDE_COLUMNS")
@property
def MODE(self) -> ModeType:
return self.get_setting("MODE")
perf_rec_settings = Settings()
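# Illustrative example (hypothetical project settings; not part of the module):
# keys present in settings.PERF_REC override the defaults, everything else
# falls back to Settings.defaults.
#
#   # settings.py
#   PERF_REC = {"MODE": "all"}
#
#   perf_rec_settings.MODE           # -> "all"  (overridden)
#   perf_rec_settings.HIDE_COLUMNS   # -> True   (default)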
|
test/test_igp_shortcuts.py | tim-fiola/network_traffic_modeler_py3 | 102 | 28206 | <filename>test/test_igp_shortcuts.py
import unittest
from pyNTM import FlexModel
from pyNTM import ModelException
from pyNTM import PerformanceModel
class TestIGPShortcuts(unittest.TestCase):
def test_traffic_on_shortcut_lsps(self):
"""
Verify Interface and LSP traffic when IGP shortcuts enabled
in baseline model.
"""
# The demands should take LSPs starting on the first
# node that has shortcuts and should take the LSP that
# leads it closest to the demand destination
model = FlexModel.load_model_file(
"test/igp_shortcuts_model_mult_lsps_in_path.csv"
)
model.update_simulation()
# Get all the interface objects
int_a_b = model.get_interface_object("A-B", "A")
int_b_c = model.get_interface_object("B-C", "B")
int_c_d = model.get_interface_object("C-D", "C")
int_d_e = model.get_interface_object("D-E", "D")
int_e_f = model.get_interface_object("E-F", "E")
int_a_g = model.get_interface_object("A-G", "A")
int_g_f = model.get_interface_object("G-F", "G")
# Get all LSP objects
lsp_b_d_1 = model.get_rsvp_lsp("B", "D", "lsp_b_d_1")
lsp_b_d_2 = model.get_rsvp_lsp("B", "D", "lsp_b_d_2")
lsp_c_e_1 = model.get_rsvp_lsp("C", "E", "lsp_c_e_1")
lsp_d_f_1 = model.get_rsvp_lsp("D", "F", "lsp_d_f_1")
# Get demand objects
dmd_a_f_1 = model.get_demand_object("A", "F", "dmd_a_f_1")
dmd_d_f_1 = model.get_demand_object("D", "F", "dmd_d_f_1")
# Verify traffic on LSPs
self.assertEqual(lsp_b_d_1.traffic_on_lsp(model), 2.5)
self.assertEqual(lsp_b_d_2.traffic_on_lsp(model), 2.5)
self.assertEqual(lsp_c_e_1.traffic_on_lsp(model), 0)
self.assertEqual(lsp_d_f_1.traffic_on_lsp(model), 13.0)
# Verify demand paths
self.assertIn([int_a_g, int_g_f], dmd_a_f_1.path)
self.assertIn([int_a_b, lsp_b_d_1, lsp_d_f_1], dmd_a_f_1.path)
self.assertIn([int_a_b, lsp_b_d_2, lsp_d_f_1], dmd_a_f_1.path)
self.assertEqual(dmd_d_f_1.path, [[lsp_d_f_1]])
# Verify interface traffic
self.assertEqual(int_a_b.traffic, 5.0)
self.assertEqual(int_b_c.traffic, 5.0)
self.assertEqual(int_c_d.traffic, 5.0)
self.assertEqual(int_d_e.traffic, 13.0)
self.assertEqual(int_e_f.traffic, 13.0)
self.assertEqual(int_a_g.traffic, 5.0)
self.assertEqual(int_g_f.traffic, 5.0)
# Verify LSPs on interfaces
self.assertIn(lsp_b_d_1, int_b_c.lsps(model))
self.assertIn(lsp_b_d_2, int_b_c.lsps(model))
self.assertIn(lsp_b_d_1, int_c_d.lsps(model))
self.assertIn(lsp_b_d_2, int_c_d.lsps(model))
self.assertIn(lsp_b_d_2, int_c_d.lsps(model))
self.assertIn(lsp_c_e_1, int_c_d.lsps(model))
def test_igp_shortcut_node_attributes(self):
# The IGP shortcut attribute should be True
model = FlexModel.load_model_file(
"test/igp_shortcuts_model_mult_lsps_in_path.csv"
)
node_b = model.get_node_object("B")
self.assertTrue(node_b.igp_shortcuts_enabled)
# Remove igp_shortcuts_enabled on node B, traffic should appear on lsp_c_e_1
# and disappear from lsp_b_d_1/2 and lsp_d_f_1
def test_remove_shortcuts_node_b(self):
model = FlexModel.load_model_file(
"test/igp_shortcuts_model_mult_lsps_in_path.csv"
)
node_b = model.get_node_object("B")
node_b.igp_shortcuts_enabled = False
model.update_simulation()
# Get LSP objects
lsp_b_d_1 = model.get_rsvp_lsp("B", "D", "lsp_b_d_1")
lsp_b_d_2 = model.get_rsvp_lsp("B", "D", "lsp_b_d_2")
lsp_c_e_1 = model.get_rsvp_lsp("C", "E", "lsp_c_e_1")
lsp_d_f_1 = model.get_rsvp_lsp("D", "F", "lsp_d_f_1")
dmd_a_f_1 = model.get_demand_object("A", "F", "dmd_a_f_1")
dmd_d_f_1 = model.get_demand_object("D", "F", "dmd_d_f_1")
# Half the traffic from dmd_a_f_1 should be on lsp_c_e_1
self.assertEqual(lsp_c_e_1.traffic_on_lsp(model), 5.0)
# dmd_a_f_1 should be the only demand on lsp_c_e_1
self.assertEqual(lsp_c_e_1.demands_on_lsp(model), [dmd_a_f_1])
# dmd_d_f_1 should be the only demand on lsp_d_f_1
self.assertEqual(lsp_d_f_1.demands_on_lsp(model), [dmd_d_f_1])
# LSPs from B to D should have no demands and no traffic
self.assertEqual(lsp_b_d_1.demands_on_lsp(model), [])
self.assertEqual(lsp_b_d_2.demands_on_lsp(model), [])
self.assertEqual(lsp_b_d_1.traffic_on_lsp(model), 0)
self.assertEqual(lsp_b_d_2.traffic_on_lsp(model), 0)
def test_demands_no_shortcuts(self):
"""
The demand should take the LSP if the IGP shortcut attribute is True on node B.
When the IGP shortcut attribute is turned to False, the demand should
only IGP route. Change all igp_shortcuts_enabled flags to False.
Test LSP and Interface traffic.
"""
model = FlexModel.load_model_file(
"test/igp_shortcuts_model_mult_lsps_in_path.csv"
)
model.update_simulation()
# Get all LSP objects
lsp_b_d_1 = model.get_rsvp_lsp("B", "D", "lsp_b_d_1")
lsp_b_d_2 = model.get_rsvp_lsp("B", "D", "lsp_b_d_2")
lsp_c_e_1 = model.get_rsvp_lsp("C", "E", "lsp_c_e_1")
lsp_d_f_1 = model.get_rsvp_lsp("D", "F", "lsp_d_f_1")
# Get some node objects
node_b = model.get_node_object("B")
node_c = model.get_node_object("C")
node_d = model.get_node_object("D")
node_e = model.get_node_object("E")
# Get LSP object
dmd_d_f_1 = model.get_demand_object("D", "F", "dmd_d_f_1")
# Set the node igp_shortcuts_enabled attribute to False
node_b.igp_shortcuts_enabled = False
node_c.igp_shortcuts_enabled = False
node_d.igp_shortcuts_enabled = False
node_e.igp_shortcuts_enabled = False
model.update_simulation()
# Only lsp_d_f_1 should have traffic/demands
self.assertEqual(lsp_b_d_1.demands_on_lsp(model), [])
self.assertEqual(lsp_b_d_2.demands_on_lsp(model), [])
self.assertEqual(lsp_c_e_1.demands_on_lsp(model), [])
self.assertEqual(lsp_b_d_1.traffic_on_lsp(model), 0)
self.assertEqual(lsp_b_d_2.traffic_on_lsp(model), 0)
self.assertEqual(lsp_c_e_1.traffic_on_lsp(model), 0)
self.assertEqual(lsp_d_f_1.demands_on_lsp(model), [dmd_d_f_1])
self.assertEqual(lsp_d_f_1.traffic_on_lsp(model), 8.0)
def test_igp_shortcut_perf_model(self):
model = PerformanceModel.load_model_file("test/igp_routing_topology.csv")
node_a = model.get_node_object("A")
node_a.igp_shortcuts_enabled = True
err_msg = "igp_shortcuts_enabled not allowed in PerformanceModel, but present on these Nodes"
with self.assertRaises(ModelException) as context:
model.update_simulation()
self.assertIn(err_msg, context.exception.args[0][1][0].keys())
# If one LSP from B to D is assigned a lower metric, traffic should
# not split at A
def test_changed_metric(self):
model = FlexModel.load_model_file(
"test/igp_shortcuts_model_mult_lsps_in_path.csv"
)
# Get all the interface objects
int_a_b = model.get_interface_object("A-B", "A")
int_b_c = model.get_interface_object("B-C", "B")
int_c_d = model.get_interface_object("C-D", "C")
int_d_e = model.get_interface_object("D-E", "D")
int_e_f = model.get_interface_object("E-F", "E")
int_a_g = model.get_interface_object("A-G", "A")
int_g_f = model.get_interface_object("G-F", "G")
# Get all LSP objects
lsp_b_d_1 = model.get_rsvp_lsp("B", "D", "lsp_b_d_1")
lsp_b_d_2 = model.get_rsvp_lsp("B", "D", "lsp_b_d_2")
lsp_c_e_1 = model.get_rsvp_lsp("C", "E", "lsp_c_e_1")
lsp_d_f_1 = model.get_rsvp_lsp("D", "F", "lsp_d_f_1")
# Get demand objects
dmd_a_f_1 = model.get_demand_object("A", "F", "dmd_a_f_1")
dmd_d_f_1 = model.get_demand_object("D", "F", "dmd_d_f_1")
# Give lsp a lower than default metric
lsp_b_d_1.manual_metric = 15
model.update_simulation()
dmd_path_1 = [int_a_b, lsp_b_d_1, lsp_d_f_1]
# Confirm demand path
self.assertIn(dmd_path_1, dmd_a_f_1.path)
# Verify traffic on LSPs
self.assertEqual(lsp_b_d_1.traffic_on_lsp(model), 10)
self.assertEqual(lsp_b_d_2.traffic_on_lsp(model), 0)
self.assertEqual(lsp_c_e_1.traffic_on_lsp(model), 0)
self.assertEqual(lsp_d_f_1.traffic_on_lsp(model), 18.0)
# Verify demand paths
self.assertNotIn([int_a_g, int_g_f], dmd_a_f_1.path)
self.assertIn([int_a_b, lsp_b_d_1, lsp_d_f_1], dmd_a_f_1.path)
self.assertNotIn(lsp_b_d_2, dmd_a_f_1.path)
self.assertEqual(dmd_d_f_1.path, [[lsp_d_f_1]])
# Verify interface traffic
self.assertEqual(int_a_b.traffic, 10.0)
self.assertEqual(int_b_c.traffic, 10.0)
self.assertEqual(int_c_d.traffic, 10.0)
self.assertEqual(int_d_e.traffic, 18.0)
self.assertEqual(int_e_f.traffic, 18.0)
self.assertEqual(int_a_g.traffic, 0.0)
self.assertEqual(int_g_f.traffic, 0.0)
# Verify LSPs on interfaces
self.assertIn(lsp_b_d_1, int_b_c.lsps(model))
self.assertIn(lsp_b_d_2, int_b_c.lsps(model))
self.assertIn(lsp_b_d_1, int_c_d.lsps(model))
self.assertIn(lsp_b_d_2, int_c_d.lsps(model))
self.assertIn(lsp_b_d_2, int_c_d.lsps(model))
self.assertIn(lsp_c_e_1, int_c_d.lsps(model))
# Give lsp_b_d_1 a higher than default metric
lsp_b_d_1.manual_metric = 25
model.update_simulation()
dmd_path_2_1 = [int_a_g, int_g_f]
dmd_path_2_2 = [int_a_b, lsp_b_d_2, lsp_d_f_1]
# Confirm demand path
self.assertIn(dmd_path_2_1, dmd_a_f_1.path)
self.assertIn(dmd_path_2_2, dmd_a_f_1.path)
# Verify traffic on LSPs
self.assertEqual(lsp_b_d_1.traffic_on_lsp(model), 0)
self.assertEqual(lsp_b_d_2.traffic_on_lsp(model), 5)
self.assertEqual(lsp_c_e_1.traffic_on_lsp(model), 0)
self.assertEqual(lsp_d_f_1.traffic_on_lsp(model), 13.0)
# Verify demand paths
self.assertIn([int_a_g, int_g_f], dmd_a_f_1.path)
self.assertNotIn(lsp_b_d_1, dmd_a_f_1.path)
self.assertIn([int_a_b, lsp_b_d_2, lsp_d_f_1], dmd_a_f_1.path)
self.assertEqual(dmd_d_f_1.path, [[lsp_d_f_1]])
# Verify interface traffic
self.assertEqual(int_a_b.traffic, 5.0)
self.assertEqual(int_b_c.traffic, 5.0)
self.assertEqual(int_c_d.traffic, 5.0)
self.assertEqual(int_d_e.traffic, 13.0)
self.assertEqual(int_e_f.traffic, 13.0)
self.assertEqual(int_a_g.traffic, 5.0)
self.assertEqual(int_g_f.traffic, 5.0)
# Verify LSPs on interfaces
self.assertIn(lsp_b_d_1, int_b_c.lsps(model))
self.assertIn(lsp_b_d_2, int_b_c.lsps(model))
self.assertIn(lsp_b_d_1, int_c_d.lsps(model))
self.assertIn(lsp_b_d_2, int_c_d.lsps(model))
self.assertIn(lsp_b_d_2, int_c_d.lsps(model))
self.assertIn(lsp_c_e_1, int_c_d.lsps(model))
# If an LSP from A to F is added, all traffic should take that LSP
def test_direct_lsp_preemption(self):
model = FlexModel.load_model_file(
"test/igp_shortcuts_model_mult_lsps_in_path.csv"
)
dmd_a_f_1 = model.get_demand_object("A", "F", "dmd_a_f_1")
model.add_rsvp_lsp("A", "F", "lsp_a_f_1")
lsp_a_f_1 = model.get_rsvp_lsp("A", "F", "lsp_a_f_1")
lsp_b_d_1 = model.get_rsvp_lsp("B", "D", "lsp_b_d_1")
int_a_g = model.get_interface_object("A-G", "A")
int_a_b = model.get_interface_object("A-B", "A")
model.update_simulation()
# Make sure dmd_a_f_1 takes lsp_a_f_1
self.assertEqual(lsp_a_f_1.demands_on_lsp(model), [dmd_a_f_1])
self.assertEqual(lsp_a_f_1.traffic_on_lsp(model), 10)
self.assertEqual(lsp_b_d_1.traffic_on_lsp(model), 0)
self.assertEqual(lsp_b_d_1.demands_on_lsp(model), [])
# lsp_a_f_1 will take path with fewest hops
self.assertEqual(int_a_g.traffic, 10)
self.assertEqual(int_a_b.traffic, 0)
|
src/pipelines/epidemiology/sd_humdata.py | chrismayemba/covid-19-open-data | 430 | 28246 | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict
from pandas import DataFrame
from lib.cast import safe_int_cast
from lib.data_source import DataSource
from lib.time import datetime_isoformat
class SudanHumdataDataSource(DataSource):
def parse_dataframes(
self, dataframes: Dict[str, DataFrame], aux: Dict[str, DataFrame], **parse_opts
) -> DataFrame:
# Rename the appropriate columns
data = (
dataframes[0]
.rename(
columns={
"Report Date": "date",
"State": "match_string",
"Confirmed Cases": "total_confirmed",
}
)
.drop([0])
)
# The dates in the provided CSV are incorrect for one of the reports.
# Replace with report date taken from text of report.
data.loc[
data["Source"]
== "https://reliefweb.int/sites/reliefweb.int/files/resources/Situation%20Report%20-%20Sudan%20-%207%20May%202020.pdf",
"date",
] = "5/11/2020"
data = data.drop(axis=1, columns=["As of Date", "Source"])
# Remove Abyei PCA, a disputed region with no data shown.
data = data[data["match_string"] != "Abyei PCA"]
# Data source uses different spelling from src/data/iso_3166_2_codes.csv
data["match_string"].replace({"Gedaref": "Al Qadarif"}, inplace=True)
data.date = data.date.apply(lambda x: datetime_isoformat(x, "%m/%d/%Y"))
# Sudan data includes empty cells where there are no confirmed cases.
# These get read in as NaN. Replace them with zeroes so that the
# grouped_diff call to get new confirmed cases works for a state's first
# day with a case.
data["total_confirmed"] = data["total_confirmed"].fillna(0).apply(safe_int_cast)
# Make sure all records have the country code
data["country_code"] = "SD"
# Output the results
return data
|
lexical-parse-float/etc/limits.py | sjurajpuchky/rust-lexical | 249 | 28248 | <gh_stars>100-1000
#!/usr/bin/env python3
"""
Generate the numeric limits for a given radix.
This is used for the fast-path algorithms, to calculate the
maximum number of digits or exponent bits that can be exactly
represented as a native value.
"""
import math
def is_pow2(value):
'''Calculate if a value is a power of 2.'''
floor = int(math.log2(value))
return value == 2**floor
def remove_pow2(value):
'''Remove a power of 2 from the value.'''
while math.floor(value / 2) == value / 2:
value //= 2
return value
def feature(radix):
'''Get the feature gate from the value'''
if radix == 10:
return ''
elif is_pow2(radix):
return 'if cfg!(feature = "power-of-two") '
return 'if cfg!(feature = "radix") '
def exponent_limit(radix, mantissa_size, max_exp):
'''
Calculate the exponent limit for a float, for a given
float type, where `radix` is the numerical base
for the float type, and mantissa size is the length
of the mantissa in bits. max_exp is the maximum
binary exponent, where all exponent bits except the lowest
are set (or, `2**(exponent_size - 1) - 1`).
'''
if is_pow2(radix):
# Can always be exactly represented. We can't handle
# denormal floats, however.
scaled = int(max_exp / math.log2(radix))
return (-scaled, scaled)
else:
# Positive and negative should be the same,
# since we need to find the maximum digit
# representable with mantissa digits.
# We first need to remove the highest power-of-
# two from the radix, since these will be represented
# with exponent digits.
base = remove_pow2(radix)
precision = mantissa_size + 1
exp_limit = int(precision / math.log2(base))
return (-exp_limit, exp_limit)
def mantissa_limit(radix, mantissa_size):
'''
Calculate mantissa limit for a float type, given
the radix and the length of the mantissa in bits.
'''
precision = mantissa_size + 1
return int(precision / math.log2(radix))
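# Worked example: for f64, mantissa_size=52 and exponent_size=11, so max_exp = 2**10 - 1 = 1023.
# For radix 10, remove_pow2(10) leaves base 5, the precision is 53 bits, and int(53 / log2(5)) == 22,
# so exponent_limit(10, 52, 1023) == (-22, 22); likewise mantissa_limit(10, 52) == 15, i.e. a double
# holds 15 full decimal digits and represents powers of ten up to 10**22 exactly.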
def all_limits(mantissa_size, exponent_size, type_name):
'''Print limits for all radixes.'''
max_exp = 2**(exponent_size - 1) - 1
print('/// Get the exponent limit as a const fn.')
print('#[inline(always)]')
print(f'pub const fn {type_name}_exponent_limit(radix: u32) -> (i64, i64) {{')
print(' match radix {')
for radix in range(2, 37):
exp_limit = exponent_limit(radix, mantissa_size, max_exp)
print(f' {radix} {feature(radix)}=> {exp_limit},')
print(' _ => (0, 0),')
print(' }')
print('}')
print('')
print('/// Get the mantissa limit as a const fn.')
print('#[inline(always)]')
print(f'pub const fn {type_name}_mantissa_limit(radix: u32) -> i64 {{')
print(' match radix {')
for radix in range(2, 37):
mant_limit = mantissa_limit(radix, mantissa_size)
print(f' {radix} {feature(radix)}=> {mant_limit},')
print(' _ => 0,')
print(' }')
print('}')
print('')
all_limits(23, 8, 'f32')
all_limits(52, 11, 'f64')
|
prototyper/build/stages/wsgi_app.py | vitalik/django-prototyper | 114 | 28261 | <filename>prototyper/build/stages/wsgi_app.py
from ..base import BuildStage
from pathlib import Path
TPL = """\"\"\"
WSGI config for {0} project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/howto/deployment/wsgi/
\"\"\"
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "{0}")
application = get_wsgi_application()
"""
class WsgiStage(BuildStage):
def run(self):
wsgi_py = Path(self.build.settings_pckg_path) / 'wsgi.py'
wsgi_py.write_text(TPL.format(self.settings_module('settings')))
|
quetz/migrations/versions/30241b33d849_add_task_pending_state.py | maresb/quetz | 108 | 28300 | <gh_stars>100-1000
"""add task pending state
Revision ID: 30241b33d849
Revises: cd404ed93cc0
Create Date: 2021-01-07 14:39:43.251123
"""
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision = '30241b33d849'
down_revision = 'cd404ed93cc0'
branch_labels = None
depends_on = None
def upgrade():
# manually entered
if op.get_context().dialect.name == 'postgresql':
# https://alembic.sqlalchemy.org/en/latest/api/runtime.html#alembic.runtime.migration.MigrationContext.autocommit_block
with op.get_context().autocommit_block():
op.execute("ALTER TYPE taskstatus ADD VALUE 'created'")
else:
# sqlite uses varchar + constraint for enum types
taskstatus_enum = sa.Enum(
'created',
'pending',
'running',
'success',
'failed',
'skipped',
name='taskstatus',
)
with op.batch_alter_table("tasks") as batch_op:
batch_op.alter_column("status", type_=taskstatus_enum)
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
pass
# ### end Alembic commands ###
|
basic_auth/handler.py | andrei-shabanski/s3pypi | 249 | 28323 | import base64
import hashlib
import json
import logging
from dataclasses import dataclass
import boto3
log = logging.getLogger()
region = "us-east-1"
def handle(event: dict, context):
request = event["Records"][0]["cf"]["request"]
try:
authenticate(request["headers"])
except Exception as e:
log.error(repr(e))
return unauthorized
return request
def authenticate(headers: dict):
domain = headers["host"][0]["value"]
auth = headers["authorization"][0]["value"]
auth_type, creds = auth.split(" ")
if auth_type != "Basic":
raise ValueError("Invalid auth type: " + auth_type)
username, password = base64.b64decode(creds).decode().split(":")
user = get_user(domain, username)
if hash_password(password, user.password_salt) != user.password_hash:
raise ValueError("Invalid password for " + username)
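# For illustration, a header value of "Basic dXNlcjpwYXNz" base64-decodes to "user:pass",
# which splits into username "user" and password "pass" before the user record is fetched from SSM.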
@dataclass
class User:
username: str
password_hash: str
password_salt: str
def get_user(domain: str, username: str) -> User:
data = boto3.client("ssm", region_name=region).get_parameter(
Name=f"/s3pypi/{domain}/users/{username}",
WithDecryption=True,
)["Parameter"]["Value"]
return User(username, **json.loads(data))
def hash_password(password: str, salt: str) -> str:
return hashlib.sha1((password + salt).encode()).hexdigest()
unauthorized = dict(
status="401",
statusDescription="Unauthorized",
headers={
"www-authenticate": [
{"key": "WWW-Authenticate", "value": 'Basic realm="Login"'}
]
},
)
|
examples/py/async-generator-multiple-tickers.py | diwenshi61/ccxt | 24,910 | 28343 | <gh_stars>1000+
# -*- coding: utf-8 -*-
import asyncio
import ccxt.async_support as ccxt
async def poll(tickers):
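    # Round-robin over the given symbols, yielding (symbol, ticker) pairs and sleeping
    # between requests so the loop stays under the exchange's rate limit.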
i = 0
kraken = ccxt.kraken()
while True:
symbol = tickers[i % len(tickers)]
yield (symbol, await kraken.fetch_ticker(symbol))
i += 1
await asyncio.sleep(kraken.rateLimit / 1000)
async def main():
async for (symbol, ticker) in poll(['BTC/USD', 'ETH/BTC', 'BTC/EUR']):
print(symbol, ticker)
asyncio.get_event_loop().run_until_complete(main())
|
assets/src/ba_data/python/bastd/mapdata/rampage.py | Benefit-Zebra/ballistica | 317 | 28357 | # Released under the MIT License. See LICENSE for details.
#
# This file was automatically generated from "rampage.ma"
# pylint: disable=all
points = {}
# noinspection PyDictCreation
boxes = {}
boxes['area_of_interest_bounds'] = (0.3544110667, 5.616383286,
-4.066055072) + (0.0, 0.0, 0.0) + (
19.90053969, 10.34051135, 8.16221072)
boxes['edge_box'] = (0.3544110667, 5.438284793, -4.100357672) + (
0.0, 0.0, 0.0) + (12.57718032, 4.645176013, 3.605557343)
points['ffa_spawn1'] = (0.5006944438, 5.051501304,
-5.79356326) + (6.626174027, 1.0, 0.3402012662)
points['ffa_spawn2'] = (0.5006944438, 5.051501304,
-2.435321368) + (6.626174027, 1.0, 0.3402012662)
points['flag1'] = (-5.885814199, 5.112162255, -4.251754911)
points['flag2'] = (6.700855451, 5.10270501, -4.259912982)
points['flag_default'] = (0.3196701116, 5.110914413, -4.292515158)
boxes['map_bounds'] = (0.4528955042, 4.899663734, -3.543675157) + (
0.0, 0.0, 0.0) + (23.54502348, 14.19991443, 12.08017448)
points['powerup_spawn1'] = (-2.645358507, 6.426340583, -4.226597191)
points['powerup_spawn2'] = (3.540102796, 6.549722855, -4.198476335)
points['shadow_lower_bottom'] = (5.580073911, 3.136491026, 5.341226521)
points['shadow_lower_top'] = (5.580073911, 4.321758709, 5.341226521)
points['shadow_upper_bottom'] = (5.274539479, 8.425373402, 5.341226521)
points['shadow_upper_top'] = (5.274539479, 11.93458162, 5.341226521)
points['spawn1'] = (-4.745706238, 5.051501304,
-4.247934288) + (0.9186962739, 1.0, 0.5153189341)
points['spawn2'] = (5.838590388, 5.051501304,
-4.259627405) + (0.9186962739, 1.0, 0.5153189341)
|
dockerpty/__init__.py | tedivm/dockerpty | 129 | 28374 | # dockerpty.
#
# Copyright 2014 <NAME> <<EMAIL>>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from dockerpty.pty import PseudoTerminal, RunOperation, ExecOperation, exec_create
def start(client, container, interactive=True, stdout=None, stderr=None, stdin=None, logs=None):
"""
Present the PTY of the container inside the current process.
This is just a wrapper for PseudoTerminal(client, container).start()
"""
operation = RunOperation(client, container, interactive=interactive, stdout=stdout,
stderr=stderr, stdin=stdin, logs=logs)
PseudoTerminal(client, operation).start()
def exec_command(
client, container, command, interactive=True, stdout=None, stderr=None, stdin=None):
"""
Run provided command via exec API in provided container.
This is just a wrapper for PseudoTerminal(client, container).exec_command()
"""
exec_id = exec_create(client, container, command, interactive=interactive)
operation = ExecOperation(client, exec_id,
interactive=interactive, stdout=stdout, stderr=stderr, stdin=stdin)
PseudoTerminal(client, operation).start()
def start_exec(client, exec_id, interactive=True, stdout=None, stderr=None, stdin=None):
operation = ExecOperation(client, exec_id,
interactive=interactive, stdout=stdout, stderr=stderr, stdin=stdin)
PseudoTerminal(client, operation).start()
|
database/models.py | zdresearch/Nettacker | 884 | 28376 | <reponame>zdresearch/Nettacker
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import (Column,
Integer,
Text,
DateTime)
Base = declarative_base()
class Report(Base):
"""
This class defines the table schema of the reports table. Any changes to the reports table need to be done here.
"""
__tablename__ = 'reports'
id = Column(Integer, primary_key=True, autoincrement=True)
date = Column(DateTime)
scan_unique_id = Column(Text)
report_path_filename = Column(Text)
options = Column(Text)
def __repr__(self):
"""
returns a printable representation of the object of the class Report
"""
return "<Report(id={0}, scan_unique_id={1}, date={2}, report_path_filename={3})>".format(
self.id,
self.scan_unique_id,
self.date,
self.report_path_filename
)
class TempEvents(Base):
"""
    This class defines the table schema of the temp_events table. Any changes to the temp_events table need to be done here.
"""
__tablename__ = 'temp_events'
id = Column(Integer, primary_key=True, autoincrement=True)
date = Column(DateTime)
target = Column(Text)
module_name = Column(Text)
scan_unique_id = Column(Text)
event_name = Column(Text)
port = Column(Text)
event = Column(Text)
data = Column(Text)
def __repr__(self):
"""
        returns a printable representation of the object of the class TempEvents
"""
return '''
        <scan_events(id={0}, target={1}, date={2}, module_name={3}, scan_unique_id={4},
port={5}, event={6}, data={7})>
'''.format(
self.id,
self.target,
self.date,
self.module_name,
self.scan_unique_id,
self.port,
self.event,
self.data
)
class HostsLog(Base):
"""
    This class defines the table schema of the scan_events (hosts log) table. Any changes to the scan_events table need to be done here.
"""
__tablename__ = 'scan_events'
id = Column(Integer, primary_key=True, autoincrement=True)
date = Column(DateTime)
target = Column(Text)
module_name = Column(Text)
scan_unique_id = Column(Text)
port = Column(Text)
event = Column(Text)
json_event = Column(Text)
def __repr__(self):
"""
returns a printable representation of the object of the class HostsLog
"""
return '''
        <scan_events(id={0}, target={1}, date={2}, module_name={3}, scan_unique_id={4},
port={5}, event={6}, json_event={7})>
'''.format(
self.id,
self.target,
self.date,
self.module_name,
self.scan_unique_id,
self.port,
self.event,
self.json_event
)
|
CodeIA/venv/Lib/site-packages/absl/testing/_parameterized_async.py | Finasty-lab/IA-Python | 38,667 | 28381 | <reponame>Finasty-lab/IA-Python
# Lint as: python3
# Copyright 2020 The Abseil Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Private module implementing async_wrapped method for wrapping async tests.
This is a separate private module so that parameterized still optionally
supports Python 2 syntax.
"""
import functools
import inspect
def async_wrapped(func):
@functools.wraps(func)
async def wrapper(*args, **kwargs):
return await func(*args, **kwargs)
return wrapper
def iscoroutinefunction(func):
return inspect.iscoroutinefunction(func)
|
services/core/Ambient/ambient/agent.py | gnmerritt/volttron | 406 | 28414 | <gh_stars>100-1000
# -*- coding: utf-8 -*- {{{
# vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et:
#
# Copyright (c) 2017, Battelle Memorial Institute
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
# FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# The views and conclusions contained in the software and documentation are
# those of the authors and should not be interpreted as representing official
# policies, either expressed or implied, of the FreeBSD Project.
#
# This material was prepared as an account of work sponsored by an
# agency of the United States Government. Neither the United States
# Government nor the United States Department of Energy, nor Battelle,
# nor any of their employees, nor any jurisdiction or organization
# that has cooperated in the development of these materials, makes
# any warranty, express or implied, or assumes any legal liability
# or responsibility for the accuracy, completeness, or usefulness or
# any information, apparatus, product, software, or process disclosed,
# or represents that its use would not infringe privately owned rights.
#
# Reference herein to any specific commercial product, process, or
# service by trade name, trademark, manufacturer, or otherwise does
# not necessarily constitute or imply its endorsement, recommendation,
# or favoring by the United States Government or any agency thereof,
# or Battelle Memorial Institute. The views and opinions of authors
# expressed herein do not necessarily state or reflect those of the
# United States Government or any agency thereof.
#
# PACIFIC NORTHWEST NATIONAL LABORATORY
# operated by BATTELLE for the UNITED STATES DEPARTMENT OF ENERGY
# under Contract DE-AC05-76RL01830
# }}}
__docformat__ = 'reStructuredText'
import logging
import datetime
import pytz
import sys
import grequests
# requests should be imported after grequests as requests imports ssl and grequests patches ssl
import requests
import pkg_resources
from volttron.platform.agent import utils
from volttron.platform.vip.agent import RPC
from volttron.platform.agent.utils import format_timestamp
from volttron.platform.agent.base_weather import BaseWeatherAgent
from volttron.platform import jsonapi
_log = logging.getLogger(__name__)
utils.setup_logging()
__version__ = "0.1"
def ambient(config_path, **kwargs):
"""
Parses the Agent configuration and returns an instance of the agent created using that configuration.
:param config_path: Path to a configuration file.
:type config_path: str
:returns: Ambient
:rtype: Ambient
"""
try:
config = utils.load_config(config_path)
except Exception:
config = {}
if not config:
        _log.error("Ambient agent configuration: {}".format(config))
for key in ["api_key", "application_key"]:
if not config.get(key) or not isinstance(config.get(key), str):
raise RuntimeError("Ambient agent must be configured with '{}' key.".format(key))
_log.debug("config_dict before init: {}".format(config))
utils.update_kwargs_with_config(kwargs, config)
return Ambient(**kwargs)
class Ambient(BaseWeatherAgent):
"""
The Ambient agent requires having an API key to interact with the remote API. The agent offers a performance_mode
configuration option which allows users to limit the amount of data returned by the API.
"""
def __init__(self, application_key="", **kwargs):
super(Ambient, self).__init__(**kwargs)
_log.debug("vip_identity: " + self.core.identity)
self.headers = {"Accept": "application/json",
"Accept-Language": "en-US"
}
self.remove_service("get_hourly_historical")
self.remove_service("get_hourly_forecast")
self.app_key = application_key
self.last_service_call_timestamp = None
@RPC.export
def get_version(self):
"""
Provides the current version of the agent.
:return: current version number in string format.
"""
return __version__
def validate_location(self, service_name, location):
"""
Indicates whether the location dictionary provided matches the format required by the remote weather API
:param service_name: name of the remote API service
:param location: location dictionary to provide in the remote API url
:return: True if the location matches the required format else False
"""
return isinstance(location.get("location", None), str)
def get_update_interval(self, service_name):
"""
Indicates the interval between remote API updates
:param service_name: requested service endpoint
:return: datetime timedelta representing the time interval
"""
if service_name == "get_current_weather":
return datetime.timedelta(minutes=5)
else:
return None
def get_api_description(self, service_name):
"""
Provides a human-readable description of the various endpoints provided by the agent
:param service_name: requested service endpoint
:return: Human-readable description string
"""
        if service_name == "get_current_weather":
            return ("Provides current weather observations for locations by their corresponding Ambient weather "
                    "station name via RPC (Requires {'location': <station location string>})")
else:
raise RuntimeError(
"Service {} is not implemented by Ambient.".format(service_name))
def get_point_name_defs_file(self):
"""
Constructs the point name mapping dict from the mapping csv.
:return: dictionary containing a mapping of service point names to standard point names with optional
"""
# returning resource file instead of stream, as csv.DictReader require file path or file like object opened in
# text mode.
return pkg_resources.resource_filename(__name__, "data/name_mapping.csv")
def query_current_weather(self, location):
"""
Retrieve data from the Ambient API, return formatted current data and store forecast data in cache
:param location: location dictionary requested by the user
:return: Timestamp and data for current data from the Ambient API
"""
ambient_response = self.make_request()
location_response = None
current_time = None
for record in ambient_response:
record_location = None
record_info = record.pop("info")
if record_info:
record_location = record_info.get("location", "")
if record_location:
weather_data = record.get("lastData", {})
weather_data["macAddress"] = record.pop("macAddress", "")
weather_data["name"] = record_info.get("name", "")
# "date": "2019-04-25T17:09:00.000Z"
weather_tz_string = weather_data.get('tz', None)
if weather_tz_string:
weather_tz = pytz.timezone(weather_tz_string)
else:
weather_tz = pytz.utc
weather_date = datetime.datetime.strptime(
weather_data.pop("date"), "%Y-%m-%dT%H:%M:%S.%fZ").astimezone(weather_tz)
if location["location"] == record_location:
current_time = format_timestamp(weather_date)
location_response = weather_data
else:
weather_data = self.apply_mapping(weather_data)
self.store_weather_records("get_current_weather",
[jsonapi.dumps({"location": record_location}),
weather_date,
jsonapi.dumps(weather_data)])
else:
raise RuntimeError("API record contained improper 'info' format")
return current_time, location_response
def query_forecast_service(self, service, location, quantity, forecast_start):
"""
Unimplemented method stub
:param service: forecast service type of weather data to return
:param location: location dictionary requested during the RPC call
:param quantity: number of records to return, used to generate Time Machine requests after the forecast request
:param forecast_start: forecast results that are prior to this timestamp will be filtered by base weather agent
:return: Timestamp and data returned by the Ambient weather API response
"""
raise NotImplementedError
def make_request(self):
"""
Request data from the Ambient Weather API
An example of the return value is as follows
[
{
"macAddress": "18:93:D7:3B:89:0C",
"lastData": {
"dateutc": 1556212140000,
"tempinf": 71.9,
"humidityin": 31,
"battout": "1",
"temp1f": 68.7,
"humidity1": 36,
"batt1": "1",
"date": "2019-04-25T17:09:00.000Z"
},
"info": {
"name": "Home B WS",
"location": "Lab Home B"
}
},
{
"macAddress": "50:F1:4A:F7:3C:C4",
"lastData": {
"dateutc": 1556211960000,
"tempinf": 82.5,
"humidityin": 27,
"battout": "1",
"temp1f": 68.5,
"humidity1": 42,
"batt1": "1",
"date": "2019-04-25T17:06:00.000Z"
},
"info": {
"name": "Home A WS",
"location": "Lab Home A"
}
}
]
:return:
"""
        # Authentication: two API keys are required for all REST API requests.
        #   applicationKey - identifies the developer / application. To request an application key,
        #       please email <EMAIL>
        #   apiKey - grants access to past/present data for a given user's devices. A typical
        #       consumer-facing application will initially ask the user to create an apiKey on their
        #       Ambient.net account page (https://dashboard.ambientweather.net/account) and paste it
        #       into the app. Developers for personal or in-house apps will also need to create an
        #       apiKey on their own account page.
        # Rate limiting: API requests are capped at 1 request/second for each user's apiKey and
        # 3 requests/second per applicationKey. When this limit is exceeded, the API will return
        # a 429 response code. Please be kind to our servers :)
# If the previous call to the API was at least 3 seconds ago - this is a constraint set by Ambient
if not self.last_service_call_timestamp or (
datetime.datetime.now() - self.last_service_call_timestamp).total_seconds() > 3:
url = 'https://api.ambientweather.net/v1/devices?applicationKey=' + self.app_key + '&apiKey=' + \
self._api_key
_log.info("requesting url: {}".format(url))
grequest = [grequests.get(url, verify=requests.certs.where(), headers=self.headers, timeout=30)]
gresponse = grequests.map(grequest)[0]
if gresponse is None:
raise RuntimeError("get request did not return any response")
try:
response = jsonapi.loads(gresponse.content)
self.last_service_call_timestamp = datetime.datetime.now()
return response
except ValueError:
self.last_service_call_timestamp = datetime.datetime.now()
self.generate_response_error(url, gresponse.status_code)
else:
raise RuntimeError("Previous API call to Ambient service is too recent, please wait at least 3 seconds "
"between API calls.")
def query_hourly_forecast(self, location):
"""
Unimplemented method stub
:param location: currently accepts lat/long location dictionary format only
:return: time of forecast prediction as a timestamp string, and a list of
"""
raise NotImplementedError
def query_hourly_historical(self, location, start_date, end_date):
"""
Unimplemented method stub
:param location: no format currently determined for history.
:param start_date: Starting date for historical weather period.
:param end_date: Ending date for historical weather period.
:return: NotImplementedError
"""
raise NotImplementedError
def generate_response_error(self, url, response_code):
"""
Raises a descriptive runtime error based on the response code returned by a service.
:param url: actual url used for requesting data from Ambient
:param response_code: Http response code returned by a service following a request
"""
code_x100 = int(response_code / 100)
if code_x100 == 2:
raise RuntimeError("Remote API returned no data(code:{}, url:{})".format(response_code, url))
elif code_x100 == 3:
raise RuntimeError(
"Remote API redirected request, but redirect failed (code:{}, url:{})".format(response_code, url))
elif code_x100 == 4:
raise RuntimeError(
"Request ({}) rejected by remote API: Remote API returned Code {}".format(url, response_code))
elif code_x100 == 5:
raise RuntimeError(
"Remote API returned invalid response (code:{}, url:{})".format(response_code, url))
else:
raise RuntimeError(
"API request failed with unexpected response code (code:{}, url:{})".format(response_code, url))
def main():
"""Main method called to start the agent."""
utils.vip_main(ambient,
version=__version__)
if __name__ == '__main__':
# Entry point for script
try:
sys.exit(main())
except KeyboardInterrupt:
pass
|
src/torchphysics/utils/data/__init__.py | uwe-iben/torchphysics | 203 | 28416 | from .dataloader import PointsDataset, PointsDataLoader |
rls/algorithms/single/ddqn.py | StepNeverStop/RLs | 371 | 28421 | #!/usr/bin/env python3
# encoding: utf-8
import torch.nn.functional as F
from rls.algorithms.single.dqn import DQN
from rls.common.decorator import iton
from rls.utils.torch_utils import n_step_return
class DDQN(DQN):
"""
Double DQN, https://arxiv.org/abs/1509.06461
Double DQN + LSTM, https://arxiv.org/abs/1908.06040
"""
policy_mode = 'off-policy'
def __init__(self, **kwargs):
super().__init__(**kwargs)
@iton
def _train(self, BATCH):
q = self.q_net(BATCH.obs, begin_mask=BATCH.begin_mask) # [T, B, A]
q_next = self.q_net(BATCH.obs_, begin_mask=BATCH.begin_mask) # [T, B, A]
q_target_next = self.q_net.t(BATCH.obs_, begin_mask=BATCH.begin_mask) # [T, B, A]
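        # Double DQN decouples action selection from evaluation: the online network picks
        # argmax_a Q(s', a) while the target network scores that action, giving the n-step
        # target y = R + gamma^n * (1 - done) * Q_target(s', argmax_a Q_online(s', a)).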
next_max_action = q_next.argmax(-1) # [T, B]
next_max_action_one_hot = F.one_hot(next_max_action.squeeze(), self.a_dim).float() # [T, B, A]
q_eval = (q * BATCH.action).sum(-1, keepdim=True) # [T, B, 1]
q_target_next_max = (q_target_next * next_max_action_one_hot).sum(-1, keepdim=True) # [T, B, 1]
q_target = n_step_return(BATCH.reward,
self.gamma,
BATCH.done,
q_target_next_max,
BATCH.begin_mask).detach() # [T, B, 1]
td_error = q_target - q_eval # [T, B, 1]
q_loss = (td_error.square() * BATCH.get('isw', 1.0)).mean() # 1
self.oplr.optimize(q_loss)
return td_error, {
'LEARNING_RATE/lr': self.oplr.lr,
'LOSS/loss': q_loss,
'Statistics/q_max': q_eval.max(),
'Statistics/q_min': q_eval.min(),
'Statistics/q_mean': q_eval.mean()
}
|
hyperglass/execution/drivers/agent.py | blkmajik/hyperglass | 298 | 28443 | """Execute validated & constructed query on device.
Accepts input from front end application, validates the input and
returns errors if input is invalid. Passes validated parameters to
construct.py, which is used to build & run the Netmiko connections or
hyperglass-frr API calls, returns the output back to the front end.
"""
# Standard Library
from ssl import CertificateError
from typing import Iterable
# Third Party
import httpx
# Project
from hyperglass.log import log
from hyperglass.util import parse_exception
from hyperglass.encode import jwt_decode, jwt_encode
from hyperglass.exceptions import RestError, ResponseEmpty
from hyperglass.configuration import params
# Local
from ._common import Connection
class AgentConnection(Connection):
"""Connect to target device via hyperglass-agent."""
async def collect(self) -> Iterable: # noqa: C901
"""Connect to a device running hyperglass-agent via HTTP."""
log.debug("Query parameters: {}", self.query)
client_params = {
"headers": {"Content-Type": "application/json"},
"timeout": params.request_timeout,
}
if self.device.ssl is not None and self.device.ssl.enable:
with self.device.ssl.cert.open("r") as file:
cert = file.read()
if not cert:
raise RestError(
"SSL Certificate for device {d} has not been imported",
level="danger",
d=self.device.name,
)
http_protocol = "https"
client_params.update({"verify": str(self.device.ssl.cert)})
log.debug(
(
f"Using {str(self.device.ssl.cert)} to validate connection "
f"to {self.device.name}"
)
)
else:
http_protocol = "http"
endpoint = "{protocol}://{address}:{port}/query/".format(
protocol=http_protocol, address=self.device._target, port=self.device.port
)
log.debug("URL endpoint: {}", endpoint)
try:
async with httpx.AsyncClient(**client_params) as http_client:
responses = ()
for query in self.query:
encoded_query = await jwt_encode(
payload=query,
secret=self.device.credential.password.get_secret_value(),
duration=params.request_timeout,
)
log.debug("Encoded JWT: {}", encoded_query)
raw_response = await http_client.post(
endpoint, json={"encoded": encoded_query}
)
log.debug("HTTP status code: {}", raw_response.status_code)
raw = raw_response.text
log.debug("Raw Response:\n{}", raw)
if raw_response.status_code == 200:
decoded = await jwt_decode(
payload=raw_response.json()["encoded"],
secret=self.device.credential.password.get_secret_value(),
)
log.debug("Decoded Response:\n{}", decoded)
responses += (decoded,)
elif raw_response.status_code == 204:
raise ResponseEmpty(
params.messages.no_output, device_name=self.device.name,
)
else:
log.error(raw_response.text)
except httpx.exceptions.HTTPError as rest_error:
msg = parse_exception(rest_error)
log.error("Error connecting to device {}: {}", self.device.name, msg)
raise RestError(
params.messages.connection_error,
device_name=self.device.name,
error=msg,
)
except OSError as ose:
log.critical(str(ose))
raise RestError(
params.messages.connection_error,
device_name=self.device.name,
error="System error",
)
except CertificateError as cert_error:
log.critical(str(cert_error))
msg = parse_exception(cert_error)
raise RestError(
params.messages.connection_error,
device_name=self.device.name,
error=f"{msg}: {cert_error}",
)
if raw_response.status_code != 200:
log.error("Response code is {}", raw_response.status_code)
raise RestError(
params.messages.connection_error,
device_name=self.device.name,
error=params.messages.general,
)
if not responses:
log.error("No response from device {}", self.device.name)
raise RestError(
params.messages.connection_error,
device_name=self.device.name,
error=params.messages.no_response,
)
return responses
|
examples/hover_example.py | kail85/mpldatacursor | 165 | 28456 | """
Demonstrates the hover functionality of mpldatacursor as well as point labels
and a custom formatting function. Notice that overlapping points have both
labels displayed.
"""
import string
import matplotlib.pyplot as plt
import numpy as np
from mpldatacursor import datacursor
np.random.seed(1977)
x, y = np.random.random((2, 26))
labels = string.ascii_lowercase
fig, ax = plt.subplots()
ax.scatter(x, y, s=200)
ax.set_title('Mouse over a point')
# Show only the point label and allow nicer formatting if points overlap
formatter = lambda **kwargs: ', '.join(kwargs['point_label'])
datacursor(hover=True, formatter=formatter, point_labels=labels)
plt.show()
|
tapas/utils/attention_utils_test.py | apurvak/tapas | 816 | 28458 | <filename>tapas/utils/attention_utils_test.py
# coding=utf-8
# Copyright 2019 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
from tapas.models.bert import modeling
from tapas.utils import attention_utils
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
class AttentionUtilsTest(tf.test.TestCase):
def test_matches_token_type_id(self):
with self.cached_session() as sess:
row_ids = sess.run(
tf.constant([[1, 2, 2], [5, 5, 6], [1, 2, 3], [4, 5, 6]]))
result = attention_utils._matches_token_type_id(row_ids)
expected_result = sess.run(
tf.constant([
[[1, 0, 0], [0, 1, 1], [0, 1, 1]],
[[1, 1, 0], [1, 1, 0], [0, 0, 1]],
[[1, 0, 0], [0, 1, 0], [0, 0, 1]],
[[1, 0, 0], [0, 1, 0], [0, 0, 1]],
]))
self.assertAllEqual(result, expected_result)
def test_comput_bucket_id(self):
with self.cached_session() as sess:
column_ids = tf.constant([[0, 0, 1, 2, 3, 1, 2, 3, 1, 2, 3, 0, 0, 0, 0]])
input_mask = tf.constant([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0]])
bucket_ids = tf.constant([[0, 0, 0, 1, 2, 1, 2, 3, 1, 2, 3, 3, 4, 4, 4]])
result = sess.run(
attention_utils._compute_bucket_id(
bucket_size=3,
header_size=3,
token_type_id=column_ids,
input_mask=input_mask))
expected_result = sess.run(bucket_ids)
self.assertAllEqual(result, expected_result)
def test_comput_bucket_id_on_distinct_columns(self):
with self.cached_session() as sess:
# When bucket size is 1 and columns_ids are a permutation of 0..n-1 then
# the bucket_ids will match the column_ids
column_ids = tf.constant([[0, 2, 3, 1, 5, 4]])
input_mask = tf.constant([[1, 1, 1, 1, 1, 1]])
bucket_ids = tf.constant([[0, 2, 3, 1, 5, 4]])
result = sess.run(
attention_utils._compute_bucket_id(
bucket_size=1,
header_size=1,
token_type_id=column_ids,
input_mask=input_mask))
expected_result = sess.run(bucket_ids)
self.assertAllEqual(result, expected_result)
def test_comput_bucket_id_with_header(self):
with self.cached_session() as sess:
# Similar to the distinct column test, but now we have two header tokens
column_ids = tf.constant([[0, 2, 3, 1, 5, 4]])
input_mask = tf.constant([[1, 1, 1, 1, 1, 1]])
bucket_ids = tf.constant([[0, 1, 2, 0, 4, 3]])
result = sess.run(
attention_utils._compute_bucket_id(
bucket_size=1,
header_size=2,
token_type_id=column_ids,
input_mask=input_mask))
expected_result = sess.run(bucket_ids)
self.assertAllEqual(result, expected_result)
def test_compute_headwise_sparse_attention_mask(self):
with self.cached_session() as sess:
# Table Structure
# Q1 Q2 A0 B0 C0
# A1 B1 C1
# A2 B2 C2 PAD1 PAD2 PAD3 PAD4
row_ids = tf.constant([[0, 0, 0, 0, 0, 1, 1, 1, 2, 2, 2, 0, 0, 0, 0]])
input_mask = tf.constant([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0]])
segment_ids = tf.constant([[0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0]])
column_ids = tf.constant([[0, 0, 1, 2, 3, 1, 2, 3, 1, 2, 3, 0, 0, 0, 0]])
result = sess.run(
attention_utils.compute_headwise_sparse_attention_mask(
num_row_heads=2,
num_column_heads=3,
bucket_size=0,
header_size=None,
input_mask=input_mask,
segment_ids=segment_ids,
column_ids=column_ids,
row_ids=row_ids))[0]
# Q1 Q2 A0 A1 A2 B0 B1 B2 C0 C1 C2 PADDING
expected_row_result = [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0], # Q1
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0], # Q2
[1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], # A0
[1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], # A1
[1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], # A2
[1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0], # B0
[1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0], # B1
[1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0], # B2
[1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0], # C0
[1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0], # C1
[1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0], # C2
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0], # PAD1
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0], # PAD2
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0], # PAD3
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0], # PAD4
]
# Q1 Q2 A0 A1 A2 B0 B1 B2 C0 C1 C2 PADDING
expected_column_result = [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0], # Q1
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0], # Q2
[1, 1, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0], # A0
[1, 1, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0], # A1
[1, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0], # A2
[1, 1, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0], # B0
[1, 1, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0], # B1
[1, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0], # B2
[1, 1, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0], # C0
[1, 1, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0], # C1
[1, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0], # C2
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0], # PAD1
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0], # PAD2
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0], # PAD3
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0], # PAD4
]
expected_result = sess.run(
tf.constant([expected_row_result] * 2 + [expected_column_result] * 3))
self.assertAllEqual(result, expected_result)
# With bucket size 2 no extra attention should be pruned
result = sess.run(
attention_utils.compute_headwise_sparse_attention_mask(
num_row_heads=2,
num_column_heads=3,
bucket_size=3,
header_size=None,
input_mask=input_mask,
segment_ids=segment_ids,
column_ids=column_ids,
row_ids=row_ids))[0]
# The attention of the padding tokens changes but it has no impact
self.assertAllEqual(result[:, :, -4], expected_result[:, :, -4])
def test_compute_sparse_attention_mask(self):
with self.cached_session() as sess:
# Table Structure
# Q1 Q2 A0 B0 C0
# A1 B1 C1
# A2 B2 C2 PAD1 PAD2 PAD3 PAD4
row_ids = tf.constant([[0, 0, 0, 0, 0, 1, 1, 1, 2, 2, 2, 0, 0, 0, 0]])
input_mask = tf.constant([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0]])
segment_ids = tf.constant([[0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0]])
column_ids = tf.constant([[0, 0, 1, 2, 3, 1, 2, 3, 1, 2, 3, 0, 0, 0, 0]])
result = sess.run(
attention_utils.compute_sparse_attention_mask(
input_mask=input_mask,
segment_ids=segment_ids,
column_ids=column_ids,
row_ids=row_ids))[0]
expected_result = sess.run(
# Q1 Q2 A0 A1 A2 B0 B1 B2 C0 C1 C2 PADDING
tf.constant([
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0], # Q1
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0], # Q2
[1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0], # A0
[1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0], # A1
[1, 1, 1, 1, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0], # A2
[1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0], # B0
[1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0], # B1
[1, 1, 0, 0, 1, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0], # B2
[1, 1, 1, 0, 0, 1, 0, 0, 1, 1, 1, 0, 0, 0, 0], # C0
[1, 1, 0, 1, 0, 0, 1, 0, 1, 1, 1, 0, 0, 0, 0], # C1
[1, 1, 0, 0, 1, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0], # C2
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0], # PAD1
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0], # PAD2
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0], # PAD3
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0], # PAD4
]))
self.assertAllEqual(result, expected_result)
def compare_efficient_and_vanilla_sparse_attention(
self, sess, row_ids, column_ids,
embedding_dim, num_row_heads, num_column_heads,
bucket_size, header_size, seq_length, input_size,
first_segment_size, rows_sorted):
tf.random.set_random_seed(42)
num_attention_heads = num_row_heads + num_column_heads
pad_size = seq_length - input_size
second_segment_size = input_size - first_segment_size
input_mask = tf.constant([[1] * input_size + [0] * pad_size])
segment_ids = tf.constant([[0] * first_segment_size +
[1] * second_segment_size + [0] * pad_size])
input_tensor = tf.random.normal(shape=[1, seq_length, 128])
attention_mask = attention_utils.compute_headwise_sparse_attention_mask(
num_row_heads=num_row_heads,
num_column_heads=num_column_heads,
bucket_size=bucket_size,
header_size=header_size,
input_mask=input_mask,
segment_ids=segment_ids,
column_ids=column_ids,
row_ids=row_ids)
expected_result_op = modeling.attention_layer(
input_tensor,
input_tensor,
attention_mask=attention_mask,
num_attention_heads=num_attention_heads,
size_per_head=embedding_dim)[0][0, :input_size]
result_ops = []
for sort_after_projection in [True, False]:
attention_layer = attention_utils.create_bucketed_attention_layer(
input_mask=input_mask,
input_header=tf.math.equal(segment_ids, 0),
bucket_size=bucket_size,
header_size=header_size,
sort_after_projection=sort_after_projection,
token_type_ids=[(num_row_heads, rows_sorted, row_ids),
(num_column_heads, False, column_ids)])
result_ops.append(
attention_layer(
input_tensor,
input_tensor,
num_attention_heads=num_attention_heads,
size_per_head=embedding_dim)[0][0, :input_size])
sess.run(tf.global_variables_initializer())
expected_result, result1, result2 = sess.run([expected_result_op] +
result_ops)
self.assertAllClose(result1, expected_result)
self.assertAllClose(result2, expected_result)
def test_efficient_sparse_attention_matches_vanilla_version(self):
# Tests that computing bucketed and non-bucketed attention for random
# embeddings produces the same result.
with self.cached_session() as sess:
# Table Structure
# Q1 Q2 A0 B0 C0
# A1 B1 C1
# A2 B2 C2 PAD1 PAD2 PAD3 PAD4
row_ids = tf.constant([[0, 0, 0, 0, 0, 1, 1, 1, 2, 2, 2, 0, 0, 0, 0]])
column_ids = tf.constant([[0, 0, 1, 2, 3, 1, 2, 3, 1, 2, 3, 0, 0, 0, 0]])
self.compare_efficient_and_vanilla_sparse_attention(
sess,
row_ids=row_ids,
column_ids=column_ids,
embedding_dim=32,
num_row_heads=2,
num_column_heads=4,
bucket_size=3,
header_size=3,
seq_length=15,
input_size=11,
first_segment_size=2,
rows_sorted=True)
def test_efficient_sparse_attention_random_ids_matches_vanilla_version(self):
# Tests that computing bucketed and non-bucketed attention for random
# attributes not mapping to real columns yield the same results.
with self.cached_session() as sess:
seq_length = 14
row_ids = tf.random.uniform(
shape=[1, seq_length], maxval=20, dtype=tf.int32)
column_ids = tf.random.uniform(
shape=[1, seq_length], maxval=20, dtype=tf.int32)
self.compare_efficient_and_vanilla_sparse_attention(
sess,
row_ids=row_ids,
column_ids=column_ids,
embedding_dim=16,
num_row_heads=5,
num_column_heads=1,
bucket_size=2,
header_size=4,
seq_length=seq_length,
input_size=11,
first_segment_size=2,
rows_sorted=False)
if __name__ == "__main__":
tf.test.main()
|
VA/main/utils/cluster.py | YuJaceKim/Activity-Recognition-with-Combination-of-Deeply-Learned-Visual-Attention-and-Pose-Estimation | 343 | 28464 | import numpy as np
# import matplotlib.pyplot as plt
from scipy.cluster.vq import kmeans
# def plothist(x):
# vmin = x.min()-1
# vmax = x.max()+1
# bins = np.arange(vmin, vmax, (vmax - vmin)/50)
# plt.hist(x, bins=bins)
# plt.show()
# def scatterpred(pred):
# plt.scatter(pred[:,0], pred[:,1])
# plt.show()
# def scatter_kmeans(pred):
# plt.scatter(pred[:,0], pred[:,1], color='b')
# c,v = kmeans(pred, 8)
# plt.scatter(c[:,0], c[:,1], color='r')
# plt.show()
def most_assigned(x, c):
nb_c = len(c)
assign = np.zeros(nb_c)
for i in range(len(x)):
y = x[i].reshape((1,2))
d = np.sqrt(np.sum(np.power(y.repeat(nb_c, axis=0) - c, 2), axis=1))
assign[d.argmin()] += 1
return assign.argmax()
def mean_on_most_assigned(x, c):
nb_c = len(c)
assign = np.zeros(nb_c)
mean = np.zeros(c.shape)
for i in range(len(x)):
y = x[i].reshape((1,2))
d = np.sqrt(np.sum(np.power(y.repeat(nb_c, axis=0) - c, 2), axis=1))
idx = d.argmin()
assign[idx] += 1
mean[idx,:] += x[i]
idx = assign.argmax()
return mean[idx,:] / assign[idx]
# def best_kmeans(pred):
# plt.scatter(pred[:,0], pred[:,1], color='b')
# c,v = kmeans(pred, 3)
# plt.scatter(c[:,0], c[:,1], color='g')
# n = most_assigned(pred, c)
# plt.scatter(c[n,0], c[n,1], color='r')
# plt.show()
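# clustering_joints: for each sample and joint, run k-means over the stacked predictions
# (axis 0 of y_pred) and keep the centroid with the most assigned points, i.e. a
# majority-vote style estimate of the joint location.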
def clustering_joints(y_pred, k=3):
_,nb_spl,nb_joints,dim = y_pred.shape
y = np.zeros((nb_spl, nb_joints, dim))
for s in range(nb_spl):
for j in range(nb_joints):
d = y_pred[:,s,j]
c,v = kmeans(d, k)
n = most_assigned(d, c)
y[s,j,:] = c[n]
return y
def clustering_grid(y_pred, size=10):
_, nb_spl, nb_joints, dim = y_pred.shape
assert dim == 2
yp = np.zeros((nb_spl, nb_joints, dim))
for s in range(nb_spl):
for j in range(nb_joints):
d = y_pred[:,s,j,:]
xmin = d[:,0].min()
ymin = d[:,1].min()
xmax = d[:,0].max()
ymax = d[:,1].max()
xstep = (xmax - xmin) / size
ystep = (ymax - ymin) / size
c = np.zeros((size * size, dim))
for x in range(size):
for y in range(size):
c[x + size*y, 0] = xmin + (x + 0.5) * xstep
c[x + size*y, 1] = ymin + (y + 0.5) * ystep
yp[s,j,:] = mean_on_most_assigned(d, c)
return yp
def mean_joints(y_pred):
_, nb_spl, dim, nb_joints = y_pred.shape
assert dim == 2
yp = np.zeros((nb_spl, dim, nb_joints))
for s in range(nb_spl):
for j in range(nb_joints):
d = y_pred[:,s,:,j]
yp[s, 0, j] = d[:,0].mean()
yp[s, 1, j] = d[:,1].mean()
return yp
|
moto/logs/metric_filters.py | gtourkas/moto | 5,460 | 28490 | def find_metric_transformation_by_name(metric_transformations, metric_name):
for metric in metric_transformations:
if metric["metricName"] == metric_name:
return metric
def find_metric_transformation_by_namespace(metric_transformations, metric_namespace):
for metric in metric_transformations:
if metric["metricNamespace"] == metric_namespace:
return metric
class MetricFilters:
def __init__(self):
self.metric_filters = []
def add_filter(
self, filter_name, filter_pattern, log_group_name, metric_transformations
):
self.metric_filters.append(
{
"filterName": filter_name,
"filterPattern": filter_pattern,
"logGroupName": log_group_name,
"metricTransformations": metric_transformations,
}
)
def get_matching_filters(
self, prefix=None, log_group_name=None, metric_name=None, metric_namespace=None
):
result = []
for f in self.metric_filters:
prefix_matches = prefix is None or f["filterName"].startswith(prefix)
log_group_matches = (
log_group_name is None or f["logGroupName"] == log_group_name
)
metric_name_matches = (
metric_name is None
or find_metric_transformation_by_name(
f["metricTransformations"], metric_name
)
)
namespace_matches = (
metric_namespace is None
or find_metric_transformation_by_namespace(
f["metricTransformations"], metric_namespace
)
)
if (
prefix_matches
and log_group_matches
and metric_name_matches
and namespace_matches
):
result.append(f)
return result
def delete_filter(self, filter_name=None, log_group_name=None):
for f in self.metric_filters:
if f["filterName"] == filter_name and f["logGroupName"] == log_group_name:
self.metric_filters.remove(f)
return self.metric_filters
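# Illustrative usage sketch (names below are made up, not part of the moto backend):
#
#   filters = MetricFilters()
#   filters.add_filter(
#       "error-count", '{ $.level = "ERROR" }', "my-log-group",
#       [{"metricName": "Errors", "metricNamespace": "App", "metricValue": "1"}])
#   filters.get_matching_filters(prefix="error")    # -> [the filter added above]
#   filters.delete_filter(filter_name="error-count", log_group_name="my-log-group")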
|
tests/test_win_app.py | koyoki/Airtest | 6,140 | 28502 | # encoding=utf-8
from airtest.core.win import Windows
import unittest
import numpy
import time
from testconf import try_remove
SNAPSHOT = "win_snapshot.png"
class TestWin(unittest.TestCase):
@classmethod
def setUpClass(cls):
w = Windows()
w.start_app("calc")
time.sleep(1)
cls.windows = Windows(title_re=".*计算器.*".decode("utf-8"))
def test_snapshot(self):
try_remove(SNAPSHOT)
result = self.windows.snapshot(filename=SNAPSHOT)
self.assertIsInstance(result, numpy.ndarray)
try_remove(SNAPSHOT)
def test_touch(self):
self.windows.touch((11, 11))
def test_swipe(self):
self.windows.swipe((11, 11), (100, 100))
@classmethod
def tearDownClass(cls):
cls.windows.app.kill()
if __name__ == '__main__':
unittest.main()
|
src/tools/nuscenes-devkit/prediction/tests/test_backbone.py | jie311/TraDeS | 475 | 28511 | import unittest
import torch
from torchvision.models.resnet import BasicBlock, Bottleneck
from nuscenes.prediction.models.backbone import ResNetBackbone, MobileNetBackbone
class TestBackBones(unittest.TestCase):
def count_layers(self, model):
if isinstance(model[4][0], BasicBlock):
n_convs = 2
elif isinstance(model[4][0], Bottleneck):
n_convs = 3
else:
raise ValueError("Backbone layer block not supported!")
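        # Children 4-7 of the backbone are the four residual stages; each block contributes
        # n_convs convolutions, and the +2 accounts for the stem conv and the classifier fc
        # in the conventional ResNet depth count, e.g. ResNet-18: (2 + 2 + 2 + 2) * 2 + 2 = 18.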
return sum([len(model[i]) for i in range(4, 8)]) * n_convs + 2
def test_resnet(self):
rn_18 = ResNetBackbone('resnet18')
rn_34 = ResNetBackbone('resnet34')
rn_50 = ResNetBackbone('resnet50')
rn_101 = ResNetBackbone('resnet101')
rn_152 = ResNetBackbone('resnet152')
tensor = torch.ones((1, 3, 100, 100))
self.assertEqual(rn_18(tensor).shape[1], 512)
self.assertEqual(rn_34(tensor).shape[1], 512)
self.assertEqual(rn_50(tensor).shape[1], 2048)
self.assertEqual(rn_101(tensor).shape[1], 2048)
self.assertAlmostEqual(rn_152(tensor).shape[1], 2048)
self.assertEqual(self.count_layers(list(rn_18.backbone.children())), 18)
self.assertEqual(self.count_layers(list(rn_34.backbone.children())), 34)
self.assertEqual(self.count_layers(list(rn_50.backbone.children())), 50)
self.assertEqual(self.count_layers(list(rn_101.backbone.children())), 101)
self.assertEqual(self.count_layers(list(rn_152.backbone.children())), 152)
with self.assertRaises(ValueError):
ResNetBackbone('resnet51')
def test_mobilenet(self):
mobilenet = MobileNetBackbone('mobilenet_v2')
tensor = torch.ones((1, 3, 100, 100))
self.assertEqual(mobilenet(tensor).shape[1], 1280) |
test/pytest/service-bluetooth/test_pairing_hmi_perspective.py | bitigchi/MuditaOS | 369 | 28519 | # Copyright (c) 2017-2021, <NAME>. All rights reserved.
# For licensing, see https://github.com/mudita/MuditaOS/LICENSE.md
import time
import pytest
from harness import log
from harness.dom_parser_utils import *
from harness.interface.defs import key_codes
from bt_fixtures import *
@pytest.mark.rt1051
@pytest.mark.usefixtures("bt_all_devices")
@pytest.mark.usefixtures("bt_reset")
@pytest.mark.usefixtures("bt_main_window")
@pytest.mark.usefixtures("phone_in_desktop")
@pytest.mark.usefixtures("phone_unlocked")
@pytest.mark.skipif("not config.getvalue('--bt_device')", reason='--bt_device was not specified')
def test_bt_pairing_hmi(harness, bt_device):
if not bt_device:
return
bt_device_name = bt_device
current_window_content = get_window_content(harness, 1)
is_device_in_history = item_contains_recursively(current_window_content, 'TextValue', bt_device_name )
if not is_device_in_history :
log.info("Device {} not in all devices history, scanning...".format(bt_device_name))
harness.connection.send_key_code(key_codes["left"])
max_try_count = 5
for _ in range(max_try_count, 0, -1) :
time.sleep(2)
current_window_content = get_window_content(harness, 1)
is_device_in_history = item_contains_recursively(current_window_content, 'TextValue', bt_device_name )
if is_device_in_history:
break
log.info("Device {} not found, retrying...".format(bt_device_name))
assert max_try_count
current_window_content = get_window_content(harness, 1)
parent_of_list_items = find_parent(current_window_content, 'ListItem')
steps_to_navigate_down = get_child_number_that_contains_recursively(parent_of_list_items, [('TextValue', bt_device_name)])
assert steps_to_navigate_down > -1
log.info("Navigating to the {} device, {} down".format(bt_device_name, steps_to_navigate_down ) )
for _ in range(steps_to_navigate_down) :
harness.connection.send_key_code(key_codes["down"])
log.info("Checking if device {} is focused...".format(bt_device_name))
current_window_content = get_window_content(harness, 1)
parent_of_list_items = find_parent(current_window_content, 'ListItem')
assert item_has_child_that_contains_recursively( parent_of_list_items, [('TextValue', bt_device_name), ('Focus', True)] )
|
test/test_mean_average_precision.py | JuanchoWang/xcenternet | 171 | 28531 | import numpy as np
import tensorflow as tf
import unittest
from xcenternet.model.evaluation.overlap import compute_overlap
from xcenternet.model.evaluation.mean_average_precision import MAP
class TestMeanAveragePrecision(unittest.TestCase):
def setUp(self):
self.map_bboxes = np.array(
[
[[20, 10, 80, 60], [10, 40, 40, 90], [0, 0, 100, 100]],
[[0, 0, 10, 10], [20, 20, 40, 90], [80, 20, 100, 50]],
],
dtype=np.float64,
)
self.map_labels = np.array([[0, 0, 1], [0, 0, 0]])
self.map_predictions = np.array(
[
[
[10, 40, 40, 90, 0.1, 0], # overlap 1.00 with bbox #2, low prob
[60, 10, 90, 60, 0.5, 0], # overlap 0.29 with bbox #1
[10, 30, 50, 90, 0.7, 0], # overlap 0.625 with bbox #2
[0, 0, 100, 90, 0.7, 1], # overlap 0.9 with bbox #3
[0, 0, 100, 80, 0.7, 1], # overlap 0.8 with bbox #3
],
[
[20, 20, 30, 50, 0.6, 0], # 0.21 overlap with #2
[2, 0, 10, 11, 0.8, 0], # overlap with #1
[0, 2, 14, 10, 0.9, 0], # overlap with #1
[0, 0, 10, 10, 0.7, 1], # no ground truth for 1
[80, 20, 100, 50, 0.1, 1], # no ground truth for 1
],
],
dtype=np.float32,
)
self.map_masks = np.array([[1, 1, 1], [1, 1, 1]], dtype=np.float32)
self.result_1 = {"overall": 3 / 4, "weighted": 2 / 3, "per_class": {0: (0.5, 2), 1: (1.0, 1)}}
self.result_both = {"overall": 2 / 3, "weighted": 4 / 9, "per_class": {0: (1 / 3, 5), 1: (1.0, 1)}}
def test_compute_overlap(self):
boxes1 = np.array([[10, 10, 30, 50], [10, 10, 30, 30]], dtype=np.float64)
boxes2 = np.array([[10, 10, 30, 50], [10, 10, 40, 40], [100, 70, 110, 90]], dtype=np.float64)
overlap = compute_overlap(boxes1, boxes2)
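        # e.g. boxes1[0]=[10,10,30,50] vs boxes2[1]=[10,10,40,40]: intersection 20*30=600,
        # union 800 + 900 - 600 = 1100, so IoU = 600/1100 = 6/11 as asserted below.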
self.assertAlmostEqual(1.0, overlap[0][0])
self.assertAlmostEqual(6 / 11, overlap[0][1])
self.assertAlmostEqual(0.0, overlap[0][2])
self.assertAlmostEqual(0.5, overlap[1][0])
self.assertAlmostEqual(4 / 9, overlap[1][1])
self.assertAlmostEqual(0.0, overlap[1][2])
def test_map_update_one(self):
mean_average_precision = MAP(2, iou_threshold=0.5, score_threshold=0.3)
mean_average_precision.update_state(self.map_predictions[0], self.map_bboxes[0], self.map_labels[0])
result = mean_average_precision.result()
self._assert_map(result, self.result_1)
def test_map_update_both(self):
mean_average_precision = MAP(2, iou_threshold=0.5, score_threshold=0.3)
mean_average_precision.update_state(self.map_predictions[0], self.map_bboxes[0], self.map_labels[0])
mean_average_precision.update_state(self.map_predictions[1], self.map_bboxes[1], self.map_labels[1])
result = mean_average_precision.result()
self._assert_map(result, self.result_both)
def test_map_update_batch_one(self):
mean_average_precision = MAP(2, iou_threshold=0.5, score_threshold=0.3)
mean_average_precision.update_state_batch(
tf.constant([self.map_predictions[0]]),
tf.constant([self.map_bboxes[0]]),
tf.constant([self.map_labels[0]]),
tf.constant([self.map_masks[0]]),
)
result = mean_average_precision.result()
self._assert_map(result, self.result_1)
def test_map_update_batch_both(self):
mean_average_precision = MAP(2, iou_threshold=0.5, score_threshold=0.3)
mean_average_precision.update_state_batch(
tf.constant(self.map_predictions),
tf.constant(self.map_bboxes),
tf.constant(self.map_labels),
tf.constant(self.map_masks),
)
result = mean_average_precision.result()
self._assert_map(result, self.result_both)
def _assert_map(self, first, second):
self.assertAlmostEqual(first["overall"], second["overall"])
self.assertAlmostEqual(first["weighted"], second["weighted"])
self.assertAlmostEqual(first["per_class"][0][0], second["per_class"][0][0]) # mAP
self.assertAlmostEqual(first["per_class"][0][1], second["per_class"][0][1]) # num objects
self.assertAlmostEqual(first["per_class"][1][0], second["per_class"][1][0]) # mAP
self.assertAlmostEqual(first["per_class"][1][1], second["per_class"][1][1]) # num objects
if __name__ == "__main__":
unittest.main()
|
tests/integration/test_user_defined_object_persistence/test.py | pdv-ru/ClickHouse | 15,577 | 28535 | <filename>tests/integration/test_user_defined_object_persistence/test.py
import pytest
from helpers.cluster import ClickHouseCluster
cluster = ClickHouseCluster(__file__)
instance = cluster.add_instance('instance', stay_alive=True)
@pytest.fixture(scope="module", autouse=True)
def started_cluster():
try:
cluster.start()
yield cluster
finally:
cluster.shutdown()
def test_persistence():
create_function_query1 = "CREATE FUNCTION MySum1 AS (a, b) -> a + b"
create_function_query2 = "CREATE FUNCTION MySum2 AS (a, b) -> MySum1(a, b) + b"
instance.query(create_function_query1)
instance.query(create_function_query2)
assert instance.query("SELECT MySum1(1,2)") == "3\n"
assert instance.query("SELECT MySum2(1,2)") == "5\n"
instance.restart_clickhouse()
assert instance.query("SELECT MySum1(1,2)") == "3\n"
assert instance.query("SELECT MySum2(1,2)") == "5\n"
instance.query("DROP FUNCTION MySum2")
instance.query("DROP FUNCTION MySum1")
instance.restart_clickhouse()
assert "Unknown function MySum1" in instance.query_and_get_error("SELECT MySum1(1, 2)")
assert "Unknown function MySum2" in instance.query_and_get_error("SELECT MySum2(1, 2)")
|
underworld/conditions/_conditions.py | longgangfan/underworld2 | 116 | 28556 | <filename>underworld/conditions/_conditions.py
##~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~##
## ##
## This file forms part of the Underworld geophysics modelling application. ##
## ##
## For full license and copyright information, please refer to the LICENSE.md file ##
## located at the project root, or contact the authors. ##
## ##
##~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~##
"""
This module contains conditions used for applying constraints on model dynamics.
"""
import underworld as uw
import underworld._stgermain as _stgermain
import underworld.libUnderworld as libUnderworld
import abc
class SystemCondition(_stgermain.StgCompoundComponent, metaclass = abc.ABCMeta):
def _add_to_stg_dict(self,componentDict):
pass
def __init__(self, variable, indexSetsPerDof):
if not isinstance( variable, uw.mesh.MeshVariable ):
raise TypeError("Provided variable must be of class 'MeshVariable'.")
self._variable = variable
if isinstance( indexSetsPerDof, uw.container.IndexSet ):
indexSets = ( indexSetsPerDof, )
elif isinstance( indexSetsPerDof, (list,tuple)):
indexSets = indexSetsPerDof
else:
raise TypeError("You must provide the required 'indexSetsPerDof' item\n"+
"as a list or tuple of 'IndexSet' items.")
for guy in indexSets:
if not isinstance( guy, (uw.container.IndexSet,type(None)) ):
raise TypeError("Provided list must only contain objects of 'NoneType' or type 'IndexSet'.")
self._indexSets = indexSets
if variable.nodeDofCount != len(self._indexSets):
raise ValueError("Provided variable has a nodeDofCount of {}, however you have ".format(variable.nodeDofCount)+
"provided {} index set(s). You must provide an index set for each degree ".format(len(self._indexSets))+
"of freedom of your variable, but no more.")
# ok, lets setup the c array
libUnderworld.StGermain._PythonVC_SetupIndexSetArray(self._cself,len(self._indexSets))
# now, lets add the indexSet objects
for position,set in enumerate(self._indexSets):
if set:
libUnderworld.StGermain._PythonVC_SetIndexSetAtArrayPosition( self._cself, set._cself, position );
@property
def indexSetsPerDof(self):
""" See class constructor for details. """
return self._indexSets
@property
def variable(self):
""" See class constructor for details. """
return self._variable
class DirichletCondition(SystemCondition):
"""
    The DirichletCondition class provides the required functionality to impose Dirichlet
conditions on your differential equation system.
The user is simply required to flag which nodes/DOFs should be considered by the system
    to be a Dirichlet condition. The values at the Dirichlet nodes/DOFs are then left
    untouched by the system.
Parameters
----------
variable : underworld.mesh.MeshVariable
This is the variable for which the Dirichlet condition applies.
indexSetsPerDof : list, tuple, IndexSet
The index set(s) which flag nodes/DOFs as Dirichlet conditions.
Note that the user must provide an index set for each degree of
freedom of the variable. So for a vector variable of rank 2 (say Vx & Vy),
two index sets must be provided (say VxDofSet, VyDofSet).
Notes
-----
Note that it is necessary for the user to set the required value on the variable, possibly
via the numpy interface.
    Constructor must be called collectively by all processes.
Example
-------
Basic setup and usage of Dirichlet conditions:
>>> linearMesh = uw.mesh.FeMesh_Cartesian( elementType='Q1/dQ0', elementRes=(4,4), minCoord=(0.,0.), maxCoord=(1.,1.) )
>>> velocityField = uw.mesh.MeshVariable( linearMesh, 2 )
>>> velocityField.data[:] = [0.,0.] # set velocity zero everywhere, which will of course include the boundaries.
>>> IWalls = linearMesh.specialSets["MinI_VertexSet"] + linearMesh.specialSets["MaxI_VertexSet"] # get some wall index sets
>>> JWalls = linearMesh.specialSets["MinJ_VertexSet"] + linearMesh.specialSets["MaxJ_VertexSet"]
>>> freeSlipBC = uw.conditions.DirichletCondition(velocityField, (IWalls,JWalls) ) # this will give free slip sides
>>> noSlipBC = uw.conditions.DirichletCondition(velocityField, (IWalls+JWalls,IWalls+JWalls) ) # this will give no slip sides
"""
_objectsDict = { "_pyvc": "PythonVC" }
_selfObjectName = "_pyvc"
def __init__(self, variable, indexSetsPerDof):
super(DirichletCondition,self).__init__(variable, indexSetsPerDof)
class NeumannCondition(SystemCondition):
"""
This class defines Neumann conditions for a differential equation.
    Neumann conditions specify a field's flux along a boundary.
    As such the user specifies the field's flux as a uw.Function and the nodes where this flux
    is to be applied - similar to uw.conditions.DirichletCondition
Parameters
----------
fn_flux : underworld.function.Function
Function which determines flux values.
variable : underworld.mesh.MeshVariable
The variable that describes the discretisation (mesh & DOFs) for 'indexSetsPerDof'
indexSetsPerDof : list, tuple, IndexSet
The index set(s) which flag nodes/DOFs as Neumann conditions.
Note that the user must provide an index set for each degree of
freedom of the variable above. So for a vector variable of rank 2 (say Vx & Vy),
two index sets must be provided (say VxDofSet, VyDofSet).
Example
-------
Basic setup and usage of Neumann conditions:
>>> linearMesh = uw.mesh.FeMesh_Cartesian( elementType='Q1/dQ0', elementRes=(4,4), minCoord=(0.,0.), maxCoord=(1.,1.) )
>>> velocityField = uw.mesh.MeshVariable( linearMesh, 2 )
>>> velocityField.data[:] = [0.,0.] # set velocity zero everywhere, which will of course include the boundaries.
>>> myFunc = (uw.function.coord()[1],0.0)
>>> bottomWall = linearMesh.specialSets["MinJ_VertexSet"]
>>> tractionBC = uw.conditions.NeumannCondition(variable=velocityField, fn_flux=myFunc, indexSetsPerDof=(None,bottomWall) )
"""
_objectsDict = { "_pyvc": "PythonVC" }
_selfObjectName = "_pyvc"
def __init__(self, variable, indexSetsPerDof=None, fn_flux=None ):
# call parent
super(NeumannCondition,self).__init__(variable, indexSetsPerDof)
_fn_flux = uw.function.Function.convert(fn_flux)
if not isinstance( _fn_flux, uw.function.Function):
raise TypeError( "Provided 'fn_flux' must be of or convertible to 'Function' class." )
self.fn_flux=_fn_flux
@property
def fn_flux(self):
""" Get the underworld.Function that defines the flux """
return self._fn_flux
@fn_flux.setter
def fn_flux(self, fn):
""" Set the underworld.Function that defines the flux """
_fn = uw.function.Function.convert(fn)
if not isinstance( _fn, uw.function.Function):
raise ValueError( "Provided '_fn' must be of or convertible to 'Function' class." )
self._fn_flux = _fn
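# Illustrative sketch (not part of the original module): a minimal example combining the two
# condition types defined above, reusing the doctest-style setup from the class docstrings.
# The mesh resolution and the flux function are arbitrary placeholder choices.
def _example_conditions():
    mesh = uw.mesh.FeMesh_Cartesian(elementType='Q1/dQ0', elementRes=(4, 4),
                                    minCoord=(0., 0.), maxCoord=(1., 1.))
    velocityField = uw.mesh.MeshVariable(mesh, 2)
    velocityField.data[:] = [0., 0.]
    iWalls = mesh.specialSets["MinI_VertexSet"] + mesh.specialSets["MaxI_VertexSet"]
    jWalls = mesh.specialSets["MinJ_VertexSet"] + mesh.specialSets["MaxJ_VertexSet"]
    freeSlip = DirichletCondition(velocityField, (iWalls, jWalls))
    bottomWall = mesh.specialSets["MinJ_VertexSet"]
    traction = NeumannCondition(variable=velocityField,
                                fn_flux=(uw.function.coord()[1], 0.0),
                                indexSetsPerDof=(None, bottomWall))
    return freeSlip, traction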
|
scripts/geodata/phrases/extraction.py | Fillr/libpostal | 3,489 | 28562 | <reponame>Fillr/libpostal<gh_stars>1000+
import csv
import six
from collections import defaultdict, Counter
from itertools import izip, islice
from geodata.text.tokenize import tokenize, token_types
from geodata.encoding import safe_encode
class FrequentPhraseExtractor(object):
'''
Extract common multi-word phrases from a file/iterator using the
frequent itemsets method to keep memory usage low.
'''
WORD_TOKEN_TYPES = (token_types.WORD,
token_types.IDEOGRAPHIC_CHAR,
token_types.ABBREVIATION,
token_types.HANGUL_SYLLABLE,
token_types.ACRONYM)
def __init__(self, min_count=5):
self.min_count = min_count
self.vocab = defaultdict(int)
self.frequencies = defaultdict(int)
self.train_words = 0
def ngrams(self, words, n=2):
for t in izip(*(islice(words, i, None) for i in xrange(n))):
yield t
def add_tokens(self, s):
for t, c in tokenize(s):
if c in self.WORD_TOKEN_TYPES:
self.vocab[((t.lower(), c), )] += 1
self.train_words += 1
def create_vocab(self, f):
for line in f:
line = line.rstrip()
if not line:
continue
self.add_tokens(line)
self.prune_vocab()
def prune_vocab(self):
for k in self.vocab.keys():
if self.vocab[k] < self.min_count:
del self.vocab[k]
def add_ngrams(self, s, n=2):
sequences = []
seq = []
for t, c in tokenize(s):
if c in self.WORD_TOKEN_TYPES:
seq.append((t, c))
elif seq:
sequences.append(seq)
seq = []
if seq:
sequences.append(seq)
for seq in sequences:
for gram in self.ngrams(seq, n=n):
last_c = None
prev_tokens = tuple([(t.lower(), c) for t, c in gram[:-1]])
if prev_tokens in self.vocab:
t, c = gram[-1]
current_token = (t.lower(), c)
self.frequencies[(prev_tokens, current_token)] += 1
def add_frequent_ngrams_to_vocab(self):
for k, v in six.iteritems(self.frequencies):
if v < self.min_count:
continue
prev, current = k
self.vocab[prev + (current,)] = v
def find_ngram_phrases(self, f, n=2):
self.frequencies = defaultdict(int)
for line in f:
line = line.rstrip()
if not line:
continue
self.add_ngrams(line, n=n)
self.add_frequent_ngrams_to_vocab()
self.frequencies = defaultdict(int)
@classmethod
def from_file(cls, f, max_phrase_len=5, min_count=5):
        phrases = cls(min_count=min_count)
        filename = getattr(f, 'name', '<stream>')  # original referenced an undefined `filename`
        print('Doing frequent words for {}'.format(filename))
f.seek(0)
phrases.create_vocab(f)
for n in xrange(2, max_phrase_len + 1):
print('Doing frequent ngrams, n={} for {}'.format(n, filename))
f.seek(0)
phrases.find_ngram_phrases(f, n=n)
print('Done with {}'.format(filename))
return phrases
def to_tsv(self, filename, mode='w', max_rows=None):
f = open(filename, mode)
writer = csv.writer(f, delimiter='\t')
for i, (k, v) in enumerate(Counter(self.vocab).most_common()):
if max_rows is not None and i == max_rows:
break
gram = []
for t, c in k:
gram.append(t)
if c != token_types.IDEOGRAPHIC_CHAR:
gram.append(six.text_type(' '))
phrase = six.text_type('').join(gram)
writer.writerow((safe_encode(phrase), safe_encode(len(k)), safe_encode(v)))
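# Illustrative sketch (not part of the original module): intended usage based on the
# from_file/to_tsv methods above. The input path is a hypothetical placeholder; the file
# is expected to contain one address/phrase candidate per line.
def _example_extract_phrases(path='/tmp/addresses.txt'):
    with open(path) as f:
        phrases = FrequentPhraseExtractor.from_file(f, max_phrase_len=3, min_count=5)
    phrases.to_tsv('/tmp/frequent_phrases.tsv', max_rows=1000)
    return phrases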
|
test/hummingbot/connector/exchange/bitfinex/test_bitfinex_api_order_book_data_source.py | BGTCapital/hummingbot | 3,027 | 28566 | <filename>test/hummingbot/connector/exchange/bitfinex/test_bitfinex_api_order_book_data_source.py
import asyncio
import json
from unittest import TestCase
from aioresponses import aioresponses
import hummingbot.connector.exchange.bitfinex.bitfinex_utils as utils
from hummingbot.connector.exchange.bitfinex import BITFINEX_REST_URL
from hummingbot.connector.exchange.bitfinex.bitfinex_api_order_book_data_source import BitfinexAPIOrderBookDataSource
class BitfinexAPIOrderBookDataSourceTests(TestCase):
# the level is required to receive logs from the data source logger
level = 0
def setUp(self) -> None:
super().setUp()
self.log_records = []
BitfinexAPIOrderBookDataSource.logger().setLevel(1)
BitfinexAPIOrderBookDataSource.logger().addHandler(self)
def handle(self, record):
self.log_records.append(record)
def _is_logged(self, log_level: str, message: str) -> bool:
return any(record.levelname == log_level and record.getMessage() == message for record in self.log_records)
@aioresponses()
def test_get_last_traded_price(self, api_mock):
response = [
10645,
73.93854271,
10647,
75.22266119,
731.60645389,
0.0738,
10644.00645389,
14480.89849423,
10766,
9889.1449809]
api_mock.get(f"{BITFINEX_REST_URL}/ticker/{utils.convert_to_exchange_trading_pair('BTC-USDT')}",
body=json.dumps(response))
last_price = asyncio.get_event_loop().run_until_complete(
BitfinexAPIOrderBookDataSource.get_last_traded_price("BTC-USDT"))
self.assertEqual(response[6], last_price)
@aioresponses()
def test_get_last_traded_price_returns_zero_when_an_error_happens(self, api_mock):
response = {"error": "ERR_RATE_LIMIT"}
api_mock.get(f"{BITFINEX_REST_URL}/ticker/{utils.convert_to_exchange_trading_pair('BTC-USDT')}",
body=json.dumps(response))
last_price = asyncio.get_event_loop().run_until_complete(
BitfinexAPIOrderBookDataSource.get_last_traded_price("BTC-USDT"))
self.assertEqual(0, last_price)
self.assertTrue(self._is_logged(
"ERROR",
f"Error encountered requesting ticker information. The response was: {response} "
f"(There was an error requesting ticker information BTC-USDT ({response}))"
))
|
components/gpio_control/GPIODevices/simple_button.py | steffakasid/RPi-Jukebox-RFID | 1,010 | 28580 | import time
from signal import pause
import logging
import RPi.GPIO as GPIO
GPIO.setmode(GPIO.BCM)
logger = logging.getLogger(__name__)
map_edge_parse = {'falling':GPIO.FALLING, 'rising':GPIO.RISING, 'both':GPIO.BOTH}
map_pull_parse = {'pull_up':GPIO.PUD_UP, 'pull_down':GPIO.PUD_DOWN, 'pull_off':GPIO.PUD_OFF}
map_edge_print = {GPIO.FALLING: 'falling', GPIO.RISING: 'rising', GPIO.BOTH: 'both'}
map_pull_print = {GPIO.PUD_UP:'pull_up', GPIO.PUD_DOWN: 'pull_down', GPIO.PUD_OFF: 'pull_off'}
def parse_edge_key(edge):
if edge in [GPIO.FALLING, GPIO.RISING, GPIO.BOTH]:
return edge
try:
result = map_edge_parse[edge.lower()]
except KeyError:
result = edge
raise KeyError('Unknown Edge type {edge}'.format(edge=edge))
return result
def parse_pull_up_down(pull_up_down):
if pull_up_down in [GPIO.PUD_UP, GPIO.PUD_DOWN, GPIO.PUD_OFF]:
return pull_up_down
try:
result = map_pull_parse[pull_up_down]
except KeyError:
result = pull_up_down
raise KeyError('Unknown Pull Up/Down type {pull_up_down}'.format(pull_up_down=pull_up_down))
return result
def print_edge_key(edge):
try:
result = map_edge_print[edge]
except KeyError:
result = edge
return result
def print_pull_up_down(pull_up_down):
try:
result = map_pull_print[pull_up_down]
except KeyError:
result = pull_up_down
return result
# This function takes a holding time (fractional seconds), a GPIO channel and a GPIO state.
# It checks whether the GPIO stays in that state from the moment the function is called.
# If the state changes it returns False; once the holding time has elapsed it returns True.
def checkGpioStaysInState(holdingTime, gpioChannel, gpioHoldingState):
# Get a reference start time (https://docs.python.org/3/library/time.html#time.perf_counter)
startTime = time.perf_counter()
# Continously check if time is not over
while True:
time.sleep(0.1)
currentState = GPIO.input(gpioChannel)
if holdingTime < (time.perf_counter() - startTime):
break
# Return if state does not match holding state
if (gpioHoldingState != currentState):
return False
# Else: Wait
if (gpioHoldingState != currentState):
return False
return True
class SimpleButton:
def __init__(self, pin, action=lambda *args: None, action2=lambda *args: None, name=None,
bouncetime=500, antibouncehack=False, edge='falling', hold_time=.3, hold_mode=None, pull_up_down='pull_up'):
self.edge = parse_edge_key(edge)
self.hold_time = hold_time
self.hold_mode = hold_mode
self.pull_up = True
self.pull_up_down = parse_pull_up_down(pull_up_down)
self.pin = pin
self.name = name
self.bouncetime = bouncetime
self.antibouncehack = antibouncehack
GPIO.setup(self.pin, GPIO.IN, pull_up_down=self.pull_up_down)
self._action = action
self._action2 = action2
GPIO.add_event_detect(self.pin, edge=self.edge, callback=self.callbackFunctionHandler,
bouncetime=self.bouncetime)
self.callback_with_pin_argument = False
def callbackFunctionHandler(self, *args):
if len(args) > 0 and args[0] == self.pin and not self.callback_with_pin_argument:
logger.debug('Remove pin argument by callbackFunctionHandler - args before: {}'.format(args))
args = args[1:]
logger.debug('args after: {}'.format(args))
if self.antibouncehack:
time.sleep(0.1)
inval = GPIO.input(self.pin)
if inval != GPIO.LOW:
return None
if self.hold_mode in ('Repeat', 'Postpone', 'SecondFunc', 'SecondFuncRepeat'):
return self.longPressHandler(*args)
else:
logger.info('{}: execute callback'.format(self.name))
return self.when_pressed(*args)
@property
def when_pressed(self):
logger.info('{}: action'.format(self.name))
return self._action
@property
def when_held(self):
logger.info('{}: action2'.format(self.name))
return self._action2
@when_pressed.setter
def when_pressed(self, func):
        logger.info('{}: set when_pressed'.format(self.name))
self._action = func
GPIO.remove_event_detect(self.pin)
logger.info('add new action')
GPIO.add_event_detect(self.pin, edge=self.edge, callback=self.callbackFunctionHandler, bouncetime=self.bouncetime)
def set_callbackFunction(self, callbackFunction):
self.when_pressed = callbackFunction
def longPressHandler(self, *args):
logger.info('{}: longPressHandler, mode: {}'.format(self.name, self.hold_mode))
# instant action (except Postpone mode)
if self.hold_mode != "Postpone":
self.when_pressed(*args)
# action(s) after hold_time
if self.hold_mode == "Repeat":
# Repeated call of main action (multiple times if button is held long enough)
while checkGpioStaysInState(self.hold_time, self.pin, GPIO.LOW):
self.when_pressed(*args)
elif self.hold_mode == "Postpone":
# Postponed call of main action (once)
if checkGpioStaysInState(self.hold_time, self.pin, GPIO.LOW):
self.when_pressed(*args)
while checkGpioStaysInState(self.hold_time, self.pin, GPIO.LOW):
pass
elif self.hold_mode == "SecondFunc":
# Call of secondary action (once)
if checkGpioStaysInState(self.hold_time, self.pin, GPIO.LOW):
self.when_held(*args)
while checkGpioStaysInState(self.hold_time, self.pin, GPIO.LOW):
pass
elif self.hold_mode == "SecondFuncRepeat":
# Repeated call of secondary action (multiple times if button is held long enough)
while checkGpioStaysInState(self.hold_time, self.pin, GPIO.LOW):
self.when_held(*args)
def __del__(self):
logger.debug('remove event detection')
GPIO.remove_event_detect(self.pin)
@property
def is_pressed(self):
if self.pull_up:
return not GPIO.input(self.pin)
return GPIO.input(self.pin)
def __repr__(self):
return '<SimpleButton-{}(pin={},edge={},hold_mode={},hold_time={},bouncetime={},antibouncehack={},pull_up_down={})>'.format(
self.name, self.pin, print_edge_key(self.edge), self.hold_mode, self.hold_time, self.bouncetime,self.antibouncehack,print_pull_up_down(self.pull_up_down)
)
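# Illustrative sketch (not part of the original module): wiring a second, long-press action
# via hold_mode='SecondFunc'. The pin number and the two callbacks are placeholder choices;
# only the constructor arguments mirror the class above.
def _example_second_function_button(pin=17):
    short_press = lambda *args: logger.info('short press on pin {}'.format(pin))
    long_press = lambda *args: logger.info('held >= 2s on pin {}'.format(pin))
    return SimpleButton(pin=pin, action=short_press, action2=long_press,
                        hold_mode='SecondFunc', hold_time=2.0)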
if __name__ == "__main__":
print('please enter pin no to test')
pin = int(input())
func = lambda *args: print('FunctionCall with {}'.format(args))
btn = SimpleButton(pin=pin, action=func, hold_mode='Repeat')
pause()
|
services/ui_backend_service/tests/integration_tests/tasks_test.py | runsascoded/metaflow-service | 103 | 28584 | import pytest
import time
from .utils import (
init_app, init_db, clean_db,
add_flow, add_run, add_step, add_task, add_artifact,
_test_list_resources, _test_single_resource, add_metadata, get_heartbeat_ts
)
pytestmark = [pytest.mark.integration_tests]
# Fixtures begin
@pytest.fixture
def cli(loop, aiohttp_client):
return init_app(loop, aiohttp_client)
@pytest.fixture
async def db(cli):
async_db = await init_db(cli)
yield async_db
await clean_db(async_db)
# Fixtures end
async def test_list_tasks(cli, db):
_flow = (await add_flow(db, flow_id="HelloFlow")).body
_run = (await add_run(db, flow_id=_flow.get("flow_id"))).body
_step = (await add_step(db, flow_id=_run.get("flow_id"), step_name="step", run_number=_run.get("run_number"), run_id=_run.get("run_id"))).body
await _test_list_resources(cli, db, "/flows/{flow_id}/runs/{run_number}/tasks".format(**_step), 200, [])
await _test_list_resources(cli, db, "/flows/{flow_id}/runs/{run_number}/steps/{step_name}/tasks".format(**_step), 200, [])
_task = await create_task(db, step=_step)
_task['duration'] = None
_task['status'] = 'pending'
await _test_list_resources(cli, db, "/flows/{flow_id}/runs/{run_number}/tasks".format(**_task), 200, [_task])
await _test_list_resources(cli, db, "/flows/{flow_id}/runs/{run_number}/steps/{step_name}/tasks".format(**_task), 200, [_task])
async def test_list_tasks_non_numerical(cli, db):
_flow = (await add_flow(db, flow_id="HelloFlow")).body
_run = (await add_run(db, flow_id=_flow.get("flow_id"))).body
_step = (await add_step(db, flow_id=_run.get("flow_id"), step_name="step", run_number=_run.get("run_number"), run_id=_run.get("run_id"))).body
await _test_list_resources(cli, db, "/flows/{flow_id}/runs/{run_number}/tasks".format(**_step), 200, [])
await _test_list_resources(cli, db, "/flows/{flow_id}/runs/{run_number}/steps/{step_name}/tasks".format(**_step), 200, [])
_task = await create_task(db, step=_step, task_name="bar")
_, data = await _test_list_resources(cli, db, "/flows/{flow_id}/runs/{run_number}/tasks".format(**_task), 200, None)
_, data = await _test_list_resources(cli, db, "/flows/{flow_id}/runs/{run_number}/steps/{step_name}/tasks".format(**_task), 200, None)
assert len(data) == 1
assert data[0]['task_name'] == 'bar'
assert data[0]['task_id'] != 'bar'
async def test_single_task(cli, db):
await _test_single_resource(cli, db, "/flows/HelloFlow/runs/404/steps/none/tasks/5", 404, {})
_task = await create_task(db)
_task['duration'] = None
_task['status'] = 'pending'
await _test_single_resource(cli, db, "/flows/{flow_id}/runs/{run_number}/steps/{step_name}/tasks/{task_id}".format(**_task), 200, _task)
async def test_single_task_non_numerical(cli, db):
_task = await create_task(db, task_name="bar")
_, data = await _test_single_resource(cli, db, "/flows/{flow_id}/runs/{run_number}/steps/{step_name}/tasks/bar".format(**_task), 200, None)
assert data['task_name'] == 'bar'
assert data['task_id'] != 'bar'
async def test_list_old_metadata_task_attempts(cli, db):
# Test tasks with old (missing attempt) metadata
_task = await create_task(db)
_task['duration'] = None
_task['status'] = 'pending'
await _test_list_resources(cli, db, "/flows/{flow_id}/runs/{run_number}/steps/{step_name}/tasks/{task_id}/attempts".format(**_task), 200, [_task])
_artifact_first = await create_ok_artifact_for_task(db, _task)
_artifact_second = await create_ok_artifact_for_task(db, _task, attempt=1)
_task['status'] = 'unknown'
_task['task_ok'] = 'location'
_task_first_attempt = dict(_task)
_task_second_attempt = dict(_task)
_task_first_attempt['attempt_id'] = 0
_task_first_attempt['finished_at'] = _artifact_first['ts_epoch']
_task_first_attempt['duration'] = _artifact_first['ts_epoch'] - \
_task_first_attempt['ts_epoch']
_task_second_attempt['attempt_id'] = 1
_task_second_attempt['finished_at'] = _artifact_second['ts_epoch']
_task_second_attempt['duration'] = _artifact_second['ts_epoch'] - \
_task_second_attempt['ts_epoch']
await _test_list_resources(cli, db, "/flows/{flow_id}/runs/{run_number}/steps/{step_name}/tasks?task_id={task_id}".format(**_task), 200, [_task_second_attempt, _task_first_attempt])
await _test_list_resources(cli, db, "/flows/{flow_id}/runs/{run_number}/steps/{step_name}/tasks/{task_id}/attempts".format(**_task), 200, [_task_second_attempt, _task_first_attempt])
async def test_old_metadata_task_with_multiple_attempts(cli, db):
# Test tasks with old (missing attempt) metadata
_task = await create_task(db)
_task['duration'] = None
_task['status'] = 'pending'
await _test_list_resources(cli, db, "/flows/{flow_id}/runs/{run_number}/steps/{step_name}/tasks/{task_id}/attempts".format(**_task), 200, [_task])
_artifact_first = await create_ok_artifact_for_task(db, _task)
_artifact_second = await create_ok_artifact_for_task(db, _task, attempt=1)
_task['status'] = 'unknown'
_task['task_ok'] = 'location'
_task['attempt_id'] = 1
_task['finished_at'] = _artifact_second['ts_epoch']
_task['duration'] = _artifact_second['ts_epoch'] - \
_task['ts_epoch']
await _test_single_resource(cli, db, "/flows/{flow_id}/runs/{run_number}/steps/{step_name}/tasks/{task_id}".format(**_task), 200, _task)
async def test_task_with_attempt_metadata(cli, db):
_task = await create_task(db)
_task['duration'] = None
_task['status'] = 'pending'
await _test_list_resources(cli, db, "/flows/{flow_id}/runs/{run_number}/steps/{step_name}/tasks/{task_id}/attempts".format(**_task), 200, [_task])
_attempt_first = await create_task_attempt_metadata(db, _task)
_artifact_first = await create_ok_artifact_for_task(db, _task)
_task['started_at'] = _attempt_first['ts_epoch']
_task['finished_at'] = _artifact_first['ts_epoch']
_task['duration'] = _task['finished_at'] - _task['started_at']
_task['status'] = 'unknown'
_task['task_ok'] = 'location'
await _test_single_resource(cli, db, "/flows/{flow_id}/runs/{run_number}/steps/{step_name}/tasks/{task_id}".format(**_task), 200, _task)
_attempt_done_first = await create_task_attempt_done_metadata(db, _task)
_task['status'] = 'unknown'
_task['finished_at'] = _attempt_done_first['ts_epoch']
_task['duration'] = _attempt_done_first['ts_epoch'] - _task['started_at']
await _test_single_resource(cli, db, "/flows/{flow_id}/runs/{run_number}/steps/{step_name}/tasks/{task_id}".format(**_task), 200, _task)
_attempt_ok_first = await create_task_attempt_ok_metadata(db, _task, 0, True) # status 'completed'
_task['status'] = 'completed'
_task['finished_at'] = _attempt_ok_first['ts_epoch']
_task['duration'] = _attempt_ok_first['ts_epoch'] - _task['started_at']
_task['task_ok'] = None # intended behavior, status refinement location field should remain empty when metadata exists.
await _test_single_resource(cli, db, "/flows/{flow_id}/runs/{run_number}/steps/{step_name}/tasks/{task_id}".format(**_task), 200, _task)
async def test_task_failed_status_with_heartbeat(cli, db):
_task = await create_task(db, last_heartbeat_ts=1, status="failed")
_task['finished_at'] = 1000 # should be last heartbeat in this case, due to every other timestamp missing.
_task['duration'] = _task['last_heartbeat_ts'] * 1000 - _task['ts_epoch']
await _test_list_resources(cli, db, "/flows/{flow_id}/runs/{run_number}/steps/{step_name}/tasks/{task_id}/attempts".format(**_task), 200, [_task])
async def test_task_running_status_with_heartbeat(cli, db):
hb_freeze = get_heartbeat_ts()
_task = await create_task(db, last_heartbeat_ts=hb_freeze)
_task['finished_at'] = None # should not have a finished at for running tasks.
_task['duration'] = hb_freeze * 1000 - _task['ts_epoch']
await _test_list_resources(cli, db, "/flows/{flow_id}/runs/{run_number}/steps/{step_name}/tasks/{task_id}/attempts".format(**_task), 200, [_task])
async def test_list_task_attempts(cli, db):
_task = await create_task(db)
_task['duration'] = None
_task['status'] = 'pending'
await _test_list_resources(cli, db, "/flows/{flow_id}/runs/{run_number}/steps/{step_name}/tasks/{task_id}/attempts".format(**_task), 200, [_task])
_attempt_first = await create_task_attempt_metadata(db, _task)
_artifact_first = await create_ok_artifact_for_task(db, _task)
_attempt_done_first = await create_task_attempt_done_metadata(db, _task)
_attempt_second = await create_task_attempt_metadata(db, _task, attempt=1)
_artifact_second = await create_ok_artifact_for_task(db, _task, attempt=1)
_task_first_attempt = dict(_task)
_task_second_attempt = dict(_task)
_task_first_attempt['attempt_id'] = 0
_task_first_attempt['status'] = 'unknown'
_task_first_attempt['task_ok'] = 'location'
_task_first_attempt['started_at'] = _attempt_first['ts_epoch']
_task_first_attempt['finished_at'] = _attempt_done_first['ts_epoch']
_task_first_attempt['duration'] = _task_first_attempt['finished_at'] \
- _task_first_attempt['started_at']
# Second attempt counts as completed as well due to the _task_ok existing.
_task_second_attempt['attempt_id'] = 1
_task_second_attempt['status'] = 'unknown'
_task_second_attempt['task_ok'] = 'location'
_task_second_attempt['started_at'] = _attempt_second['ts_epoch']
_task_second_attempt['finished_at'] = _artifact_second['ts_epoch']
_task_second_attempt['duration'] = _task_second_attempt['finished_at'] \
- _task_second_attempt['started_at']
await _test_list_resources(cli, db, "/flows/{flow_id}/runs/{run_number}/steps/{step_name}/tasks?task_id={task_id}".format(**_task), 200, [_task_second_attempt, _task_first_attempt])
await _test_list_resources(cli, db, "/flows/{flow_id}/runs/{run_number}/steps/{step_name}/tasks/{task_id}/attempts".format(**_task), 200, [_task_second_attempt, _task_first_attempt])
async def test_task_with_attempt_ok_completed(cli, db):
_task = await create_task(db)
_attempt_first = await create_task_attempt_metadata(db, _task)
_artifact_first = await create_ok_artifact_for_task(db, _task)
_attempt_ok = await create_task_attempt_ok_metadata(db, _task, 0, True) # status = 'completed'
_task['started_at'] = _attempt_first['ts_epoch']
_task['finished_at'] = _attempt_ok['ts_epoch']
_task['duration'] = _attempt_ok['ts_epoch'] - _task['started_at']
_task['status'] = 'completed'
await _test_single_resource(cli, db, "/flows/{flow_id}/runs/{run_number}/steps/{step_name}/tasks/{task_id}".format(**_task), 200, _task)
async def test_task_with_attempt_ok_failed(cli, db):
_task = await create_task(db)
_attempt_first = await create_task_attempt_metadata(db, _task)
_artifact_first = await create_ok_artifact_for_task(db, _task)
_task['started_at'] = _attempt_first['ts_epoch']
_task['finished_at'] = _artifact_first['ts_epoch']
_task['duration'] = _task['finished_at'] - _task['started_at']
_task['status'] = 'failed'
_attempt_ok = await create_task_attempt_ok_metadata(db, _task, 0, False) # status = 'failed'
_task['finished_at'] = _attempt_ok['ts_epoch']
_task['duration'] = _attempt_ok['ts_epoch'] - _task['started_at']
await _test_single_resource(cli, db, "/flows/{flow_id}/runs/{run_number}/steps/{step_name}/tasks/{task_id}".format(**_task), 200, _task)
async def test_list_task_multiple_attempts_failure(cli, db):
_task = await create_task(db)
_attempt_first = await create_task_attempt_metadata(db, _task)
_artifact_first = await create_ok_artifact_for_task(db, _task)
_attempt_done_first = await create_task_attempt_done_metadata(db, _task)
_attempt_second = await create_task_attempt_metadata(db, _task, attempt=1)
_artifact_second = await create_ok_artifact_for_task(db, _task, attempt=1)
# Mark first attempt as 'failure' and second as 'completed'
_attempt_ok_first = await create_task_attempt_ok_metadata(db, _task, 0, False) # status = 'failed'
_attempt_ok_second = await create_task_attempt_ok_metadata(db, _task, 1, True) # status = 'completed'
_task_first_attempt = dict(_task)
_task_second_attempt = dict(_task)
_task_first_attempt['attempt_id'] = 0
_task_first_attempt['status'] = 'failed'
_task_first_attempt['started_at'] = _attempt_first['ts_epoch']
_task_first_attempt['finished_at'] = _attempt_done_first['ts_epoch']
_task_first_attempt['duration'] = _task_first_attempt['finished_at'] \
- _task_first_attempt['started_at']
_task_first_attempt['finished_at'] = _attempt_ok_first['ts_epoch']
_task_first_attempt['duration'] = _attempt_ok_first['ts_epoch'] - _task_first_attempt['started_at']
# Second attempt counts as completed as well due to the _task_ok existing.
_task_second_attempt['attempt_id'] = 1
_task_second_attempt['status'] = 'completed'
_task_second_attempt['started_at'] = _attempt_second['ts_epoch']
_task_second_attempt['finished_at'] = _artifact_second['ts_epoch']
_task_second_attempt['duration'] = _task_second_attempt['finished_at'] \
- _task_second_attempt['started_at']
_task_second_attempt['finished_at'] = _attempt_ok_second['ts_epoch']
_task_second_attempt['duration'] = _attempt_ok_second['ts_epoch'] - _task_second_attempt['started_at']
await _test_list_resources(cli, db, "/flows/{flow_id}/runs/{run_number}/steps/{step_name}/tasks?task_id={task_id}".format(**_task), 200, [_task_second_attempt, _task_first_attempt])
await _test_list_resources(cli, db, "/flows/{flow_id}/runs/{run_number}/steps/{step_name}/tasks/{task_id}/attempts".format(**_task), 200, [_task_second_attempt, _task_first_attempt])
async def test_task_attempts_with_attempt_metadata(cli, db):
_task = await create_task(db)
_task['duration'] = None
_task['status'] = 'pending'
await _test_list_resources(cli, db, "/flows/{flow_id}/runs/{run_number}/steps/{step_name}/tasks/{task_id}/attempts".format(**_task), 200, [_task])
_attempt_first = await create_task_attempt_metadata(db, _task)
_artifact_first = await create_ok_artifact_for_task(db, _task)
_attempt_done_first = await create_task_attempt_done_metadata(db, _task)
# attempt metadata is written but no artifacts exist yet.
# Queries should return a second attempt at this point already!
_attempt_second = await create_task_attempt_metadata(db, _task, attempt=1)
_task_first_attempt = dict(_task)
_task_second_attempt = dict(_task)
_task_first_attempt['attempt_id'] = 0
_task_first_attempt['task_ok'] = 'location' # should have location for status artifact
_task_first_attempt['status'] = 'unknown' # 'unknown' because we cannot determine correct status from DB as attempt_ok is missing
_task_first_attempt['started_at'] = _attempt_first['ts_epoch']
_task_first_attempt['finished_at'] = _attempt_done_first['ts_epoch']
_task_first_attempt['duration'] = _task_first_attempt['finished_at'] \
- _task_first_attempt['started_at']
_task_second_attempt['attempt_id'] = 1
_task_second_attempt['status'] = 'running'
_task_second_attempt['started_at'] = _attempt_second['ts_epoch']
_task_second_attempt['duration'] = int(round(time.time() * 1000)) - _task_second_attempt['started_at']
await _test_list_resources(cli, db, "/flows/{flow_id}/runs/{run_number}/steps/{step_name}/tasks?task_id={task_id}".format(**_task), 200, [_task_second_attempt, _task_first_attempt], approx_keys=["duration"])
await _test_list_resources(cli, db, "/flows/{flow_id}/runs/{run_number}/steps/{step_name}/tasks/{task_id}/attempts".format(**_task), 200, [_task_second_attempt, _task_first_attempt], approx_keys=["duration"])
# Write attempt_ok data for first attempt to check for status changes.
_first_attempt_ok = await create_task_attempt_ok_metadata(db, _task, 0, False)
# NOTE: in current implementation, attempt_ok overrides attempt-done as a more accurate timestamp for finished_at.
_task_first_attempt['finished_at'] = _first_attempt_ok['ts_epoch']
_task_first_attempt['duration'] = _task_first_attempt['finished_at'] \
- _task_first_attempt['started_at']
_task_first_attempt['task_ok'] = None # should have no task_ok location, as status can be determined from db.
_task_first_attempt['status'] = 'failed' # 'failed' because now we have attempt_ok false in db.
await _test_list_resources(cli, db, "/flows/{flow_id}/runs/{run_number}/steps/{step_name}/tasks?task_id={task_id}".format(**_task), 200, [_task_second_attempt, _task_first_attempt], approx_keys=["duration"])
await _test_list_resources(cli, db, "/flows/{flow_id}/runs/{run_number}/steps/{step_name}/tasks/{task_id}/attempts".format(**_task), 200, [_task_second_attempt, _task_first_attempt], approx_keys=["duration"])
async def test_task_attempt_statuses_with_attempt_ok_failed(cli, db):
_task = await create_task(db)
_task['duration'] = None
_task['status'] = 'pending'
await _test_list_resources(cli, db, "/flows/{flow_id}/runs/{run_number}/steps/{step_name}/tasks/{task_id}/attempts".format(**_task), 200, [_task])
_attempt_first = await create_task_attempt_metadata(db, _task)
_artifact_first = await create_ok_artifact_for_task(db, _task)
_attempt_done_first = await create_task_attempt_done_metadata(db, _task)
_attempt_ok_first = await create_task_attempt_ok_metadata(db, _task, 0, False) # status = 'failed'
_attempt_second = await create_task_attempt_metadata(db, _task, attempt=1)
_attempt_done_second = await create_task_attempt_done_metadata(db, _task, attempt=1)
_attempt_ok_second = await create_task_attempt_ok_metadata(db, _task, 1, True) # status = 'completed'
_task_first_attempt = dict(_task)
_task_second_attempt = dict(_task)
# NOTE: In the current implementation attempt_ok overrides attempt-done ts_epoch as the finished_at
# as a more accurate timestamp for when a task finished.
_task_first_attempt['attempt_id'] = 0
_task_first_attempt['status'] = 'failed'
_task_first_attempt['started_at'] = _attempt_first['ts_epoch']
_task_first_attempt['finished_at'] = _attempt_ok_first['ts_epoch']
_task_first_attempt['duration'] = _task_first_attempt['finished_at'] \
- _task_first_attempt['started_at']
_task_second_attempt['attempt_id'] = 1
_task_second_attempt['status'] = 'completed'
_task_second_attempt['started_at'] = _attempt_second['ts_epoch']
_task_second_attempt['finished_at'] = _attempt_ok_second['ts_epoch']
_task_second_attempt['duration'] = _task_second_attempt['finished_at'] \
- _task_second_attempt['started_at']
await _test_list_resources(cli, db, "/flows/{flow_id}/runs/{run_number}/steps/{step_name}/tasks?task_id={task_id}".format(**_task), 200, [_task_second_attempt, _task_first_attempt])
await _test_list_resources(cli, db, "/flows/{flow_id}/runs/{run_number}/steps/{step_name}/tasks/{task_id}/attempts".format(**_task), 200, [_task_second_attempt, _task_first_attempt])
# Test cases from the google docs table.
# status 'completed' tests
#
# STATUS: attempt_ok in task metadata for the attempt is set to True
# STARTED_AT: created_at property for attempt attribute for the attempt in task metadata
# FINISHED_AT: created_at property for attempt_ok attribute for the attempt in task metadata
# NOTE: for a more accurate finished_at timestamp, use the greatest timestamp out of task_ok / attempt_ok / attempt-done
# as this is the latest write_timestamp for the task
async def test_task_attempt_status_completed(cli, db):
_task = await create_task(db)
_task['duration'] = None
_task['status'] = 'pending'
await _test_list_resources(cli, db, "/flows/{flow_id}/runs/{run_number}/steps/{step_name}/tasks/{task_id}/attempts".format(**_task), 200, [_task])
_attempt = await create_task_attempt_metadata(db, _task, 0)
_attempt_ok = await create_task_attempt_ok_metadata(db, _task, 0, True)
_attempt_done = await create_task_attempt_done_metadata(db, _task, 0)
_task['status'] = 'completed'
_task['started_at'] = _attempt['ts_epoch']
_task['finished_at'] = _attempt_done['ts_epoch']
_task['duration'] = _task['finished_at'] - _task['started_at']
await _test_list_resources(cli, db, "/flows/{flow_id}/runs/{run_number}/steps/{step_name}/tasks/{task_id}/attempts".format(**_task), 200, [_task])
# status 'running' tests
#
# STATUS 'running':
# Has all of
# Has a start time (NOTE: this requires 'attempt' metadata to be present)
# attempt_ok does not exist in the task metadata
# Has logged a heartbeat in the last x minutes (NOTE: we actually rely on heartbeat for running status.)
# No subsequent attempt exists
# STARTED_AT: created_at property for attempt attribute for the attempt in task metadata
# FINISHED_AT: does not apply (NULL)
async def test_task_attempt_status_running(cli, db):
_task = await create_task(db, last_heartbeat_ts=get_heartbeat_ts()) # default status: 'running'
_task['duration'] = _task['last_heartbeat_ts'] * 1000 - _task['ts_epoch']
await _test_list_resources(cli, db, "/flows/{flow_id}/runs/{run_number}/steps/{step_name}/tasks/{task_id}/attempts".format(**_task), 200, [_task])
_attempt = await create_task_attempt_metadata(db, _task, 0)
_task['started_at'] = _attempt['ts_epoch']
_task['finished_at'] = None
_task['duration'] = _task['last_heartbeat_ts'] * 1000 - _task['started_at']
await _test_list_resources(cli, db, "/flows/{flow_id}/runs/{run_number}/steps/{step_name}/tasks/{task_id}/attempts".format(**_task), 200, [_task])
# status 'failed' tests
#
# STATUS:
# Either of
# attempt_ok in task metadata for the attempt is set to False
# No heartbeat has been logged for the task in the last x minutes and no new attempt has started
# A newer attempt exists
# STARTED_AT: created_at property for attempt attribute for the attempt in task metadata
# FINISHED_AT:
# Either of (in priority)
# created_at property for attempt_ok attribute for the attempt in task metadata
# The timestamp in the heartbeat column for the task if no subsequent attempt is detected
# If a subsequent attempt exists, use the start time of the subsequent attempt
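# Illustrative helper (not used by the tests): a plain-Python sketch of the finished_at
# priority described in the comment above. Arguments are optional epoch-millisecond
# timestamps; the names are placeholders, not part of the service API.
def _example_failed_finished_at(attempt_ok_ts=None, heartbeat_ts=None, next_attempt_started_at=None):
    if attempt_ok_ts is not None:
        return attempt_ok_ts
    if next_attempt_started_at is not None:
        return next_attempt_started_at
    return heartbeat_ts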
async def test_task_attempt_status_failed_with_existing_subsequent_attempt(cli, db):
_task = await create_task(db, last_heartbeat_ts=get_heartbeat_ts())
_task['duration'] = _task['last_heartbeat_ts'] * 1000 - _task['ts_epoch']
await _test_list_resources(cli, db, "/flows/{flow_id}/runs/{run_number}/steps/{step_name}/tasks/{task_id}/attempts".format(**_task), 200, [_task])
_first_attempt = dict(_task)
_second_attempt = dict(_task)
# we explicitly leave out attempt completion metadata for attempt 0 to test that it fails correctly
# when attempt 1 exists.
# ATTEMPT-0
_first_attempt_meta = await create_task_attempt_metadata(db, _task, 0)
_first_attempt['started_at'] = _first_attempt_meta['ts_epoch']
_first_attempt['duration'] = _first_attempt['last_heartbeat_ts'] * 1000 - _first_attempt['started_at']
await _test_list_resources(cli, db, "/flows/{flow_id}/runs/{run_number}/steps/{step_name}/tasks/{task_id}/attempts".format(**_task), 200, [_first_attempt])
# ATTEMPT-1
_second_attempt_meta = await create_task_attempt_metadata(db, _task, 1)
_second_attempt['attempt_id'] = 1
_second_attempt['started_at'] = _second_attempt_meta['ts_epoch']
_second_attempt['duration'] = _second_attempt['last_heartbeat_ts'] * 1000 - _second_attempt['started_at']
# first attempt should be failed due to second attempt existing.
# finished_at timestamp should be the started_at of the second attempt due to it existing.
_first_attempt['status'] = 'failed'
_first_attempt['finished_at'] = _second_attempt['started_at']
_first_attempt['duration'] = _first_attempt['finished_at'] - _first_attempt['started_at']
await _test_list_resources(cli, db, "/flows/{flow_id}/runs/{run_number}/steps/{step_name}/tasks/{task_id}/attempts".format(**_task), 200, [_second_attempt, _first_attempt])
# Resource Helpers / factories
async def create_ok_artifact_for_task(db, task, attempt=0):
"Creates and returns a _task_ok artifact for a task"
_task = (await add_artifact(
db,
flow_id=task.get("flow_id"),
run_number=task.get("run_number"),
run_id=task.get("run_id"),
step_name=task.get("step_name"),
task_id=task.get("task_id"),
task_name=task.get("task_name"),
artifact={
"name": "_task_ok",
"location": "location",
"ds_type": "ds_type",
"sha": "sha",
"type": "type",
"content_type": "content_type",
"attempt_id": attempt
})
).body
return _task
async def create_task(db, step=None, status="running", task_id=None, task_name=None, last_heartbeat_ts=None):
"Creates and returns a task with specific status. Optionally creates the task for a specific step if provided."
if not step:
_flow = (await add_flow(db, flow_id="HelloFlow")).body
_run = (await add_run(db, flow_id=_flow.get("flow_id"))).body
step = (await add_step(
db,
flow_id=_run.get("flow_id"),
run_number=_run.get("run_number"),
step_name="step")
).body
_task = (await add_task(
db,
flow_id=step.get("flow_id"),
run_number=step.get("run_number"),
step_name=step.get("step_name"),
task_id=task_id,
task_name=task_name,
last_heartbeat_ts=last_heartbeat_ts)
).body
_task['status'] = status
return _task
async def create_metadata_for_task(db, task, metadata={}, tags=None):
"Creates a metadata record for a task"
_meta = (await add_metadata(db,
flow_id=task.get("flow_id"),
run_number=task.get("run_number"),
run_id=task.get("run_id"),
step_name=task.get("step_name"),
task_id=task.get("task_id"),
task_name=task.get("task_name"),
tags=tags,
metadata=metadata)
).body
return _meta
async def create_task_attempt_metadata(db, task, attempt=0):
"Create 'attempt' metadata for a task"
return await create_metadata_for_task(
db,
task,
metadata={
"type": "attempt",
"field_name": "attempt",
"value": str(attempt)
}
)
async def create_task_attempt_done_metadata(db, task, attempt: int = 0):
"Create 'attempt-done' metadata for a task"
return await create_metadata_for_task(
db,
task,
metadata={
"type": "attempt-done",
"field_name": "attempt-done",
"value": str(attempt)
}
)
async def create_task_attempt_ok_metadata(db, task, attempt_id: int, attempt_ok: bool = False):
"Create 'attempt_ok' metadata for a task"
return await create_metadata_for_task(
db,
task,
tags=["attempt_id:{attempt_id}".format(attempt_id=attempt_id)],
metadata={
"type": "internal_attempt_status",
"field_name": "attempt_ok",
"value": str(attempt_ok)
}
)
|
tests/unit/test_parameters/test_geometric_parameters.py | manjunathnilugal/PyBaMM | 330 | 28594 | <reponame>manjunathnilugal/PyBaMM
#
# Tests for the standard parameters
#
import pybamm
import unittest
class TestGeometricParameters(unittest.TestCase):
def test_macroscale_parameters(self):
geo = pybamm.geometric_parameters
L_n = geo.L_n
L_s = geo.L_s
L_p = geo.L_p
L_x = geo.L_x
l_n = geo.l_n
l_s = geo.l_s
l_p = geo.l_p
parameter_values = pybamm.ParameterValues(
values={
"Negative electrode thickness [m]": 0.05,
"Separator thickness [m]": 0.02,
"Positive electrode thickness [m]": 0.21,
}
)
L_n_eval = parameter_values.process_symbol(L_n)
L_s_eval = parameter_values.process_symbol(L_s)
L_p_eval = parameter_values.process_symbol(L_p)
L_x_eval = parameter_values.process_symbol(L_x)
self.assertEqual(
(L_n_eval + L_s_eval + L_p_eval).evaluate(), L_x_eval.evaluate()
)
l_n_eval = parameter_values.process_symbol(l_n)
l_s_eval = parameter_values.process_symbol(l_s)
l_p_eval = parameter_values.process_symbol(l_p)
self.assertAlmostEqual((l_n_eval + l_s_eval + l_p_eval).evaluate(), 1)
if __name__ == "__main__":
print("Add -v for more debug output")
import sys
if "-v" in sys.argv:
debug = True
pybamm.settings.debug_mode = True
unittest.main()
|
L1Trigger/DTTrigger/python/dtTriggerPrimitiveDigis_cfi.py | ckamtsikis/cmssw | 852 | 28654 | <filename>L1Trigger/DTTrigger/python/dtTriggerPrimitiveDigis_cfi.py
import FWCore.ParameterSet.Config as cms
from L1TriggerConfig.DTTPGConfigProducers.L1DTTPGConfigFromDB_cff import *
dtTriggerPrimitiveDigis = cms.EDProducer("DTTrigProd",
debug = cms.untracked.bool(False),
# DT digis input tag
digiTag = cms.InputTag("muonDTDigis"),
# Convert output into DTTF sector numbering:
# false means [1-12] (useful for debug)
# true is [0-11] useful as input for the DTTF emulator
DTTFSectorNumbering = cms.bool(True),
# config params for dumping of LUTs info from emulator
lutBtic = cms.untracked.int32(31),
lutDumpFlag = cms.untracked.bool(False)
)
|
pythran/tests/cases/diffusion_pure_python.py | davidbrochart/pythran | 1,647 | 28665 | # Reference: http://continuum.io/blog/the-python-and-the-complied-python
#pythran export diffusePurePython(float [][], float [][], int)
#runas import numpy as np;lx,ly=(2**7,2**7);u=np.zeros([lx,ly],dtype=np.double);u[int(lx/2),int(ly/2)]=1000.0;tempU=np.zeros([lx,ly],dtype=np.double);diffusePurePython(u,tempU,500)
#bench import numpy as np;lx,ly=(2**6,2**6);u=np.zeros([lx,ly],dtype=np.double);u[int(lx/2),int(ly/2)]=1000.0;tempU=np.zeros([lx,ly],dtype=np.double);diffusePurePython(u,tempU,55)
import numpy as np
def diffusePurePython(u, tempU, iterNum):
"""
Apply nested iteration for the Forward-Euler Approximation
"""
mu = .1
row = u.shape[0]
col = u.shape[1]
for n in range(iterNum):
for i in range(1, row - 1):
for j in range(1, col - 1):
tempU[i, j] = u[i, j] + mu * (
u[i + 1, j] - 2 * u[i, j] + u[i - 1, j] +
u[i, j + 1] - 2 * u[i, j] + u[i, j - 1])
for i in range(1, row - 1):
for j in range(1, col - 1):
u[i, j] = tempU[i, j]
tempU[i, j] = 0.0
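# Illustrative sketch (not part of the original benchmark): an equivalent NumPy-vectorized
# form of the same Forward-Euler update, shown only for comparison with the explicit loops
# above. The pythran export/runas directives apply to diffusePurePython only.
def diffuse_vectorized(u, tempU, iterNum):
    mu = .1
    for n in range(iterNum):
        tempU[1:-1, 1:-1] = u[1:-1, 1:-1] + mu * (
            u[2:, 1:-1] - 2 * u[1:-1, 1:-1] + u[:-2, 1:-1] +
            u[1:-1, 2:] - 2 * u[1:-1, 1:-1] + u[1:-1, :-2])
        u[1:-1, 1:-1] = tempU[1:-1, 1:-1]
        tempU[1:-1, 1:-1] = 0.0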
|
torrent_client/algorithms/uploader.py | x0x/polygon | 141 | 28675 | <filename>torrent_client/algorithms/uploader.py<gh_stars>100-1000
import asyncio
import itertools
import logging
import random
import time
from typing import List, Iterable, cast
from torrent_client.algorithms.peer_manager import PeerManager
from torrent_client.models import Peer, TorrentInfo
from torrent_client.utils import humanize_size
class Uploader:
def __init__(self, torrent_info: TorrentInfo, logger: logging.Logger, peer_manager: PeerManager):
self._download_info = torrent_info.download_info
self._statistics = self._download_info.session_statistics
self._logger = logger
self._peer_manager = peer_manager
CHOKING_CHANGING_TIME = 10
UPLOAD_PEER_COUNT = 4
ITERS_PER_OPTIMISTIC_UNCHOKING = 3
CONNECTED_RECENTLY_THRESHOLD = 60
CONNECTED_RECENTLY_COEFF = 3
def _select_optimistically_unchoked(self, peers: Iterable[Peer]) -> Peer:
cur_time = time.time()
connected_recently = []
remaining_peers = []
peer_data = self._peer_manager.peer_data
for peer in peers:
if cur_time - peer_data[peer].connected_time <= Uploader.CONNECTED_RECENTLY_THRESHOLD:
connected_recently.append(peer)
else:
remaining_peers.append(peer)
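        # Recently connected peers effectively occupy CONNECTED_RECENTLY_COEFF slots each in
        # the random draw below, so they are proportionally more likely to be picked for
        # optimistic unchoking than peers that have been connected for a while.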
max_index = len(remaining_peers) + Uploader.CONNECTED_RECENTLY_COEFF * len(connected_recently) - 1
index = random.randint(0, max_index)
if index < len(remaining_peers):
return remaining_peers[index]
return connected_recently[(index - len(remaining_peers)) % len(connected_recently)]
def get_peer_upload_rate(self, peer: Peer) -> int:
data = self._peer_manager.peer_data[peer]
rate = data.client.downloaded # We owe them for downloading
if self._download_info.complete:
rate += data.client.uploaded # To reach maximal upload speed
return rate
async def execute(self):
prev_unchoked_peers = set()
optimistically_unchoked = None
for i in itertools.count():
peer_data = self._peer_manager.peer_data
alive_peers = list(sorted(peer_data.keys(), key=self.get_peer_upload_rate, reverse=True))
cur_unchoked_peers = set()
interested_count = 0
if Uploader.UPLOAD_PEER_COUNT:
if i % Uploader.ITERS_PER_OPTIMISTIC_UNCHOKING == 0:
if alive_peers:
optimistically_unchoked = self._select_optimistically_unchoked(alive_peers)
else:
optimistically_unchoked = None
if optimistically_unchoked is not None and optimistically_unchoked in peer_data:
cur_unchoked_peers.add(optimistically_unchoked)
if peer_data[optimistically_unchoked].client.peer_interested:
interested_count += 1
for peer in cast(List[Peer], alive_peers):
if interested_count == Uploader.UPLOAD_PEER_COUNT:
break
if peer_data[peer].client.peer_interested:
interested_count += 1
cur_unchoked_peers.add(peer)
for peer in prev_unchoked_peers - cur_unchoked_peers:
if peer in peer_data:
peer_data[peer].client.am_choking = True
for peer in cur_unchoked_peers:
peer_data[peer].client.am_choking = False
self._logger.debug('now %s peers are unchoked (total_uploaded = %s)', len(cur_unchoked_peers),
humanize_size(self._statistics.total_uploaded))
await asyncio.sleep(Uploader.CHOKING_CHANGING_TIME)
prev_unchoked_peers = cur_unchoked_peers
|
pylayers/antprop/tests/test_subarray.py | usmanwardag/pylayers | 143 | 28688 | from pylayers.antprop.aarray import *
import matplotlib.pyplot as plt
import pdb
print('--------------')
print('antprop/test_subarray.py')
print('--------------')
fcGHz = 60
lamda = 0.3/fcGHz
N1 = [ 4,4,1]
N2 = [ 2,2,1]
dm1 = [lamda/2.,lamda/2.,0]
dm2 = [3*lamda,3*lamda,0]
A1 = AntArray(fGHz=np.array([fcGHz]),N=N1,dm=dm1,typant='Omni')
A2 = AntArray(fGHz=np.array([fcGHz]),N=N2,dm=dm2,array=A1)
#A1.eval()
|
pyorient/ogm/commands.py | spy7/pyorient | 142 | 28727 | <reponame>spy7/pyorient
from ..utils import to_str
class VertexCommand(object):
def __init__(self, command_text):
self.command_text = command_text
def __str__(self):
return to_str(self.__unicode__())
def __unicode__(self):
return u'{}'.format(self.command_text)
class CreateEdgeCommand(object):
def __init__(self, command_text):
self.command_text = command_text
self.retries = None
def __str__(self):
return to_str(self.__unicode__())
def __unicode__(self):
if self.retries:
return u'{} RETRY {}'.format(self.command_text, self.retries)
else:
return u'{}'.format(self.command_text)
def retry(self, retries):
self.retries = retries
return self
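# Illustrative sketch (not part of the original module): how these command wrappers render
# to text. The command strings are placeholders, not statements taken from pyorient itself.
def _example_commands():
    v = VertexCommand("CREATE VERTEX Person SET name = 'example'")
    e = CreateEdgeCommand("CREATE EDGE Knows FROM #10:0 TO #10:1").retry(3)
    return str(v), str(e)  # the second string ends with ' RETRY 3'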
|
g2pk/special.py | elbum/g2pK | 136 | 28746 | # -*- coding: utf-8 -*-
'''
Special rule for processing Hangul
https://github.com/kyubyong/g2pK
'''
import re
from g2pk.utils import gloss, get_rule_id2text
rule_id2text = get_rule_id2text()
############################ vowels ############################
def jyeo(inp, descriptive=False, verbose=False):
rule = rule_id2text["5.1"]
    # treated as a general rule by kyubyong
out = re.sub("([ᄌᄍᄎ])ᅧ", r"\1ᅥ", inp)
gloss(verbose, out, inp, rule)
return out
def ye(inp, descriptive=False, verbose=False):
rule = rule_id2text["5.2"]
    # In practice, speakers pronounce 'ㅖ' as [ㅔ] except in 예, 녜, 셰, 쎼. by kyubyong
if descriptive:
out = re.sub("([ᄀᄁᄃᄄㄹᄆᄇᄈᄌᄍᄎᄏᄐᄑᄒ])ᅨ", r"\1ᅦ", inp)
else:
out = inp
gloss(verbose, out, inp, rule)
return out
def consonant_ui(inp, descriptive=False, verbose=False):
rule = rule_id2text["5.3"]
out = re.sub("([ᄀᄁᄂᄃᄄᄅᄆᄇᄈᄉᄊᄌᄍᄎᄏᄐᄑᄒ])ᅴ", r"\1ᅵ", inp)
gloss(verbose, out, inp, rule)
return out
def josa_ui(inp, descriptive=False, verbose=False):
rule = rule_id2text["5.4.2"]
    # In practice, speakers very often pronounce the particle '의' as [ㅔ].
if descriptive:
out = re.sub("의/J", "에", inp)
else:
out = inp.replace("/J", "")
gloss(verbose, out, inp, rule)
return out
def vowel_ui(inp, descriptive=False, verbose=False):
rule = rule_id2text["5.4.1"]
    # In practice, speakers very often pronounce '의' as [ㅣ] when it is not the first syllable of a word.
if descriptive:
out = re.sub("(\Sᄋ)ᅴ", r"\1ᅵ", inp)
else:
out = inp
gloss(verbose, out, inp, rule)
return out
def jamo(inp, descriptive=False, verbose=False):
rule = rule_id2text["16"]
out = inp
out = re.sub("([그])ᆮᄋ", r"\1ᄉ", out)
out = re.sub("([으])[ᆽᆾᇀᇂ]ᄋ", r"\1ᄉ", out)
out = re.sub("([으])[ᆿ]ᄋ", r"\1ᄀ", out)
out = re.sub("([으])[ᇁ]ᄋ", r"\1ᄇ", out)
gloss(verbose, out, inp, rule)
return out
############################ 어간 받침 ############################
def rieulgiyeok(inp, descriptive=False, verbose=False):
rule = rule_id2text["11.1"]
out = inp
out = re.sub("ᆰ/P([ᄀᄁ])", r"ᆯᄁ", out)
gloss(verbose, out, inp, rule)
return out
def rieulbieub(inp, descriptive=False, verbose=False):
rule = rule_id2text["25"]
out = inp
out = re.sub("([ᆲᆴ])/Pᄀ", r"\1ᄁ", out)
out = re.sub("([ᆲᆴ])/Pᄃ", r"\1ᄄ", out)
out = re.sub("([ᆲᆴ])/Pᄉ", r"\1ᄊ", out)
out = re.sub("([ᆲᆴ])/Pᄌ", r"\1ᄍ", out)
gloss(verbose, out, inp, rule)
return out
def verb_nieun(inp, descriptive=False, verbose=False):
rule = rule_id2text["24"]
out = inp
pairs = [ ("([ᆫᆷ])/Pᄀ", r"\1ᄁ"),
("([ᆫᆷ])/Pᄃ", r"\1ᄄ"),
("([ᆫᆷ])/Pᄉ", r"\1ᄊ"),
("([ᆫᆷ])/Pᄌ", r"\1ᄍ"),
("ᆬ/Pᄀ", "ᆫᄁ"),
("ᆬ/Pᄃ", "ᆫᄄ"),
("ᆬ/Pᄉ", "ᆫᄊ"),
("ᆬ/Pᄌ", "ᆫᄍ"),
("ᆱ/Pᄀ", "ᆷᄁ"),
("ᆱ/Pᄃ", "ᆷᄄ"),
("ᆱ/Pᄉ", "ᆷᄊ"),
("ᆱ/Pᄌ", "ᆷᄍ") ]
for str1, str2 in pairs:
out = re.sub(str1, str2, out)
gloss(verbose, out, inp, rule)
return out
def balb(inp, descriptive=False, verbose=False):
rule = rule_id2text["10.1"]
out = inp
syllable_final_or_consonants = "($|[^ᄋᄒ])"
# exceptions
out = re.sub(f"(바)ᆲ({syllable_final_or_consonants})", r"\1ᆸ\2", out)
out = re.sub(f"(너)ᆲ([ᄌᄍ]ᅮ|[ᄃᄄ]ᅮ)", r"\1ᆸ\2", out)
gloss(verbose, out, inp, rule)
return out
def palatalize(inp, descriptive=False, verbose=False):
rule = rule_id2text["17"]
out = inp
out = re.sub("ᆮᄋ([ᅵᅧ])", r"ᄌ\1", out)
out = re.sub("ᇀᄋ([ᅵᅧ])", r"ᄎ\1", out)
out = re.sub("ᆴᄋ([ᅵᅧ])", r"ᆯᄎ\1", out)
out = re.sub("ᆮᄒ([ᅵ])", r"ᄎ\1", out)
gloss(verbose, out, inp, rule)
return out
def modifying_rieul(inp, descriptive=False, verbose=False):
rule = rule_id2text["27"]
out = inp
pairs = [ ("ᆯ/E ᄀ", r"ᆯ ᄁ"),
("ᆯ/E ᄃ", r"ᆯ ᄄ"),
("ᆯ/E ᄇ", r"ᆯ ᄈ"),
("ᆯ/E ᄉ", r"ᆯ ᄊ"),
("ᆯ/E ᄌ", r"ᆯ ᄍ"),
("ᆯ걸", "ᆯ껄"),
("ᆯ밖에", "ᆯ빠께"),
("ᆯ세라", "ᆯ쎄라"),
("ᆯ수록", "ᆯ쑤록"),
("ᆯ지라도", "ᆯ찌라도"),
("ᆯ지언정", "ᆯ찌언정"),
("ᆯ진대", "ᆯ찐대") ]
for str1, str2 in pairs:
out = re.sub(str1, str2, out)
gloss(verbose, out, inp, rule)
return out
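# Illustrative sketch (not part of the original module): a minimal example of calling one of
# the rules above. The functions operate on strings decomposed into conjoining jamo (here
# obtained via NFD normalization, which is an assumption of this example); the sample word
# is an arbitrary choice.
def _example_jyeo(word=u'가져'):
    import unicodedata
    decomposed = unicodedata.normalize('NFD', word)        # '가져' -> '가져'
    return unicodedata.normalize('NFC', jyeo(decomposed))  # expected: '가저'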
|
ivi/tektronix/__init__.py | sacherjj/python-ivi | 161 | 28749 | """
Python Interchangeable Virtual Instrument Library
Copyright (c) 2012-2017 <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
# Oscilloscopes
# DPO4000
from .tektronixDPO4032 import tektronixDPO4032
from .tektronixDPO4034 import tektronixDPO4034
from .tektronixDPO4054 import tektronixDPO4054
from .tektronixDPO4104 import tektronixDPO4104
# MSO4000
from .tektronixMSO4032 import tektronixMSO4032
from .tektronixMSO4034 import tektronixMSO4034
from .tektronixMSO4054 import tektronixMSO4054
from .tektronixMSO4104 import tektronixMSO4104
# DPO4000B
from .tektronixDPO4014B import tektronixDPO4014B
from .tektronixDPO4034B import tektronixDPO4034B
from .tektronixDPO4054B import tektronixDPO4054B
from .tektronixDPO4102B import tektronixDPO4102B
from .tektronixDPO4104B import tektronixDPO4104B
# MSO4000B
from .tektronixMSO4014B import tektronixMSO4014B
from .tektronixMSO4034B import tektronixMSO4034B
from .tektronixMSO4054B import tektronixMSO4054B
from .tektronixMSO4102B import tektronixMSO4102B
from .tektronixMSO4104B import tektronixMSO4104B
# MDO4000
from .tektronixMDO4054 import tektronixMDO4054
from .tektronixMDO4104 import tektronixMDO4104
# MDO4000B
from .tektronixMDO4014B import tektronixMDO4014B
from .tektronixMDO4034B import tektronixMDO4034B
from .tektronixMDO4054B import tektronixMDO4054B
from .tektronixMDO4104B import tektronixMDO4104B
# MDO3000
from .tektronixMDO3012 import tektronixMDO3012
from .tektronixMDO3014 import tektronixMDO3014
from .tektronixMDO3022 import tektronixMDO3022
from .tektronixMDO3024 import tektronixMDO3024
from .tektronixMDO3032 import tektronixMDO3032
from .tektronixMDO3034 import tektronixMDO3034
from .tektronixMDO3052 import tektronixMDO3052
from .tektronixMDO3054 import tektronixMDO3054
from .tektronixMDO3102 import tektronixMDO3102
from .tektronixMDO3104 import tektronixMDO3104
# Function Generators
from .tektronixAWG2005 import tektronixAWG2005
from .tektronixAWG2020 import tektronixAWG2020
from .tektronixAWG2021 import tektronixAWG2021
from .tektronixAWG2040 import tektronixAWG2040
from .tektronixAWG2041 import tektronixAWG2041
# Power Supplies
from .tektronixPS2520G import tektronixPS2520G
from .tektronixPS2521G import tektronixPS2521G
# Optical attenuators
from .tektronixOA5002 import tektronixOA5002
from .tektronixOA5012 import tektronixOA5012
from .tektronixOA5022 import tektronixOA5022
from .tektronixOA5032 import tektronixOA5032
# Current probe amplifiers
from .tektronixAM5030 import tektronixAM5030
|
scripts/logfetch/search.py | madhuri7112/Singularity | 692 | 28790 | import os
import re
import fnmatch
from logfetch_base import log, is_in_date_range
from termcolor import colored
def find_cached_logs(args):
matching_logs = []
log_fn_match = get_matcher(args)
for filename in os.listdir(args.dest):
if fnmatch.fnmatch(filename, log_fn_match) and in_date_range(args, filename):
log(colored('Including log {0}\n'.format(filename), 'blue'), args, True)
matching_logs.append('{0}/{1}'.format(args.dest, filename))
else:
log(colored('Excluding log {0}, not in date range\n'.format(filename), 'magenta'), args, True)
return matching_logs
def in_date_range(args, filename):
timestamps = re.findall(r"-\d{13}-", filename)
if timestamps:
return is_in_date_range(args, int(str(timestamps[-1]).replace("-", "")[0:-3]))
else:
return True
def get_matcher(args):
if args.taskId:
if 'filename' in args.file_pattern and args.logtype:
return '{0}*{1}*'.format(args.taskId, args.logtype)
else:
return '{0}*'.format(args.taskId)
elif args.deployId and args.requestId:
if 'filename' in args.file_pattern and args.logtype:
return '{0}-{1}*{2}*'.format(args.requestId, args.deployId, args.logtype)
else:
return '{0}-{1}*'.format(args.requestId, args.deployId)
else:
if 'filename' in args.file_pattern and args.logtype:
return '{0}*{1}*'.format(args.requestId, args.logtype)
else:
return '{0}*'.format(args.requestId) |
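# Illustrative glob patterns produced by get_matcher (values are examples; the
# logtype variants assume 'filename' appears in args.file_pattern):
#   taskId='task-1', logtype='stdout'            -> 'task-1*stdout*'
#   requestId='req', deployId='dep', no logtype  -> 'req-dep*'
#   requestId='req' only, no logtype             -> 'req*'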
Codes/Liam/001_two_sum.py | liuxiaohui1221/algorithm | 256 | 28809 | <reponame>liuxiaohui1221/algorithm
# Runtime: 348 ms
# Memory usage: 13 MB
# Approach: hash map
class Solution(object):
def twoSum(self, nums, target):
"""
:type nums: List[int]
:type target: int
:rtype: List[int]
"""
        # build a hash map {value: index}
record = {}
        # iterate over the array
for idx, value in enumerate(nums):
            # if the complement is already in the hash map, return its stored index and the current idx
            # otherwise, store value: idx in the hash map
if (target - value) in record.keys():
return [record[target - value], idx]
else:
record[value] = idx
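# Quick usage sketch (example values chosen for illustration):
#   Solution().twoSum([2, 7, 11, 15], 9)  # -> [0, 1], since 2 + 7 == 9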
|
tests/test_socfaker_application.py | priamai/soc-faker | 122 | 28825 | def test_socfaker_application_status(socfaker_fixture):
assert socfaker_fixture.application.status in ['Active', 'Inactive', 'Legacy']
def test_socfaker_application_account_status(socfaker_fixture):
assert socfaker_fixture.application.account_status in ['Enabled', 'Disabled']
def test_socfaker_name(socfaker_fixture):
assert socfaker_fixture.application.name
def test_socfaker_application_logon_timestamp(socfaker_fixture):
assert socfaker_fixture.application.logon_timestamp |
questions/max-number-of-k-sum-pairs/Solution.py | marcus-aurelianus/leetcode-solutions | 141 | 28828 | """
You are given an integer array nums and an integer k.
In one operation, you can pick two numbers from the array whose sum equals k and remove them from the array.
Return the maximum number of operations you can perform on the array.
Example 1:
Input: nums = [1,2,3,4], k = 5
Output: 2
Explanation: Starting with nums = [1,2,3,4]:
- Remove numbers 1 and 4, then nums = [2,3]
- Remove numbers 2 and 3, then nums = []
There are no more pairs that sum up to 5, hence a total of 2 operations.
Example 2:
Input: nums = [3,1,3,4,3], k = 6
Output: 1
Explanation: Starting with nums = [3,1,3,4,3]:
- Remove the first two 3's, then nums = [1,4,3]
There are no more pairs that sum up to 6, hence a total of 1 operation.
Constraints:
1 <= nums.length <= 10^5
1 <= nums[i] <= 10^9
1 <= k <= 10^9
"""
import collections
from typing import List
class Solution:
def maxOperations(self, nums: List[int], k: int) -> int:
c = collections.Counter(nums)
r = 0
for n, v in c.items():
t = k - n
if t not in c:
continue
if t == n:
m = v // 2
r += m
c[n] = v - m
continue
m = min(v, c[t])
r += m
c[n] = v - m
c[t] = c[t] - m
return r |
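# Usage sketch matching the examples in the docstring above:
#   Solution().maxOperations([1, 2, 3, 4], 5)     # -> 2
#   Solution().maxOperations([3, 1, 3, 4, 3], 6)  # -> 1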
csv2ofx/mappings/starling.py | mibanescu/csv2ofx | 153 | 28836 | <reponame>mibanescu/csv2ofx
from __future__ import (
absolute_import, division, print_function, unicode_literals)
from operator import itemgetter
def fixdate(ds):
dmy = ds.split('/')
    # NOTE (flagged as a possible bug, copied from ubs-ch-fr.py): swaps day and month, turning DD/MM/YYYY into MM.DD.YYYY
return '.'.join((dmy[1], dmy[0], dmy[2]))
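# Example of the date reshuffle performed by fixdate (illustrative value):
#   fixdate('25/12/2020') -> '12.25.2020'   # DD/MM/YYYY in, MM.DD.YYYY out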
mapping = {
'has_header': True,
'date': lambda tr: fixdate(tr['Date']),
'amount': itemgetter('Amount (GBP)'),
'desc': itemgetter('Reference'),
'payee': itemgetter('Counter Party')
}
|
Source/Tools/BindTool/writer.py | ssinai1/rbfx | 441 | 28841 | <filename>Source/Tools/BindTool/writer.py<gh_stars>100-1000
class InterfaceWriter(object):
def __init__(self, output_path):
self._output_path_template = output_path + '/_{key}_{subsystem}.i'
self._fp = {
'pre': {},
'post': {},
}
def _write(self, key, subsystem, text):
subsystem = subsystem.lower()
fp = self._fp[key].get(subsystem)
if fp is None:
self._fp[key][subsystem] = fp = open(self._output_path_template.format(key=key, subsystem=subsystem), 'w+')
fp.write(text)
fp.write('\n')
def write_pre(self, subsystem, text):
self._write('pre', subsystem, text)
def write_post(self, subsystem, text):
self._write('post', subsystem, text)
def close(self):
for fp_map in self._fp.values():
for fp in fp_map.values():
fp.close()
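# Minimal usage sketch (the output directory and the SWIG fragment strings below are
# assumptions for illustration, not taken from the BindTool sources):
#   writer = InterfaceWriter('/tmp/bindings')
#   writer.write_pre('Graphics', '%rename(MyGraphics) Urho3D::Graphics;')
#   writer.write_post('Graphics', '%include "Graphics/Graphics.h"')
#   writer.close()   # produces _pre_graphics.i and _post_graphics.i under /tmp/bindings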
|
training/scripts/trainer/src/convert/convert.py | TommyTeaVee/training | 2,442 | 28876 | <reponame>TommyTeaVee/training<filename>training/scripts/trainer/src/convert/convert.py<gh_stars>1000+
import os
import argparse
import numpy as np
import tensorflow as tf
from tensorflow.python.platform import gfile
from tensorflow.python.framework import dtypes
from tensorflow.python.tools import strip_unused_lib
tf.enable_eager_execution()
parser = argparse.ArgumentParser()
# export types
parser.add_argument("--coreml", action="store_true")
parser.add_argument("--tflite", action="store_true")
parser.add_argument("--tfjs", action="store_true")
parser.add_argument("--model-type", type=str)
# import paths
parser.add_argument("--saved-model", type=str)
# export paths
parser.add_argument("--mlmodel-path", type=str)
parser.add_argument("--tflite-path", type=str)
parser.add_argument("--tfjs-path", type=str)
args = parser.parse_args()
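# Example invocation built from the flags above (paths are placeholders, not real ones):
#   python convert.py --model-type classification --saved-model ./exported_model \
#       --tflite --tflite-path ./model.tflite --coreml --mlmodel-path ./model.mlmodel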
def print_header(msg):
print(" " * 80)
print("_" * 80)
print(msg)
def print_footer(msg):
print(msg)
print("_" * 80)
print(" " * 80)
def attempt_conversion(model_type, model_format):
    # Decorator factory: the returned wrapper runs the wrapped conversion function
    # immediately at decoration time, printing a header/footer and swallowing errors.
    def run_conversion(convert):
        try:
            print_header(f"Converting {model_type} model to {model_format}")
            convert()
            print_footer(f"Successfully converted to {model_format}")
        except Exception as e:
            print(e)
            print_footer(f"Unable to convert to {model_format}")
    return run_conversion
def get_anchors(graph):
"""
Computes the list of anchor boxes by sending a fake image through the graph.
Outputs an array of size (4, num_anchors) where each element is an anchor box
given as [ycenter, xcenter, height, width] in normalized coordinates.
"""
with tf.Session(graph=graph) as sess:
anchors_tensor = "Concatenate/concat:0"
image_tensor = graph.get_tensor_by_name("image_tensor:0")
box_corners_tensor = graph.get_tensor_by_name(anchors_tensor)
box_corners = sess.run(
box_corners_tensor, feed_dict={image_tensor: np.zeros((1, 300, 300, 3))}
)
# The TensorFlow graph gives each anchor box as [ymin, xmin, ymax, xmax].
# Convert these min/max values to a center coordinate, width and height.
ymin, xmin, ymax, xmax = np.transpose(box_corners)
width = xmax - xmin
height = ymax - ymin
ycenter = ymin + height / 2.0
xcenter = xmin + width / 2.0
return np.stack([ycenter, xcenter, height, width])
def strip_and_freeze_model(
saved_model, output_path, input_node_names=[], output_node_names=[]
):
graph = tf.Graph()
with tf.Session(graph=graph) as sess:
print("loading model...")
tf.saved_model.loader.load(sess, [tf.saved_model.SERVING], saved_model)
print("stripping unused ops...")
gdef = strip_unused_lib.strip_unused(
input_graph_def=tf.get_default_graph().as_graph_def(),
input_node_names=input_node_names,
output_node_names=output_node_names,
placeholder_type_enum=dtypes.float32.as_datatype_enum,
)
gdef = tf.graph_util.convert_variables_to_constants(
sess, gdef, output_node_names
)
with gfile.GFile(output_path, "wb") as f:
print("writing frozen model...")
f.write(gdef.SerializeToString())
return graph
os.makedirs(".tmp", exist_ok=True)
################################################################################
# Object Detection
################################################################################
if args.model_type == "localization":
labels_path = os.path.join(args.saved_model, "labels.json")
@attempt_conversion("object detection", "Core ML")
def convert_object_detection_coreml():
if args.coreml:
from convert.convert_to_core_ml import convert_localization
frozen_model = ".tmp/coreml_frozen_model.pb"
graph = strip_and_freeze_model(
saved_model=args.saved_model,
output_path=frozen_model,
input_node_names=["Preprocessor/sub"],
output_node_names=["Squeeze", "Postprocessor/convert_scores"],
)
anchors = get_anchors(graph)
convert_localization(
frozen_model=frozen_model,
labels_path=labels_path,
output_path=args.mlmodel_path,
anchors=anchors,
)
@attempt_conversion("object detection", "TensorFlow Lite")
def convert_object_detection_tflite():
if args.tflite:
from convert.convert_to_tflite import convert_localization
frozen_model = ".tmp/tflite_frozen_model.pb"
graph = strip_and_freeze_model(
saved_model=args.saved_model,
output_path=frozen_model,
input_node_names=["Preprocessor/sub"],
output_node_names=["Squeeze", "Postprocessor/convert_scores"],
)
anchors = get_anchors(graph)
convert_localization(
frozen_model=frozen_model,
labels_path=labels_path,
output_path=args.tflite_path,
anchors=anchors,
)
@attempt_conversion("object detection", "TensorFlow.js")
def convert_object_detection_tfjs():
if args.tfjs:
from convert.convert_to_tfjs import convert_localization
frozen_model = ".tmp/tfjs_frozen_model.pb"
strip_and_freeze_model(
saved_model=args.saved_model,
output_path=frozen_model,
input_node_names=[],
output_node_names=["Postprocessor/ExpandDims_1", "Postprocessor/Slice"],
)
convert_localization(
frozen_model=frozen_model,
labels_path=labels_path,
output_path=args.tfjs_path,
)
################################################################################
# Classification
################################################################################
if args.model_type == "classification":
labels_path = os.path.join(args.saved_model, "labels.txt")
@attempt_conversion("classification", "Core ML")
def convert_classification_coreml():
if args.coreml:
from convert.convert_to_core_ml import convert_classification
frozen_model = ".tmp/coreml_frozen_model.pb"
strip_and_freeze_model(
saved_model=args.saved_model,
output_path=frozen_model,
input_node_names=["Placeholder"],
output_node_names=["final_result"],
)
convert_classification(
frozen_model=frozen_model,
labels_path=labels_path,
output_path=args.mlmodel_path,
)
@attempt_conversion("classification", "TensorFlow Lite")
def convert_classification_tflite():
if args.tflite:
from convert.convert_to_tflite import convert_classification
frozen_model = ".tmp/tflite_frozen_model.pb"
strip_and_freeze_model(
saved_model=args.saved_model,
output_path=frozen_model,
input_node_names=["Placeholder"],
output_node_names=["final_result"],
)
convert_classification(
frozen_model=frozen_model,
labels_path=labels_path,
output_path=args.tflite_path,
)
@attempt_conversion("classification", "TensorFlow.js")
def convert_classification_tfjs():
if args.tfjs:
from convert.convert_to_tfjs import convert_classification
frozen_model = ".tmp/tfjs_frozen_model.pb"
strip_and_freeze_model(
saved_model=args.saved_model,
output_path=frozen_model,
input_node_names=["Placeholder"],
output_node_names=["final_result"],
)
convert_classification(
frozen_model=frozen_model,
labels_path=labels_path,
output_path=args.tfjs_path,
)
|
plugins/modules/oci_resource_manager_template.py | slmjy/oci-ansible-collection | 108 | 28881 | <reponame>slmjy/oci-ansible-collection<gh_stars>100-1000
#!/usr/bin/python
# Copyright (c) 2020, 2021 Oracle and/or its affiliates.
# This software is made available to you under the terms of the GPL 3.0 license or the Apache 2.0 license.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Apache License v2.0
# See LICENSE.TXT for details.
# GENERATED FILE - DO NOT EDIT - MANUAL CHANGES WILL BE OVERWRITTEN
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
"metadata_version": "1.1",
"status": ["preview"],
"supported_by": "community",
}
DOCUMENTATION = """
---
module: oci_resource_manager_template
short_description: Manage a Template resource in Oracle Cloud Infrastructure
description:
- This module allows the user to create, update and delete a Template resource in Oracle Cloud Infrastructure
- For I(state=present), creates a private template in the specified compartment.
- "This resource has the following action operations in the M(oracle.oci.oci_resource_manager_template_actions) module: change_compartment."
version_added: "2.9.0"
author: Oracle (@oracle)
options:
compartment_id:
description:
- The L(OCID,https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm) of the compartment containing this template.
- Required for create using I(state=present).
type: str
display_name:
description:
- The template's display name. Avoid entering confidential information.
- Required for create using I(state=present).
- Required for update, delete when environment variable C(OCI_USE_NAME_AS_IDENTIFIER) is set.
- This parameter is updatable when C(OCI_USE_NAME_AS_IDENTIFIER) is not set.
type: str
aliases: ["name"]
description:
description:
- Description of the template. Avoid entering confidential information.
- Required for create using I(state=present).
- This parameter is updatable.
type: str
long_description:
description:
- Detailed description of the template. This description is displayed in the Console page listing templates when the template is expanded. Avoid
entering confidential information.
- This parameter is updatable.
type: str
logo_file_base64_encoded:
description:
- "Base64-encoded logo to use as the template icon.
Template icon file requirements: PNG format, 50 KB maximum, 110 x 110 pixels."
- This parameter is updatable.
type: str
template_config_source:
description:
- ""
- Required for create using I(state=present).
- This parameter is updatable.
type: dict
suboptions:
template_config_source_type:
description:
- Specifies the `configSourceType` for uploading the Terraform configuration.
- This parameter is updatable.
type: str
choices:
- "ZIP_UPLOAD"
required: true
zip_file_base64_encoded:
description:
- ""
- This parameter is updatable.
- Applicable when template_config_source_type is 'ZIP_UPLOAD'
type: str
freeform_tags:
description:
- "Free-form tags associated with the resource. Each tag is a key-value pair with no predefined name, type, or namespace.
For more information, see L(Resource Tags,https://docs.cloud.oracle.com/iaas/Content/General/Concepts/resourcetags.htm).
Example: `{\\"Department\\": \\"Finance\\"}`"
- This parameter is updatable.
type: dict
defined_tags:
description:
- "Defined tags for this resource. Each key is predefined and scoped to a namespace.
For more information, see L(Resource Tags,https://docs.cloud.oracle.com/iaas/Content/General/Concepts/resourcetags.htm).
Example: `{\\"Operations\\": {\\"CostCenter\\": \\"42\\"}}`"
- This parameter is updatable.
type: dict
template_id:
description:
- The L(OCID,https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm) of the template.
- Required for update using I(state=present) when environment variable C(OCI_USE_NAME_AS_IDENTIFIER) is not set.
- Required for delete using I(state=absent) when environment variable C(OCI_USE_NAME_AS_IDENTIFIER) is not set.
type: str
aliases: ["id"]
state:
description:
- The state of the Template.
- Use I(state=present) to create or update a Template.
- Use I(state=absent) to delete a Template.
type: str
required: false
default: 'present'
choices: ["present", "absent"]
extends_documentation_fragment: [ oracle.oci.oracle, oracle.oci.oracle_creatable_resource, oracle.oci.oracle_wait_options ]
"""
EXAMPLES = """
- name: Create template
oci_resource_manager_template:
# required
compartment_id: "ocid1.compartment.oc1..xxxxxxEXAMPLExxxxxx"
display_name: display_name_example
description: description_example
template_config_source:
# required
template_config_source_type: ZIP_UPLOAD
zip_file_base64_encoded: zip_file_base64_encoded_example
# optional
long_description: long_description_example
logo_file_base64_encoded: logo_file_base64_encoded_example
freeform_tags: {'Department': 'Finance'}
defined_tags: {'Operations': {'CostCenter': 'US'}}
- name: Update template
oci_resource_manager_template:
# required
template_id: "ocid1.template.oc1..xxxxxxEXAMPLExxxxxx"
# optional
display_name: display_name_example
description: description_example
long_description: long_description_example
logo_file_base64_encoded: logo_file_base64_encoded_example
template_config_source:
# required
template_config_source_type: ZIP_UPLOAD
zip_file_base64_encoded: zip_file_base64_encoded_example
freeform_tags: {'Department': 'Finance'}
defined_tags: {'Operations': {'CostCenter': 'US'}}
- name: Update template using name (when environment variable OCI_USE_NAME_AS_IDENTIFIER is set)
oci_resource_manager_template:
# required
display_name: display_name_example
# optional
description: description_example
long_description: long_description_example
logo_file_base64_encoded: logo_file_base64_encoded_example
template_config_source:
# required
template_config_source_type: ZIP_UPLOAD
zip_file_base64_encoded: zip_file_base64_encoded_example
freeform_tags: {'Department': 'Finance'}
defined_tags: {'Operations': {'CostCenter': 'US'}}
- name: Delete template
oci_resource_manager_template:
# required
template_id: "ocid1.template.oc1..xxxxxxEXAMPLExxxxxx"
state: absent
- name: Delete template using name (when environment variable OCI_USE_NAME_AS_IDENTIFIER is set)
oci_resource_manager_template:
# required
display_name: display_name_example
state: absent
"""
RETURN = """
template:
description:
- Details of the Template resource acted upon by the current operation
returned: on success
type: complex
contains:
id:
description:
- Unique identifier (L(OCID,https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm)) for the template.
returned: on success
type: str
sample: "ocid1.resource.oc1..xxxxxxEXAMPLExxxxxx"
compartment_id:
description:
- The L(OCID,https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm) of the compartment containing this template.
returned: on success
type: str
sample: "ocid1.compartment.oc1..xxxxxxEXAMPLExxxxxx"
category_id:
description:
- Unique identifier for the category where the template is located.
Possible values are `0` (Quick Starts), `1` (Service), `2` (Architecture), and `3` (Private).
returned: on success
type: str
sample: "ocid1.category.oc1..xxxxxxEXAMPLExxxxxx"
display_name:
description:
- Human-readable name of the template.
returned: on success
type: str
sample: display_name_example
description:
description:
- Brief description of the template.
returned: on success
type: str
sample: description_example
long_description:
description:
- Detailed description of the template. This description is displayed in the Console page listing templates when the template is expanded. Avoid
entering confidential information.
returned: on success
type: str
sample: long_description_example
is_free_tier:
description:
- whether the template will work for free tier tenancy.
returned: on success
type: bool
sample: true
time_created:
description:
- "The date and time at which the template was created.
Format is defined by RFC3339.
Example: `2020-11-25T21:10:29.600Z`"
returned: on success
type: str
sample: "2013-10-20T19:20:30+01:00"
template_config_source:
description:
- ""
returned: on success
type: complex
contains:
template_config_source_type:
description:
- The type of configuration source to use for the template configuration.
returned: on success
type: str
sample: ZIP_UPLOAD
lifecycle_state:
description:
- The current lifecycle state of the template.
returned: on success
type: str
sample: ACTIVE
freeform_tags:
description:
- "Free-form tags associated with the resource. Each tag is a key-value pair with no predefined name, type, or namespace.
For more information, see L(Resource Tags,https://docs.cloud.oracle.com/iaas/Content/General/Concepts/resourcetags.htm).
Example: `{\\"Department\\": \\"Finance\\"}`"
returned: on success
type: dict
sample: {'Department': 'Finance'}
defined_tags:
description:
- "Defined tags for this resource. Each key is predefined and scoped to a namespace.
For more information, see L(Resource Tags,https://docs.cloud.oracle.com/iaas/Content/General/Concepts/resourcetags.htm).
Example: `{\\"Operations\\": {\\"CostCenter\\": \\"42\\"}}`"
returned: on success
type: dict
sample: {'Operations': {'CostCenter': 'US'}}
sample: {
"id": "ocid1.resource.oc1..xxxxxxEXAMPLExxxxxx",
"compartment_id": "ocid1.compartment.oc1..xxxxxxEXAMPLExxxxxx",
"category_id": "ocid1.category.oc1..xxxxxxEXAMPLExxxxxx",
"display_name": "display_name_example",
"description": "description_example",
"long_description": "long_description_example",
"is_free_tier": true,
"time_created": "2013-10-20T19:20:30+01:00",
"template_config_source": {
"template_config_source_type": "ZIP_UPLOAD"
},
"lifecycle_state": "ACTIVE",
"freeform_tags": {'Department': 'Finance'},
"defined_tags": {'Operations': {'CostCenter': 'US'}}
}
"""
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.oracle.oci.plugins.module_utils import (
oci_common_utils,
oci_wait_utils,
)
from ansible_collections.oracle.oci.plugins.module_utils.oci_resource_utils import (
OCIResourceHelperBase,
get_custom_class,
)
try:
from oci.resource_manager import ResourceManagerClient
from oci.resource_manager.models import CreateTemplateDetails
from oci.resource_manager.models import UpdateTemplateDetails
HAS_OCI_PY_SDK = True
except ImportError:
HAS_OCI_PY_SDK = False
class TemplateHelperGen(OCIResourceHelperBase):
"""Supported operations: create, update, get, list and delete"""
def get_module_resource_id_param(self):
return "template_id"
def get_module_resource_id(self):
return self.module.params.get("template_id")
def get_get_fn(self):
return self.client.get_template
def get_resource(self):
return oci_common_utils.call_with_backoff(
self.client.get_template, template_id=self.module.params.get("template_id"),
)
def get_required_kwargs_for_list(self):
return dict()
def get_optional_kwargs_for_list(self):
optional_list_method_params = ["compartment_id", "template_id", "display_name"]
return dict(
(param, self.module.params[param])
for param in optional_list_method_params
if self.module.params.get(param) is not None
and (
self._use_name_as_identifier()
or (
not self.module.params.get("key_by")
or param in self.module.params.get("key_by")
)
)
)
def list_resources(self):
required_kwargs = self.get_required_kwargs_for_list()
optional_kwargs = self.get_optional_kwargs_for_list()
kwargs = oci_common_utils.merge_dicts(required_kwargs, optional_kwargs)
return oci_common_utils.list_all_resources(self.client.list_templates, **kwargs)
def get_create_model_class(self):
return CreateTemplateDetails
def get_exclude_attributes(self):
return ["logo_file_base64_encoded"]
def create_resource(self):
create_details = self.get_create_model()
return oci_wait_utils.call_and_wait(
call_fn=self.client.create_template,
call_fn_args=(),
call_fn_kwargs=dict(create_template_details=create_details,),
waiter_type=oci_wait_utils.LIFECYCLE_STATE_WAITER_KEY,
operation=oci_common_utils.CREATE_OPERATION_KEY,
waiter_client=self.get_waiter_client(),
resource_helper=self,
wait_for_states=self.get_wait_for_states_for_operation(
oci_common_utils.CREATE_OPERATION_KEY,
),
)
def get_update_model_class(self):
return UpdateTemplateDetails
def update_resource(self):
update_details = self.get_update_model()
return oci_wait_utils.call_and_wait(
call_fn=self.client.update_template,
call_fn_args=(),
call_fn_kwargs=dict(
template_id=self.module.params.get("template_id"),
update_template_details=update_details,
),
waiter_type=oci_wait_utils.LIFECYCLE_STATE_WAITER_KEY,
operation=oci_common_utils.UPDATE_OPERATION_KEY,
waiter_client=self.get_waiter_client(),
resource_helper=self,
wait_for_states=self.get_wait_for_states_for_operation(
oci_common_utils.UPDATE_OPERATION_KEY,
),
)
def delete_resource(self):
return oci_wait_utils.call_and_wait(
call_fn=self.client.delete_template,
call_fn_args=(),
call_fn_kwargs=dict(template_id=self.module.params.get("template_id"),),
waiter_type=oci_wait_utils.LIFECYCLE_STATE_WAITER_KEY,
operation=oci_common_utils.DELETE_OPERATION_KEY,
waiter_client=self.get_waiter_client(),
resource_helper=self,
wait_for_states=self.get_wait_for_states_for_operation(
oci_common_utils.DELETE_OPERATION_KEY,
),
)
TemplateHelperCustom = get_custom_class("TemplateHelperCustom")
class ResourceHelper(TemplateHelperCustom, TemplateHelperGen):
pass
def main():
module_args = oci_common_utils.get_common_arg_spec(
supports_create=True, supports_wait=True
)
module_args.update(
dict(
compartment_id=dict(type="str"),
display_name=dict(aliases=["name"], type="str"),
description=dict(type="str"),
long_description=dict(type="str"),
logo_file_base64_encoded=dict(type="str"),
template_config_source=dict(
type="dict",
options=dict(
template_config_source_type=dict(
type="str", required=True, choices=["ZIP_UPLOAD"]
),
zip_file_base64_encoded=dict(type="str"),
),
),
freeform_tags=dict(type="dict"),
defined_tags=dict(type="dict"),
template_id=dict(aliases=["id"], type="str"),
state=dict(type="str", default="present", choices=["present", "absent"]),
)
)
module = AnsibleModule(argument_spec=module_args, supports_check_mode=True)
if not HAS_OCI_PY_SDK:
module.fail_json(msg="oci python sdk required for this module.")
resource_helper = ResourceHelper(
module=module,
resource_type="template",
service_client_class=ResourceManagerClient,
namespace="resource_manager",
)
result = dict(changed=False)
if resource_helper.is_delete_using_name():
result = resource_helper.delete_using_name()
elif resource_helper.is_delete():
result = resource_helper.delete()
elif resource_helper.is_update_using_name():
result = resource_helper.update_using_name()
elif resource_helper.is_update():
result = resource_helper.update()
elif resource_helper.is_create():
result = resource_helper.create()
module.exit_json(**result)
if __name__ == "__main__":
main()
|
mayan/apps/documents/tests/test_links.py | bonitobonita24/Mayan-EDMS | 343 | 28898 | from django.urls import reverse
from ..links.document_file_links import (
link_document_file_delete, link_document_file_download_quick
)
from ..links.favorite_links import (
link_document_favorites_add, link_document_favorites_remove
)
from ..links.trashed_document_links import link_document_restore
from ..models import TrashedDocument
from ..permissions import (
permission_document_file_delete, permission_document_file_download,
permission_document_view, permission_trashed_document_restore
)
from .base import GenericDocumentViewTestCase
from .mixins.favorite_document_mixins import FavoriteDocumentTestMixin
class FavoriteDocumentLinkTestCase(
FavoriteDocumentTestMixin, GenericDocumentViewTestCase
):
def test_favorite_document_add_link_no_permission(self):
self._create_test_document_stub()
self.add_test_view(test_object=self.test_document)
context = self.get_test_view()
resolved_link = link_document_favorites_add.resolve(context=context)
self.assertEqual(resolved_link, None)
def test_favorite_document_add_link_with_access(self):
self._create_test_document_stub()
self.grant_access(
obj=self.test_document_stub, permission=permission_document_view
)
self.add_test_view(test_object=self.test_document)
context = self.get_test_view()
resolved_link = link_document_favorites_add.resolve(context=context)
self.assertNotEqual(resolved_link, None)
def test_favorite_document_add_link_external_user_with_access(self):
self._create_test_user()
self._create_test_document_stub()
self.grant_access(
obj=self.test_document_stub, permission=permission_document_view
)
self._test_document_favorite_add(user=self.test_user)
self.add_test_view(test_object=self.test_document)
context = self.get_test_view()
resolved_link = link_document_favorites_add.resolve(context=context)
self.assertNotEqual(resolved_link, None)
def test_favorite_document_remove_link_no_permission(self):
self._create_test_document_stub()
self._test_document_favorite_add()
self.add_test_view(test_object=self.test_document)
context = self.get_test_view()
resolved_link = link_document_favorites_remove.resolve(context=context)
self.assertEqual(resolved_link, None)
def test_favorite_document_remove_link_with_access(self):
self._create_test_document_stub()
self.grant_access(
obj=self.test_document_stub, permission=permission_document_view
)
self._test_document_favorite_add()
self.add_test_view(test_object=self.test_document)
context = self.get_test_view()
resolved_link = link_document_favorites_remove.resolve(context=context)
self.assertNotEqual(resolved_link, None)
def test_favorite_document_remove_link_external_user_with_access(self):
self._create_test_user()
self._create_test_document_stub()
self.grant_access(
obj=self.test_document_stub, permission=permission_document_view
)
self._test_document_favorite_add(user=self.test_user)
self.add_test_view(test_object=self.test_document)
context = self.get_test_view()
resolved_link = link_document_favorites_remove.resolve(context=context)
self.assertEqual(resolved_link, None)
class DocumentsLinksTestCase(GenericDocumentViewTestCase):
def test_document_file_delete_link_no_permission(self):
self._upload_test_document_file()
self.assertTrue(self.test_document.files.count(), 2)
self.add_test_view(test_object=self.test_document.files.first())
context = self.get_test_view()
resolved_link = link_document_file_delete.resolve(context=context)
self.assertEqual(resolved_link, None)
def test_document_file_delete_link_with_permission(self):
self._upload_test_document_file()
self.assertTrue(self.test_document.files.count(), 2)
self.grant_access(
obj=self.test_document,
permission=permission_document_file_delete
)
self.add_test_view(test_object=self.test_document.files.first())
context = self.get_test_view()
resolved_link = link_document_file_delete.resolve(context=context)
self.assertNotEqual(resolved_link, None)
self.assertEqual(
resolved_link.url,
reverse(
viewname=link_document_file_delete.view,
args=(self.test_document.files.first().pk,)
)
)
def test_document_file_download_link_no_permission(self):
self.add_test_view(test_object=self.test_document.file_latest)
context = self.get_test_view()
resolved_link = link_document_file_download_quick.resolve(context=context)
self.assertEqual(resolved_link, None)
def test_document_file_download_link_with_permission(self):
self.grant_access(
obj=self.test_document,
permission=permission_document_file_download
)
self.add_test_view(test_object=self.test_document.file_latest)
context = self.get_test_view()
resolved_link = link_document_file_download_quick.resolve(context=context)
self.assertNotEqual(resolved_link, None)
self.assertEqual(
resolved_link.url,
reverse(
viewname=link_document_file_download_quick.view,
args=(self.test_document.file_latest.pk,)
)
)
class TrashedDocumentsLinksTestCase(GenericDocumentViewTestCase):
def setUp(self):
super().setUp()
self.test_document.delete()
self.test_trashed_document = TrashedDocument.objects.get(
pk=self.test_document.pk
)
self.add_test_view(test_object=self.test_trashed_document)
self.context = self.get_test_view()
def test_trashed_document_restore_link_no_permission(self):
resolved_link = link_document_restore.resolve(context=self.context)
self.assertEqual(resolved_link, None)
def test_trashed_document_restore_link_with_permission(self):
self.grant_access(
obj=self.test_document, permission=permission_trashed_document_restore
)
resolved_link = link_document_restore.resolve(context=self.context)
self.assertNotEqual(resolved_link, None)
self.assertEqual(
resolved_link.url,
reverse(
viewname=link_document_restore.view,
args=(self.test_trashed_document.pk,)
)
)
|
insights/parsers/ceph_insights.py | lhuett/insights-core | 121 | 28901 | <reponame>lhuett/insights-core<gh_stars>100-1000
"""
ceph_insights - command ``ceph insights``
=========================================
"""
import json
import re
from .. import CommandParser, parser
from insights.specs import Specs
@parser(Specs.ceph_insights)
class CephInsights(CommandParser):
"""
Parse the output of the ``ceph insights`` command.
Attributes:
version (dict): version information of the Ceph cluster.
data (dict): a dictionary of the parsed output.
The ``data`` attribute is a dictionary containing the parsed output of the
``ceph insights`` command. The following are available in ``data``:
* ``crashes`` - summary of daemon crashes for the past 24 hours
* ``health`` - the current and historical (past 24 hours) health checks
* ``config`` - cluster and daemon configuration settings
* ``osd_dump`` - osd and pool information
* ``df`` - storage usage statistics
* ``osd_tree`` - osd topology
* ``fs_map`` - file system map
* ``crush_map`` - the CRUSH map
* ``mon_map`` - monitor map
* ``service_map`` - service map
* ``manager_map`` - manager map
* ``mon_status`` - monitor status
* ``pg_summary`` - placement group summary
* ``osd_metadata`` - per-OSD metadata
* ``version`` - ceph software version
* ``errors`` - any errors encountered collecting this data
The ``version`` attribute contains a normalized view of ``self.data["version"]``.
Examples:
>>> ceph_insights.version["release"] == 14
True
>>> ceph_insights.version["major"] == 0
True
>>> ceph_insights.version["minor"] == 0
True
>>> isinstance(ceph_insights.data["crashes"], dict)
True
>>> isinstance(ceph_insights.data["health"], dict)
True
"""
IGNORE_RE = [
"\*\*\* DEVELOPER MODE",
"\d+-\d+-\d+.+WARNING: all dangerous"
]
bad_lines = [
"module 'insights' is not enabled",
"no valid command found"
]
def __init__(self, *args, **kwargs):
kwargs.update(dict(extra_bad_lines=self.bad_lines))
super(CephInsights, self).__init__(*args, **kwargs)
def _sanitize_content(self, content):
"""Remove lines matching IGNORE_RE at start of content"""
slice_point = 0
ignore_re = re.compile('|'.join(CephInsights.IGNORE_RE))
for line in content:
if not line or ignore_re.match(line):
slice_point += 1
continue
break
return content[slice_point:]
def _parse_version(self):
"""
Add a Ceph version property as a dictionary with the keys "release",
"major", "minor" containing numeric values, and the key "full" with the
full version string. If Ceph is not compiled with verison information
(this should never be the case in a production system), then "release",
"major", and "minor" are set to None.
"""
self.version = {
"release": None,
"major": None,
"minor": None
}
self.version.update(self.data["version"])
def parse_content(self, content):
content = self._sanitize_content(content)
self.data = json.loads(''.join(content))
self._parse_version()
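# Rough test-style sketch (assumes the insights test helper context_wrap from
# insights.tests; the JSON payload below is illustrative, not real `ceph insights` output):
#   from insights.tests import context_wrap
#   parser = CephInsights(context_wrap('{"version": {"release": 14, "major": 0, "minor": 0, "full": "ceph version 14.x"}}'))
#   parser.version["release"]  # -> 14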
|
test/__init__.py | shikajiro/picosdk-python-wrappers | 114 | 28921 | #
# Copyright (C) 2018 Pico Technology Ltd. See LICENSE file for terms.
#
|
testing/MLDB-1104-input-data-spec.py | kstepanmpmg/mldb | 665 | 28926 | #
# MLDB-1104-input-data-spec.py
# mldb.ai inc, 2015
# This file is part of MLDB. Copyright 2015 mldb.ai inc. All rights reserved.
#
import unittest
import datetime
import random
from mldb import mldb, ResponseException
class InputDataSpecTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.load_kmeans_dataset()
cls.load_classifier_dataset()
@classmethod
def load_kmeans_dataset(cls):
kmeans_example = mldb.create_dataset({
"type": "sparse.mutable",
'id' : 'kmeans_example'
})
now = datetime.datetime.now()
for i in range(100):
val_x = float(random.randint(-5, 5))
val_y = float(random.randint(-5, 5))
row = [['x', val_x, now], ['y', val_y, now]]
kmeans_example.record_row('row_%d' % i, row)
kmeans_example.commit()
def train_kmeans(self, training_data):
metric = "euclidean"
mldb.put("/v1/procedures/kmeans", {
'type' : 'kmeans.train',
'params' : {
'trainingData' : training_data,
'centroidsDataset' : {
'id' : 'kmeans_centroids',
'type' : 'embedding',
'params': {
'metric': metric
}
},
'numClusters' : 2,
'metric': metric
}
})
def train_svd(self, training_data):
mldb.put("/v1/procedures/svd", {
'type' : 'svd.train',
'params' : {
'trainingData' : training_data,
'runOnCreation' : True
}
})
@classmethod
def load_classifier_dataset(cls):
dataset = mldb.create_dataset({
"type": "sparse.mutable",
"id": "iris_dataset"
})
with open("./mldb/testing/dataset/iris.data") as f:
for i, line in enumerate(f):
cols = []
line_split = line.split(',')
if len(line_split) != 5:
continue
                # Jeremy's note: what if a feature is named "label"?
cols.append(["label", float(line_split[0]), 0]) # sepal length
cols.append(["labels", float(line_split[1]), 0]) # sepal width
cols.append(["petal length", float(line_split[2]), 0])
cols.append(["petal width", float(line_split[3]), 0])
cols.append(["features", line_split[4].strip('\n"'), 0]) #class
dataset.record_row(str(i+1), cols)
dataset.commit()
def train_classifier(self, training_data):
result = mldb.put("/v1/procedures/classifier", {
'type' : 'classifier.train',
'params' : {
'trainingData' : training_data,
"configuration": {
"type": "decision_tree",
"max_depth": 8,
"verbosity": 3,
"update_alg": "prob"
},
"modelFileUrl": "file://tmp/MLDB-1104.cls",
"mode": "categorical",
"functionName": "classifier_apply",
'runOnCreation' : True
}
})
return result.json()
def test_train_kmeans(self):
# KMEANS TRAIN PROCEDURE WITH BOTH TYPE OF INPUT DATA
self.train_kmeans('select * from kmeans_example')
self.train_kmeans('select x + y as x, y + x as y from kmeans_example')
self.train_kmeans({'select' : '*', 'from' : {'id' : 'kmeans_example'}})
# TEST ERROR CASE
with self.assertRaises(ResponseException):
self.train_kmeans(
'select x, y from kmeans_example group by x')
with self.assertRaises(ResponseException):
self.train_kmeans(
'select x, y from kmeans_example group by x having y > 2')
def test_train_svd(self):
self.train_svd('select * from kmeans_example')
self.train_svd('select x, y from kmeans_example')
self.train_svd('select x AS z, y from kmeans_example')
self.train_svd('select * EXCLUDING(x) from kmeans_example')
self.train_svd({'select' : '*', 'from' : {'id' : 'kmeans_example'}})
self.train_svd('select x + 1, y from kmeans_example')
with self.assertRaises(ResponseException):
self.train_svd('select x, y from kmeans_example group by x')
with self.assertRaises(ResponseException):
self.train_svd(
'select x, y from kmeans_example group by x having y > 2')
def test_train_classifier(self):
mldb.log(self.train_classifier(
"select {label, labels} as features, features as label "
"from iris_dataset"))
result = mldb.get(
"/v1/query",
q="SELECT classifier_apply({{label, labels} as features}) as *, features from iris_dataset")
rows = result.json()
mldb.log("-------------------------------");
mldb.log(rows)
# compare the classifier results on the train data with the original
# label
count = 0
for row in rows:
_max = 0
category = ""
for column in row['columns'][1:4]:
if column[1] > _max:
_max = column[1]
# remove the leading scores. and quotation marks
category = column[0][10:-3]
if category != row['columns'][0][1]:
count += 1
# misclassified result should be a small fraction
self.assertTrue(
float(count) / len(rows) < 0.2,
'the classifier results on the train data are strangely low')
if __name__ == '__main__':
mldb.run_tests()
|
tests/tensors/similarity_functions/dot_product_test.py | richarajpal/deep_qa | 459 | 28934 | <reponame>richarajpal/deep_qa<filename>tests/tensors/similarity_functions/dot_product_test.py<gh_stars>100-1000
# pylint: disable=no-self-use,invalid-name
import numpy
from numpy.testing import assert_almost_equal
import keras.backend as K
from deep_qa.tensors.similarity_functions.dot_product import DotProduct
class TestDotProductSimilarityFunction:
dot_product = DotProduct(name='dot_product')
def test_initialize_weights_returns_empty(self):
weights = self.dot_product.initialize_weights(3, 3)
assert isinstance(weights, list) and len(weights) == 0
def test_compute_similarity_does_a_dot_product(self):
a_vectors = numpy.asarray([[1, 1, 1], [-1, -1, -1]])
b_vectors = numpy.asarray([[1, 0, 1], [1, 0, 0]])
result = K.eval(self.dot_product.compute_similarity(K.variable(a_vectors), K.variable(b_vectors)))
assert result.shape == (2,)
assert numpy.all(result == [2, -1])
def test_compute_similarity_works_with_higher_order_tensors(self):
a_vectors = numpy.random.rand(5, 4, 3, 6, 7)
b_vectors = numpy.random.rand(5, 4, 3, 6, 7)
result = K.eval(self.dot_product.compute_similarity(K.variable(a_vectors), K.variable(b_vectors)))
assert result.shape == (5, 4, 3, 6)
assert_almost_equal(result[3, 2, 1, 3],
numpy.dot(a_vectors[3, 2, 1, 3], b_vectors[3, 2, 1, 3]),
decimal=6)
|
zipline/pipeline/common.py | lv-cha/zipline-chinese | 606 | 28998 | <reponame>lv-cha/zipline-chinese<filename>zipline/pipeline/common.py
"""
Common constants for Pipeline.
"""
AD_FIELD_NAME = 'asof_date'
ANNOUNCEMENT_FIELD_NAME = 'announcement_date'
CASH_FIELD_NAME = 'cash'
CASH_AMOUNT_FIELD_NAME = 'cash_amount'
BUYBACK_ANNOUNCEMENT_FIELD_NAME = 'buyback_date'
DAYS_SINCE_PREV = 'days_since_prev'
DAYS_SINCE_PREV_DIVIDEND_ANNOUNCEMENT = 'days_since_prev_dividend_announcement'
DAYS_SINCE_PREV_EX_DATE = 'days_since_prev_ex_date'
DAYS_TO_NEXT = 'days_to_next'
DAYS_TO_NEXT_EX_DATE = 'days_to_next_ex_date'
EX_DATE_FIELD_NAME = 'ex_date'
NEXT_AMOUNT = 'next_amount'
NEXT_ANNOUNCEMENT = 'next_announcement'
NEXT_EX_DATE = 'next_ex_date'
NEXT_PAY_DATE = 'next_pay_date'
PAY_DATE_FIELD_NAME = 'pay_date'
PREVIOUS_AMOUNT = 'previous_amount'
PREVIOUS_ANNOUNCEMENT = 'previous_announcement'
PREVIOUS_BUYBACK_ANNOUNCEMENT = 'previous_buyback_announcement'
PREVIOUS_BUYBACK_CASH = 'previous_buyback_cash'
PREVIOUS_BUYBACK_SHARE_COUNT = 'previous_buyback_share_count'
PREVIOUS_EX_DATE = 'previous_ex_date'
PREVIOUS_PAY_DATE = 'previous_pay_date'
SHARE_COUNT_FIELD_NAME = 'share_count'
SID_FIELD_NAME = 'sid'
TS_FIELD_NAME = 'timestamp'
|
docs/ASH/notebooks/object-segmentation-on-azure-stack/score.py | RichardZhaoW/AML-Kubernetes | 176 | 29016 | import os
import json
import time
import torch
# Called when the deployed service starts
def init():
global model
global device
# Get the path where the deployed model can be found.
model_filename = 'obj_segmentation.pkl'
model_path = os.path.join(os.environ['AZUREML_MODEL_DIR'], model_filename)
device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
model = torch.load(model_path, map_location=device)
# Handle requests to the service
def run(data):
try:
start_at = time.time()
inputs = json.loads(data)
img_data_list = inputs["instances"]
img_tensor_list = [torch.tensor(item) for item in img_data_list]
model.eval()
with torch.no_grad():
predictions = model([item.to(device) for item in img_tensor_list])
pred_data_list = [{
"masks": prediction['masks'][0, 0].mul(255).byte().cpu().numpy().tolist(),
"boxes": prediction['boxes'].numpy().tolist(),
"labels": prediction['labels'].numpy().tolist(),
"scores": prediction['scores'].numpy().tolist(),
} for prediction in predictions]
return {"predictions": pred_data_list,
"elapsed_time": time.time() - start_at}
except Exception as e:
error = str(e)
return error
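# Client-side payload sketch (shape assumption: each instance is a 3 x H x W image
# tensor serialized as nested float lists, matching what torchvision detection models expect):
#   payload = json.dumps({"instances": [image_chw.tolist()]})
#   result = run(payload)   # -> {"predictions": [...], "elapsed_time": ...}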
|
pox/info/debug_deadlock.py | korrigans84/pox_network | 416 | 29019 | <reponame>korrigans84/pox_network
# Copyright 2012 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Primitive help for debugging deadlocks.
Prints stack info for all threads.
(Might be more useful if it only printed stack frames that
were not changing, sort of like recoco_spy.)
This was initially factored out from a pox.py modification by
<NAME>.
"""
import sys
import time
import inspect
import traceback
import threading
from pox.core import core
import os
base_path = __file__
base_path = os.path.split(base_path)[0]
base_path = os.path.split(base_path)[0]
base_path += os.path.sep
def fmt_tb (tb):
f = tb.filename
if f.startswith(base_path):
f = f[len(base_path):]
l = "%s:%i" % (f, tb.lineno)
code = tb.code_context
if code: code = code[0].strip()
if not code: code = "<Unknown>"
return "%20s: %s" % (l,code)
def _trace_thread_proc ():
try:
while core.running:
frames = sys._current_frames()
for key in frames:
frame = frames[key]
print(fmt_tb(inspect.getframeinfo(frame)))
outer_frames = inspect.getouterframes(frame)
for i in range(0, len(outer_frames)):
print(" " + fmt_tb(inspect.getframeinfo(outer_frames[i][0])))
time.sleep(5)
except:
traceback.print_exc()
def launch ():
_trace_thread = threading.Thread(target=_trace_thread_proc)
_trace_thread.daemon = True
# Start it up a bit in the future so that it doesn't print all over
# init messages.
core.callDelayed(3, _trace_thread.start)
|
wadi.py | sensepost/wadi | 137 | 29090 | import sys
import os
from multiprocessing import Process, Queue, Manager
from threading import Timer
from wadi_harness import Harness
from wadi_debug_win import Debugger
import time
import hashlib
def test(msg):
while True:
print 'Process 2:' + msg
#print msg
def test2():
print 'Process 1'
time.sleep(2)
while True:
print 'Process 1'
def run_harness(t):
harness = Harness(sys.argv[1],sys.argv[2],t)
harness.run()
def run_debugger(q):
debugger = Debugger(q)
debugger.run_Browser('IE')
def timeout_debug(dp):
print '[*] Terminating Debugger Process PID: %d' % dp.pid
dp.terminate()
class wadi():
def __init__(self, args=None):
if args:
self.args = args
else:
pass
def writeTestCases(self,tcases,msg):
self.msg = msg[0]
self.code = msg[1]
self.add = msg[2]
self.testcases = tcases
self.hash = hashlib.md5()
self.b = self.code+self.add
self.hash.update(self.b)
self.dgst = self.hash.hexdigest()
self.path = "./"+self.dgst
if os.path.exists(self.path):
print "[*] Duplicate Crash: %s" % self.dgst
else:
os.makedirs(self.path)
f = open(self.path + "/" +self.dgst+".crash","w+b")
f.write(self.msg)
f.close()
print "[*] Written Crash file to: %s" % self.dgst+".crash"
for i in range(10):
self.tcase = self.testcases.pop()
f2 = open(self.path+"/"+self.dgst+"_"+str(i)+".html","w+b")
f2.write(self.tcase)
f2.close()
print "[*] Written testcases to %s" % self.path+"/"+self.dgst+str(i)+".html"
print "[*] Last TestCase Folder '%s'" % self.dgst
def close(self):
sys.exit()
def run(self):
self.queue = Manager().list()
self.tcases = Manager().list()
self.server_pid = None
self.debugger_pid = None
self.init = 0
while True:
if not self.server_pid:
self.server_process = Process(target=run_harness, args=(self.tcases,))
self.server_process.start()
self.server_pid = self.server_process.pid
print '[*] Running Server Process %s ' % (self.server_pid,)
#self.server_pid =
if not self.debugger_pid:
self.debugger_process = Process(target=run_debugger,args=(self.queue,))
self.debugger_process.start()
self.debugger_pid = self.debugger_process.pid
timer = Timer(120.0,timeout_debug,(self.debugger_process,))
timer.daemon = True
timer.start()
if not self.debugger_process.is_alive():
print "[*] Debugger Process %s exited" % self.debugger_pid
timer.cancel()
self.lenq = len(self.queue)
self.lentc = len(self.tcases)
if self.lenq:
self.msg = self.queue.pop()
#self.msg = self.queue.get()
print "[*] Wooops Crash !!!!"
print "[*] %s" % self.msg[0]
else:
print "[*] No Crashes"
#if not self.tcases.empty():
if self.lentc and self.lenq:
#self.tc = self.tcases.get()
self.writeTestCases(self.tcases, self.msg)
else:
print "[*] No TestCases"
self.debugger_pid = None
else:
pass
if __name__ == '__main__':
#try:
w = wadi()
w.run()
#except:
# w.close()
|
apps/flow/run.py | rainydaygit/testtcloudserver | 349 | 29104 | from apps.flow.settings import config
if config.SERVER_ENV != 'dev':
from gevent import monkey
monkey.patch_all()
else:
pass
from apps.flow.views.deploy import deploy
from apps.flow.views.flow import flow
from library.api.tFlask import tflask
def create_app():
app = tflask(config)
register_blueprints(app)
return app
def register_blueprints(app):
app.register_blueprint(flow, url_prefix="/v1/flow")
app.register_blueprint(deploy, url_prefix="/v1/deploy")
if __name__ == '__main__':
create_app().run(port=config.PORT)
|
etl/parsers/etw/Microsoft_Windows_SecurityMitigationsBroker.py | IMULMUL/etl-parser | 104 | 29133 | <filename>etl/parsers/etw/Microsoft_Windows_SecurityMitigationsBroker.py
# -*- coding: utf-8 -*-
"""
Microsoft-Windows-SecurityMitigationsBroker
GUID : ea8cd8a5-78ff-4418-b292-aadc6a7181df
"""
from construct import Int8sl, Int8ul, Int16ul, Int16sl, Int32sl, Int32ul, Int64sl, Int64ul, Bytes, Double, Float32l, Struct
from etl.utils import WString, CString, SystemTime, Guid
from etl.dtyp import Sid
from etl.parsers.etw.core import Etw, declare, guid
@declare(guid=guid("ea8cd8a5-78ff-4418-b292-aadc6a7181df"), event_id=1003, version=0)
class Microsoft_Windows_SecurityMitigationsBroker_1003_0(Etw):
pattern = Struct(
"DriverId" / Int64ul,
"ErrorCode" / Int32ul
)
@declare(guid=guid("ea8cd8a5-78ff-4418-b292-aadc6a7181df"), event_id=1004, version=0)
class Microsoft_Windows_SecurityMitigationsBroker_1004_0(Etw):
pattern = Struct(
"DriverId" / Int64ul,
"ErrorCode" / Int32ul
)
@declare(guid=guid("ea8cd8a5-78ff-4418-b292-aadc6a7181df"), event_id=1005, version=0)
class Microsoft_Windows_SecurityMitigationsBroker_1005_0(Etw):
pattern = Struct(
"DriverId" / Int64ul,
"ErrorCode" / Int32ul
)
@declare(guid=guid("ea8cd8a5-78ff-4418-b292-aadc6a7181df"), event_id=1006, version=0)
class Microsoft_Windows_SecurityMitigationsBroker_1006_0(Etw):
pattern = Struct(
"DriverId" / Int64ul,
"ACGState" / Int32ul
)
@declare(guid=guid("ea8cd8a5-78ff-4418-b292-aadc6a7181df"), event_id=1007, version=0)
class Microsoft_Windows_SecurityMitigationsBroker_1007_0(Etw):
pattern = Struct(
"DriverId" / Int64ul,
"ProcessId" / Int32ul,
"ErrorCode" / Int32ul
)
@declare(guid=guid("ea8cd8a5-78ff-4418-b292-aadc6a7181df"), event_id=1008, version=0)
class Microsoft_Windows_SecurityMitigationsBroker_1008_0(Etw):
pattern = Struct(
"DriverId" / Int64ul,
"ProcessId" / Int32ul,
"ErrorCode" / Int32ul
)
@declare(guid=guid("ea8cd8a5-78ff-4418-b292-aadc6a7181df"), event_id=1009, version=0)
class Microsoft_Windows_SecurityMitigationsBroker_1009_0(Etw):
pattern = Struct(
"DriverId" / Int64ul,
"ProcessId" / Int32ul,
"ACGState" / Int32ul
)
@declare(guid=guid("ea8cd8a5-78ff-4418-b292-aadc6a7181df"), event_id=1010, version=0)
class Microsoft_Windows_SecurityMitigationsBroker_1010_0(Etw):
pattern = Struct(
"DriverId" / Int64ul,
"ProcessId" / Int32ul
)
@declare(guid=guid("ea8cd8a5-78ff-4418-b292-aadc6a7181df"), event_id=1011, version=0)
class Microsoft_Windows_SecurityMitigationsBroker_1011_0(Etw):
pattern = Struct(
"DriverId" / Int64ul,
"ProcessId" / Int32ul
)
@declare(guid=guid("ea8cd8a5-78ff-4418-b292-aadc6a7181df"), event_id=1012, version=0)
class Microsoft_Windows_SecurityMitigationsBroker_1012_0(Etw):
pattern = Struct(
"DriverId" / Int64ul,
"ProcessId" / Int32ul,
"ErrorCode" / Int32ul
)
@declare(guid=guid("ea8cd8a5-78ff-4418-b292-aadc6a7181df"), event_id=1013, version=0)
class Microsoft_Windows_SecurityMitigationsBroker_1013_0(Etw):
pattern = Struct(
"DriverId" / Int64ul,
"ProcessId" / Int32ul
)
@declare(guid=guid("ea8cd8a5-78ff-4418-b292-aadc6a7181df"), event_id=1014, version=0)
class Microsoft_Windows_SecurityMitigationsBroker_1014_0(Etw):
pattern = Struct(
"DriverId" / Int64ul,
"ProcessId" / Int32ul
)
@declare(guid=guid("ea8cd8a5-78ff-4418-b292-aadc6a7181df"), event_id=1015, version=0)
class Microsoft_Windows_SecurityMitigationsBroker_1015_0(Etw):
pattern = Struct(
"DriverId" / Int64ul,
"ProcessId" / Int32ul
)
@declare(guid=guid("ea8cd8a5-78ff-4418-b292-aadc6a7181df"), event_id=1016, version=0)
class Microsoft_Windows_SecurityMitigationsBroker_1016_0(Etw):
pattern = Struct(
"DriverId" / Int64ul,
"ProcessId" / Int32ul,
"ErrorCode" / Int32ul
)
@declare(guid=guid("ea8cd8a5-78ff-4418-b292-aadc6a7181df"), event_id=1017, version=0)
class Microsoft_Windows_SecurityMitigationsBroker_1017_0(Etw):
pattern = Struct(
"DriverId" / Int64ul,
"ProcessId" / Int32ul,
"ErrorCode" / Int32ul
)
@declare(guid=guid("ea8cd8a5-78ff-4418-b292-aadc6a7181df"), event_id=1018, version=0)
class Microsoft_Windows_SecurityMitigationsBroker_1018_0(Etw):
pattern = Struct(
"DriverId" / Int64ul,
"ProcessId" / Int32ul,
"ErrorCode" / Int32ul
)
@declare(guid=guid("ea8cd8a5-78ff-4418-b292-aadc6a7181df"), event_id=1019, version=0)
class Microsoft_Windows_SecurityMitigationsBroker_1019_0(Etw):
pattern = Struct(
"DriverId1" / Int64ul,
"DriverId2" / Int64ul,
"ProcessId" / Int32ul
)
@declare(guid=guid("ea8cd8a5-78ff-4418-b292-aadc6a7181df"), event_id=1020, version=0)
class Microsoft_Windows_SecurityMitigationsBroker_1020_0(Etw):
pattern = Struct(
"DriverId" / Int64ul,
"ProcessId" / Int32ul
)
@declare(guid=guid("ea8cd8a5-78ff-4418-b292-aadc6a7181df"), event_id=1021, version=0)
class Microsoft_Windows_SecurityMitigationsBroker_1021_0(Etw):
pattern = Struct(
"DriverId" / Int64ul,
"ProcessId" / Int32ul,
"ErrorCode" / Int32ul
)
@declare(guid=guid("ea8cd8a5-78ff-4418-b292-aadc6a7181df"), event_id=1022, version=0)
class Microsoft_Windows_SecurityMitigationsBroker_1022_0(Etw):
pattern = Struct(
"Description" / WString,
"VendorId" / Int32ul,
"DeviceId" / Int32ul,
"DriverId" / Int64ul,
"ProcessId" / Int32ul
)
@declare(guid=guid("ea8cd8a5-78ff-4418-b292-aadc6a7181df"), event_id=1023, version=0)
class Microsoft_Windows_SecurityMitigationsBroker_1023_0(Etw):
pattern = Struct(
"Description" / WString,
"VendorId" / Int32ul,
"DeviceId" / Int32ul,
"DriverId" / Int64ul,
"ProcessId" / Int32ul
)
@declare(guid=guid("ea8cd8a5-78ff-4418-b292-aadc6a7181df"), event_id=1024, version=0)
class Microsoft_Windows_SecurityMitigationsBroker_1024_0(Etw):
pattern = Struct(
"DriverId" / Int64ul,
"ProcessId" / Int32ul
)
@declare(guid=guid("ea8cd8a5-78ff-4418-b292-aadc6a7181df"), event_id=1025, version=0)
class Microsoft_Windows_SecurityMitigationsBroker_1025_0(Etw):
pattern = Struct(
"DriverId" / Int64ul,
"ProcessId" / Int32ul
)
@declare(guid=guid("ea8cd8a5-78ff-4418-b292-aadc6a7181df"), event_id=1026, version=0)
class Microsoft_Windows_SecurityMitigationsBroker_1026_0(Etw):
pattern = Struct(
"Description" / WString,
"VendorId" / Int32ul,
"DeviceId" / Int32ul,
"DriverId" / Int64ul,
"ProcessId" / Int32ul
)
@declare(guid=guid("ea8cd8a5-78ff-4418-b292-aadc6a7181df"), event_id=1027, version=0)
class Microsoft_Windows_SecurityMitigationsBroker_1027_0(Etw):
pattern = Struct(
"DriverId" / Int64ul,
"ProcessId" / Int32ul
)
@declare(guid=guid("ea8cd8a5-78ff-4418-b292-aadc6a7181df"), event_id=1030, version=0)
class Microsoft_Windows_SecurityMitigationsBroker_1030_0(Etw):
pattern = Struct(
"ModuleName" / WString
)
|
android/replace_apk_resource_pro/replace_source.py | roceys/tools_python | 130 | 29145 | <filename>android/replace_apk_resource_pro/replace_source.py
#!/usr/bin/env python
# encoding: utf-8
"""
@version: v1.0
@author: xag
@license: Apache Licence
@contact: <EMAIL>
@site: http://www.xingag.top
@software: PyCharm
@file: replace_source.py
@time: 4/25/19 10:46
@description: replace resource files inside an apk
"""
from file_utils import *
import os
from subprocess import Popen, PIPE, STDOUT
class ReplaceApkSource(object):
def __init__(self):
self.file_name = 'logo_white.png'
# name of the target apk
self.target_apk_name = 'new.apk'
def start(self):
# 1. decompile the apk with apktool.jar
file_name_pre = self.__unzip_apk()
# 2. replace the resource
self.__replace_source(file_name_pre)
# 3. repack the apk
self.__rezip_apk(file_name_pre)
# 4. re-sign the apk
self.__re_sign()
def __unzip_apk(self):
"""
Decompile the apk file in the current directory
:return:
"""
# file name, with extension
file_name = get_current_folder_file('apk')
# file name, without extension
file_name_pre = file_name.split('.')[0]
os.system('java -jar apktool.jar d %s' % file_name)
print('Step 1: apk decompiled successfully~')
return file_name_pre
def __replace_source(self, file_name_pre):
"""
Replace the resource
@:param file_name_pre: name of the decompiled folder
:return:
"""
print('The generated folder is named: %s' % file_name_pre)
# rename the file in the current directory
rename_current_file("png", self.file_name)
# full path of the file to be replaced
logo_file_path = './%s/res/drawable-mdpi/logo_white.png' % file_name_pre
# start replacing the file
replace_file('./%s' % self.file_name, logo_file_path)
print('Step 2: resource image replaced successfully~')
def __rezip_apk(self, folder_name):
"""
Repack into an apk
@:param folder_name: name of the decompiled source folder
:return:
"""
# repack into an apk
os.system('java -jar apktool.jar b %s -o %s' % (folder_name, self.target_apk_name))
# remove the temporary folder
shutil.rmtree('./%s/' % folder_name)
print('Step 3: apk repacked successfully~')
def __re_sign(self):
"""
Re-sign the apk
:return:
"""
# re-sign the apk
cmd = 'jarsigner -verbose -sigalg SHA1withRSA -digestalg SHA1 -keystore **.keystore -storepass ** %s **' % self.target_apk_name
p = Popen(cmd, stdout=PIPE, stdin=PIPE, stderr=STDOUT, shell=True)
# supply the input parameter (keystore password) on stdin
p.communicate(input=b'nantian')
print('Step 4: apk re-signed successfully~')
if __name__ == '__main__':
replace_apk_source = ReplaceApkSource()
replace_apk_source.start()
print('Congratulations! Operation complete~')
|
train/solver.py | nhonth/DeLF-pytorch | 315 | 29156 | <reponame>nhonth/DeLF-pytorch
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
PyTorch Implementation of training DeLF feature.
Solver for step 1 (finetune local descriptor)
nashory, 2018.04
'''
import os, sys, time
import shutil
import torch
import torch.nn as nn
import torch.optim as optim
from torch.autograd import Variable
from utils import Bar, Logger, AverageMeter, compute_precision_top_k, mkdir_p
'''helper functions.
'''
def __cuda__(x):
if torch.cuda.is_available():
return x.cuda()
else:
return x
def __is_cuda__():
return torch.cuda.is_available()
def __to_var__(x, volatile=False):
return Variable(x, volatile=volatile)
def __to_tensor__(x):
return x.data
class Solver(object):
def __init__(self, config, model):
self.state = {k: v for k, v in config._get_kwargs()}
self.config = config
self.epoch = 0 # global epoch.
self.best_acc = 0 # global best accuracy.
self.prefix = os.path.join('repo', config.expr)
# ship model to cuda
self.model = __cuda__(model)
# define criterion and optimizer
self.criterion = nn.CrossEntropyLoss()
if config.optim.lower() in ['rmsprop']:
self.optimizer = optim.RMSprop(filter(lambda p: p.requires_grad, self.model.parameters()),
lr=config.lr,
weight_decay=config.weight_decay)
elif config.optim.lower() in ['sgd']:
self.optimizer = optim.SGD(filter(lambda p: p.requires_grad, self.model.parameters()),
lr=config.lr,
weight_decay=config.weight_decay)
elif config.optim.lower() in ['adam']:
self.optimizer = optim.Adam(filter(lambda p: p.requires_grad, self.model.parameters()),
lr=config.lr,
weight_decay=config.weight_decay)
# decay learning rate by a factor of 0.5 every 10 epochs
self.lr_scheduler = optim.lr_scheduler.StepLR(
self.optimizer,
step_size=config.lr_stepsize,
gamma=config.lr_gamma)
# create directory to save result if not exist.
self.ckpt_path = os.path.join(self.prefix, config.stage, 'ckpt')
self.log_path = os.path.join(self.prefix, config.stage, 'log')
self.image_path = os.path.join(self.prefix, config.stage, 'image')
mkdir_p(self.ckpt_path)
mkdir_p(self.log_path)
mkdir_p(self.image_path)
# set logger.
self.logger = {}
self.title = 'DeLF-{}'.format(config.stage.upper())
self.logger['train'] = Logger(os.path.join(self.prefix, config.stage, 'log/train.log'))
self.logger['val'] = Logger(os.path.join(self.prefix, config.stage, 'log/val.log'))
self.logger['train'].set_names(
['epoch','lr', 'loss', 'top1_accu', 'top3_accu', 'top5_accu'])
self.logger['val'].set_names(
['epoch','lr', 'loss', 'top1_accu', 'top3_accu', 'top5_accu'])
def __exit__(self):
self.train_logger.close()
self.val_logger.close()
def __adjust_pixel_range__(self,
x,
range_from=[0,1],
range_to=[-1,1]):
'''
adjust pixel range from <range_from> to <range_to>.
'''
if not range_from == range_to:
scale = float(range_to[1]-range_to[0])/float(range_from[1]-range_from[0])
bias = range_to[0]-range_from[0]*scale
x = x.mul(scale).add(bias)
return x
def __save_checkpoint__(self, state, ckpt='ckpt', filename='checkpoint.pth.tar'):
filepath = os.path.join(ckpt, filename)
torch.save(state, filepath)
def __solve__(self, mode, epoch, dataloader):
'''solve
mode: train / val
'''
batch_timer = AverageMeter()
data_timer = AverageMeter()
prec_losses = AverageMeter()
prec_top1 = AverageMeter()
prec_top3 = AverageMeter()
prec_top5 = AverageMeter()
if mode in ['val']:
pass
# confusion_matrix = ConfusionMeter()
since = time.time()
bar = Bar('[{}]{}'.format(mode.upper(), self.title), max=len(dataloader))
for batch_idx, (inputs, labels) in enumerate(dataloader):
# measure data loading time
data_timer.update(time.time() - since)
# wrap inputs in variable
if mode in ['train']:
if __is_cuda__():
inputs = inputs.cuda()
labels = labels.cuda(async=True)
inputs = __to_var__(inputs)
labels = __to_var__(labels)
elif mode in ['val']:
if __is_cuda__():
inputs = inputs.cuda()
labels = labels.cuda(async=True)
inputs = __to_var__(inputs, volatile=True)
labels = __to_var__(labels, volatile=False)
# forward
outputs = self.model(inputs)
loss = self.criterion(outputs, labels)
# backward + optimize
if mode in ['train']:
self.optimizer.zero_grad()
loss.backward()
self.optimizer.step()
# statistics
prec_1, prec_3, prec_5 = compute_precision_top_k(
__to_tensor__(outputs),
__to_tensor__(labels),
top_k=(1,3,5))
batch_size = inputs.size(0)
prec_losses.update(__to_tensor__(loss)[0], batch_size)
prec_top1.update(prec_1[0], batch_size)
prec_top3.update(prec_3[0], batch_size)
prec_top5.update(prec_5[0], batch_size)
# measure elapsed time
batch_timer.update(time.time() - since)
since = time.time()
# progress
log_msg = ('\n[{mode}][epoch:{epoch}][iter:({batch}/{size})]'+
'[lr:{lr}] loss: {loss:.4f} | top1: {top1:.4f} | ' +
'top3: {top3:.4f} | top5: {top5:.4f} | eta: ' +
'(data:{dt:.3f}s),(batch:{bt:.3f}s),(total:{tt:})') \
.format(
mode=mode,
epoch=self.epoch+1,
batch=batch_idx+1,
size=len(dataloader),
lr=self.lr_scheduler.get_lr()[0],
loss=prec_losses.avg,
top1=prec_top1.avg,
top3=prec_top3.avg,
top5=prec_top5.avg,
dt=data_timer.val,
bt=batch_timer.val,
tt=bar.elapsed_td)
print(log_msg)
bar.next()
bar.finish()
# write to logger
self.logger[mode].append([self.epoch+1,
self.lr_scheduler.get_lr()[0],
prec_losses.avg,
prec_top1.avg,
prec_top3.avg,
prec_top5.avg])
# save model
if mode == 'val' and prec_top1.avg > self.best_acc:
print('best_acc={}, new_best_acc={}'.format(self.best_acc, prec_top1.avg))
self.best_acc = prec_top1.avg
state = {
'epoch': self.epoch,
'acc': self.best_acc,
'optimizer': self.optimizer.state_dict(),
}
self.model.write_to(state)
filename = 'bestshot.pth.tar'
self.__save_checkpoint__(state, ckpt=self.ckpt_path, filename=filename)
def train(self, mode, epoch, train_loader, val_loader):
self.epoch = epoch
if mode in ['train']:
self.model.train()
self.lr_scheduler.step()
dataloader = train_loader
else:
assert mode == 'val'
self.model.eval()
dataloader = val_loader
self.__solve__(mode, epoch, dataloader)
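# Illustrative driver sketch (not part of the original repository): how the
# Solver above is usually exercised from a training script. "config", "model",
# "train_loader", "val_loader" and the "max_epoch" attribute are assumptions
# about the caller, not names defined in this file.
def run_training_sketch(config, model, train_loader, val_loader):
    solver = Solver(config, model)
    for epoch in range(config.max_epoch):  # "max_epoch" is an assumed config field
        solver.train('train', epoch, train_loader, val_loader)
        solver.train('val', epoch, train_loader, val_loader)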
|
src/oci/autoscaling/models/cron_execution_schedule.py | Manny27nyc/oci-python-sdk | 249 | 29200 | <gh_stars>100-1000
# coding: utf-8
# Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from .execution_schedule import ExecutionSchedule
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401
from oci.decorators import init_model_state_from_kwargs
@init_model_state_from_kwargs
class CronExecutionSchedule(ExecutionSchedule):
"""
An autoscaling execution schedule that uses a cron expression.
"""
def __init__(self, **kwargs):
"""
Initializes a new CronExecutionSchedule object with values from keyword arguments. The default value of the :py:attr:`~oci.autoscaling.models.CronExecutionSchedule.type` attribute
of this class is ``cron`` and it should not be changed.
The following keyword arguments are supported (corresponding to the getters/setters of this class):
:param type:
The value to assign to the type property of this CronExecutionSchedule.
:type type: str
:param timezone:
The value to assign to the timezone property of this CronExecutionSchedule.
Allowed values for this property are: "UTC"
:type timezone: str
:param expression:
The value to assign to the expression property of this CronExecutionSchedule.
:type expression: str
"""
self.swagger_types = {
'type': 'str',
'timezone': 'str',
'expression': 'str'
}
self.attribute_map = {
'type': 'type',
'timezone': 'timezone',
'expression': 'expression'
}
self._type = None
self._timezone = None
self._expression = None
self._type = 'cron'
@property
def expression(self):
"""
**[Required]** Gets the expression of this CronExecutionSchedule.
A cron expression that represents the time at which to execute the autoscaling policy.
Cron expressions have this format: `<second> <minute> <hour> <day of month> <month> <day of week> <year>`
You can use special characters that are supported with the Quartz cron implementation.
You must specify `0` as the value for seconds.
Example: `0 15 10 ? * *`
:return: The expression of this CronExecutionSchedule.
:rtype: str
"""
return self._expression
@expression.setter
def expression(self, expression):
"""
Sets the expression of this CronExecutionSchedule.
A cron expression that represents the time at which to execute the autoscaling policy.
Cron expressions have this format: `<second> <minute> <hour> <day of month> <month> <day of week> <year>`
You can use special characters that are supported with the Quartz cron implementation.
You must specify `0` as the value for seconds.
Example: `0 15 10 ? * *`
:param expression: The expression of this CronExecutionSchedule.
:type: str
"""
self._expression = expression
def __repr__(self):
return formatted_flat_dict(self)
def __eq__(self, other):
if other is None:
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
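# Usage sketch (not part of the generated SDK module): building the schedule
# documented above for a policy that should fire at 10:15 UTC every day. The
# keyword names follow the constructor arguments listed in __init__.
def _example_cron_schedule():
    schedule = CronExecutionSchedule(
        timezone="UTC",              # "UTC" is the only allowed value noted above
        expression="0 15 10 ? * *",  # Quartz-style expression, seconds must be 0
    )
    return schedule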
|
thirdparty/his_evaluators/his_evaluators/utils/video.py | tj-eey/impersonator | 1,717 | 29218 | # -*- coding: utf-8 -*-
# @Time : 2019-08-02 18:31
# @Author : <NAME>
# @Email : <EMAIL>
import os
import cv2
import glob
import shutil
from multiprocessing import Pool
from concurrent.futures import ProcessPoolExecutor
from functools import partial
from tqdm import tqdm
import numpy as np
import subprocess
def auto_unzip_fun(x, f):
return f(*x)
def make_video(output_mp4_path, img_path_list, save_frames_dir=None, fps=24):
"""
output_mp4_path is the final mp4 name.
img_path_list is the ordered list of frame image paths to encode.
"""
first_img = cv2.imread(img_path_list[0])
h, w = first_img.shape[:2]
pool_size = 40
tmp_avi_video_path = '%s.avi' % output_mp4_path
fourcc = cv2.VideoWriter_fourcc(*'XVID')
videoWriter = cv2.VideoWriter(tmp_avi_video_path, fourcc, fps, (w, h))
args_list = [(img_path,) for img_path in img_path_list]
with Pool(pool_size) as p:
for img in tqdm(p.imap(partial(auto_unzip_fun, f=cv2.imread), args_list), total=len(args_list)):
videoWriter.write(img)
videoWriter.release()
if save_frames_dir:
for i, img_path in enumerate(img_path_list):
shutil.copy(img_path, '%s/%.8d.jpg' % (save_frames_dir, i))
os.system("ffmpeg -y -i %s -vcodec h264 %s > /dev/null 2>&1" % (tmp_avi_video_path, output_mp4_path))
os.system("rm %s" % tmp_avi_video_path)
def fuse_image(img_path_list, row_num, col_num):
assert len(img_path_list) == row_num * col_num
img_list = [cv2.imread(img_path) for img_path in img_path_list]
row_imgs = []
for i in range(row_num):
col_imgs = img_list[i * col_num: (i + 1) * col_num]
col_img = np.concatenate(col_imgs, axis=1)
row_imgs.append(col_img)
fused_img = np.concatenate(row_imgs, axis=0)
return fused_img
def fuse_video(video_frames_path_list, output_mp4_path, row_num, col_num, fps=24):
assert len(video_frames_path_list) == row_num * col_num
frame_num = len(video_frames_path_list[0])
first_img = cv2.imread(video_frames_path_list[0][0])
h, w = first_img.shape[:2]
fused_h, fused_w = h * row_num, w * col_num
args_list = []
for frame_idx in range(frame_num):
fused_frame_path_list = [video_frames[frame_idx] for video_frames in video_frames_path_list]
args_list.append((fused_frame_path_list, row_num, col_num))
pool_size = 40
tmp_avi_video_path = '%s.avi' % output_mp4_path
fourcc = cv2.VideoWriter_fourcc(*'XVID')
# for args in args_list:
# fuse_image(*args)
# exit()
videoWriter = cv2.VideoWriter(tmp_avi_video_path, fourcc, fps, (fused_w, fused_h))
with Pool(pool_size) as p:
for img in tqdm(p.imap(partial(auto_unzip_fun, f=fuse_image), args_list), total=len(args_list)):
videoWriter.write(img)
videoWriter.release()
os.system("ffmpeg -y -i %s -vcodec h264 %s > /dev/null 2>&1" % (tmp_avi_video_path, output_mp4_path))
os.system("rm %s" % (tmp_avi_video_path))
def merge(src_img, ref_img_path, out_img_path, pad):
h, w = src_img.shape[:2]
image_size = h
ref_img = cv2.imread(ref_img_path)
out_img = cv2.imread(out_img_path)
if ref_img.shape[0] != image_size or ref_img.shape[1] != image_size:
    ref_img = cv2.resize(ref_img, (image_size, image_size))
if out_img.shape[0] != image_size or out_img.shape[1] != image_size:
    out_img = cv2.resize(out_img, (image_size, image_size))
# print(src_img.shape, ref_img.shape, out_img.shape)
merge_img = np.concatenate([src_img, pad, ref_img, pad, out_img], axis=1)
return merge_img
def load_image(image_path, image_size=512):
"""
Args:
image_path (str):
image_size (int):
Returns:
image (np.ndarray): (image_size, image_size, 3), BGR channel space, in the range of [0, 255], np.uint8.
"""
image = cv2.imread(image_path)
image = cv2.resize(image, (image_size, image_size))
return image
def fuse_one_image(img_paths, image_size):
return load_image(img_paths[0], image_size)
def fuse_two_images(img_paths, image_size):
"""
Args:
img_paths (list of str):
image_size (int):
Returns:
fuse_img (np.ndarray): (image_size, image_size // 2, 3), BGR channel space, in the range of [0, 255], np.uint8.
"""
img_size = image_size // 2
img_1 = load_image(img_paths[0], img_size)
img_2 = load_image(img_paths[1], img_size)
fuse_img = np.concatenate([img_1, img_2], axis=0)
return fuse_img
def fuse_four_images(img_paths, image_size):
"""
Args:
img_paths (list of str):
image_size (int):
Returns:
fuse_img (np.ndarray): (image_size, image_size, 3), BGR channel space, in the range of [0, 255], np.uint8.
"""
fuse_img_1 = fuse_two_images(img_paths[0:2], image_size)
fuse_img_2 = fuse_two_images(img_paths[2:4], image_size)
fuse_img = np.concatenate([fuse_img_1, fuse_img_2], axis=1)
return fuse_img
def fuse_eight_images(img_paths, image_size):
"""
Args:
img_paths (list of str):
image_size (int):
Returns:
fuse_img (np.ndarray): (image_size, image_size // 2, 3), BGR channel space, in the range of [0, 255], np.uint8.
"""
fuse_img_1 = fuse_four_images(img_paths[0:4], image_size // 2)
fuse_img_2 = fuse_four_images(img_paths[4:8], image_size // 2)
fuse_img = np.concatenate([fuse_img_1, fuse_img_2], axis=0)
return fuse_img
def fuse_source(all_src_img_paths, image_size=512):
"""
Args:
all_src_img_paths (list of str): the list of source image paths, currently it only supports, 1, 2, 4, 8 number
of source images.
image_size (int): the final image resolution, (image_size, image_size, 3)
Returns:
fuse_img (np.ndarray): (image_size, image_size, 3), BGR channel space, in the range of [0, 255], np.uint8.
"""
ns = len(all_src_img_paths)
# TODO, currently it only supports, 1, 2, 4, 8 number of source images.
assert ns in [1, 2, 4, 8], \
    "{} must be in [1, 2, 4, 8]; only 1, 2, 4 or 8 source images are supported.".format(ns)
if ns == 1:
fuse_img = load_image(all_src_img_paths[0], image_size)
elif ns == 2:
fuse_img = fuse_two_images(all_src_img_paths, image_size)
elif ns == 4:
fuse_img = fuse_four_images(all_src_img_paths, image_size)
elif ns == 8:
fuse_img = fuse_eight_images(all_src_img_paths, image_size)
else:
raise ValueError("{} must be in [1, 2, 4, 8]; only 1, 2, 4 or 8 "
                 "source images are supported.".format(ns))
return fuse_img
def fuse_source_reference_output(output_mp4_path, src_img_paths, ref_img_paths, out_img_paths,
image_size=512, pad=10, fps=25):
total = len(ref_img_paths)
assert total == len(out_img_paths), "{} != {}".format(total, len(out_img_paths))
fused_src_img = fuse_source(src_img_paths, image_size)
pad_region = np.zeros((image_size, pad, 3), dtype=np.uint8)
pool_size = min(15, os.cpu_count())
tmp_avi_video_path = '%s.avi' % output_mp4_path
fourcc = cv2.VideoWriter_fourcc(*'XVID')
W = fused_src_img.shape[1] + (image_size + pad) * 2
videoWriter = cv2.VideoWriter(tmp_avi_video_path, fourcc, fps, (W, image_size))
with ProcessPoolExecutor(pool_size) as pool:
for img in tqdm(pool.map(merge, [fused_src_img] * total,
ref_img_paths, out_img_paths, [pad_region] * total)):
videoWriter.write(img)
videoWriter.release()
os.system("ffmpeg -y -i %s -vcodec h264 %s > /dev/null 2>&1" % (tmp_avi_video_path, output_mp4_path))
os.system("rm %s" % tmp_avi_video_path)
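# Usage sketch (hypothetical paths, not from the original project): wiring the
# helpers above together to render a source/reference/output comparison video.
def _demo_fuse_source_reference_output():
    src_imgs = sorted(glob.glob("./src_frames/*.jpg"))[:2]  # 1, 2, 4 or 8 sources
    ref_imgs = sorted(glob.glob("./ref_frames/*.jpg"))
    out_imgs = sorted(glob.glob("./out_frames/*.jpg"))
    fuse_source_reference_output("comparison.mp4", src_imgs, ref_imgs, out_imgs,
                                 image_size=512, pad=10, fps=25)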
|
parsifal/apps/reviews/migrations/0020_searchresult.py | ShivamPytho/parsifal | 342 | 29244 | <reponame>ShivamPytho/parsifal<filename>parsifal/apps/reviews/migrations/0020_searchresult.py
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import parsifal.apps.reviews.models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('library', '0013_auto_20150710_1614'),
('reviews', '0019_study_comments'),
]
operations = [
migrations.CreateModel(
name='SearchResult',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('imported_file', models.FileField(null=True, upload_to=parsifal.apps.reviews.models.search_result_file_upload_to)),
('documents', models.ManyToManyField(to='library.Document')),
('review', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='reviews.Review')),
('search_session', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='reviews.SearchSession', null=True)),
('source', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='reviews.Source')),
],
),
]
|
mastiff/plugins/analysis/EXE/EXE-singlestring.py | tt1379/mastiff | 164 | 29245 | #!/usr/bin/env python
"""
Copyright 2012-2013 The MASTIFF Project, All Rights Reserved.
This software, having been partly or wholly developed and/or
sponsored by KoreLogic, Inc., is hereby released under the terms
and conditions set forth in the project's "README.LICENSE" file.
For a list of all contributors and sponsors, please refer to the
project's "README.CREDITS" file.
"""
__doc__ = """
Single-byte string plug-in
Plugin Type: EXE
Purpose:
Attackers have begun to obfuscate embedded strings by moving a single byte
at a time into a character array. In assembler, it looks like:
mov mem, 0x68
mov mem+4, 0x69
mov mem+8, 0x21
...
Using a strings program, these strings will not be found. This script looks
for any strings embedded in this way and prints them out. It does this by
looking through the file for C6 opcodes, which are the start of the
"mov mem/reg, imm" instruction. It will then decode it, grab the value and
create a string from it.
Requirements:
- distorm3 (http://code.google.com/p/distorm/)
Output:
None
"""
__version__ = "$Id: 6322146c8d971464c6f726ebdba3a3d7a2540028 $"
import logging
import re
import os
try:
from distorm3 import Decode, Decode32Bits
except ImportError, err:
    print "EXE-SingleString: Could not import distorm3: %s" % err
import mastiff.plugins.category.exe as exe
# Change the class name and the base class
class SingleString(exe.EXECat):
"""Extract single-byte strings from an executable."""
def __init__(self):
"""Initialize the plugin."""
exe.EXECat.__init__(self)
self.length = 3
self.raw = False
def activate(self):
"""Activate the plugin."""
exe.EXECat.activate(self)
def deactivate(self):
"""Deactivate the plugin."""
exe.EXECat.deactivate(self)
def findMov(self, filename):
""" look through the file for any c6 opcode (mov reg/mem, imm)
when it finds one, decode it and put it into a dictionary """
#log = logging.getLogger('Mastiff.Plugins.' + self.name + '.findMov')
f = open(filename,'rb')
offset = 0
instructs = {}
mybyte = f.read(1)
while mybyte:
if mybyte == "\xc6":
# found a mov op - decode and record it
f.seek(offset)
mybyte = f.read(16)
# p will come back as list of (offset, size, instruction, hexdump)
p = Decode(offset, mybyte, Decode32Bits)
# break up the mnemonic
ma = re.match('(MOV) ([\S\s]+), ([x0-9a-fA-F]+)', p[0][2])
if ma is not None:
instructs[offset] = [ma.group(1), ma.group(2), ma.group(3), p[0][1]] # mnemonic, size
#log.debug( "MOV instructions detected: %x %s %d" % (offset,p[0][2],p[0][1]) )
f.seek(offset+1)
mybyte = f.read(1)
offset = offset + 1
f.close()
return instructs
def decodeBytes(self, instructs):
""" Take in a dict of instructions - parse through each instruction and grab the strings """
#log = logging.getLogger('Mastiff.Plugins.' + self.name + '.decodeBytes')
curString = ""
curOffset = 0
strList = []
usedBytes = []
for off in sorted(instructs.keys()):
if off not in usedBytes:
# set up the new offset if needed
if curOffset == 0:
curOffset = off
while off in instructs:
usedBytes.append(off)
hexVal = int(instructs[off][2], 16)
opLen = instructs[off][3]
# is hexVal out of range?
if (hexVal < 32 or hexVal > 126) and hexVal not in (9, 10, 13):
# end of string
#log.debug("%x non-string char - new string: %d: %s" % (curOffset, hexVal,curString))
strList.append([curOffset, curString])
curOffset = off + opLen
curString = ""
else:
#add to string
if not self.raw and hexVal == 10:
    # line feed
    curString = curString + "\\n"
elif not self.raw and hexVal == 13:
    # carriage return
    curString = curString + "\\r"
elif not self.raw and hexVal == 9:
# tab
curString = curString + "\\t"
else:
curString = curString + chr(hexVal)
off = off + opLen
strList.append([curOffset, curString])
curOffset = 0
curString = ""
usedBytes.append(off)
return strList
def analyze(self, config, filename):
"""Analyze the file."""
# sanity check to make sure we can run
if self.is_activated == False:
return False
log = logging.getLogger('Mastiff.Plugins.' + self.name)
log.info('Starting execution.')
self.length = config.get_var(self.name, 'length')
if self.length is None:
self.length = 3
self.raw = config.get_bvar(self.name, 'raw')
# find the bytes in the file
instructs = self.findMov(filename)
# now lets get the strings
strlist = self.decodeBytes(instructs)
self.output_file(config.get_var('Dir','log_dir'), strlist)
return True
def output_file(self, outdir, strlist):
"""Print output from analysis to a file."""
log = logging.getLogger('Mastiff.Plugins.' + self.name + '.output_file')
# if the string is of the right len, print it
outstr = ""
for string in strlist:
if len(string[1]) >= int(self.length):
outstr = outstr + '0x%x: %s\n' % (string[0], string[1])
if len(outstr) > 0:
try:
outfile = open(outdir + os.sep + 'single-string.txt', 'w')
except IOError, err:
log.debug("Cannot open single-string.txt: %s" % err)
return False
outfile.write(outstr)
outfile.close()
else:
log.debug('No single-byte strings found.')
return True
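# Standalone sketch (not part of the plugin above) of the technique described in
# the module docstring: decode a raw C6 opcode with distorm3 and recover the
# immediate byte that a "mov mem, imm8" instruction would write. The byte string
# below is a hand-made example, not taken from a real sample.
def _demo_decode_mov_imm():
    from distorm3 import Decode, Decode32Bits
    code = "\xc6\x45\xf0\x48"  # mov byte [ebp-0x10], 0x48 ('H')
    # Decode returns a list of (offset, size, mnemonic text, hexdump) tuples.
    return Decode(0, code, Decode32Bits)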
|
tests/ext/test_ext_plugin.py | tomekr/cement | 826 | 29252 | <reponame>tomekr/cement
from cement.ext.ext_plugin import CementPluginHandler
# module tests
class TestCementPluginHandler(object):
def test_subclassing(self):
class MyPluginHandler(CementPluginHandler):
class Meta:
label = 'my_plugin_handler'
h = MyPluginHandler()
assert h._meta.interface == 'plugin'
assert h._meta.label == 'my_plugin_handler'
# app functionality and coverage tests
|
opensanctions/crawlers/eu_fsf.py | quantumchips/opensanctions | 102 | 29257 | from prefixdate import parse_parts
from opensanctions import helpers as h
from opensanctions.util import remove_namespace
def parse_address(context, el):
country = el.get("countryDescription")
if country == "UNKNOWN":
country = None
# context.log.info("Addrr", el=el)
return h.make_address(
context,
street=el.get("street"),
po_box=el.get("poBox"),
city=el.get("city"),
place=el.get("place"),
postal_code=el.get("zipCode"),
region=el.get("region"),
country=country,
country_code=el.get("countryIso2Code"),
)
def parse_entry(context, entry):
subject_type = entry.find("./subjectType")
schema = context.lookup_value("subject_type", subject_type.get("code"))
if schema is None:
context.log.warning("Unknown subject type", type=subject_type)
return
entity = context.make(schema)
entity.id = context.make_slug(entry.get("euReferenceNumber"))
entity.add("notes", entry.findtext("./remark"))
entity.add("topics", "sanction")
sanction = h.make_sanction(context, entity)
regulation = entry.find("./regulation")
source_url = regulation.findtext("./publicationUrl", "")
sanction.set("sourceUrl", source_url)
sanction.add("program", regulation.get("programme"))
sanction.add("reason", regulation.get("numberTitle"))
sanction.add("startDate", regulation.get("entryIntoForceDate"))
sanction.add("listingDate", regulation.get("publicationDate"))
for name in entry.findall("./nameAlias"):
if name.get("strong") == "false":
entity.add("weakAlias", name.get("wholeName"))
else:
entity.add("name", name.get("wholeName"))
entity.add("title", name.get("title"), quiet=True)
entity.add("firstName", name.get("firstName"), quiet=True)
entity.add("middleName", name.get("middleName"), quiet=True)
entity.add("lastName", name.get("lastName"), quiet=True)
entity.add("position", name.get("function"), quiet=True)
gender = h.clean_gender(name.get("gender"))
entity.add("gender", gender, quiet=True)
for node in entry.findall("./identification"):
type = node.get("identificationTypeCode")
schema = "Passport" if type == "passport" else "Identification"
passport = context.make(schema)
passport.id = context.make_id("ID", entity.id, node.get("logicalId"))
passport.add("holder", entity)
passport.add("authority", node.get("issuedBy"))
passport.add("type", node.get("identificationTypeDescription"))
passport.add("number", node.get("number"))
passport.add("number", node.get("latinNumber"))
passport.add("startDate", node.get("issueDate"))
passport.add("startDate", node.get("issueDate"))
passport.add("country", node.get("countryIso2Code"))
passport.add("country", node.get("countryDescription"))
for remark in node.findall("./remark"):
passport.add("summary", remark.text)
context.emit(passport)
for node in entry.findall("./address"):
address = parse_address(context, node)
h.apply_address(context, entity, address)
for child in node.getchildren():
if child.tag in ("regulationSummary",):
continue
elif child.tag == "remark":
entity.add("notes", child.text)
elif child.tag == "contactInfo":
prop = context.lookup_value("contact_info", child.get("key"))
if prop is None:
context.log.warning("Unknown contact info", node=child)
else:
entity.add(prop, child.get("value"))
else:
context.log.warning("Unknown address component", node=child)
for birth in entry.findall("./birthdate"):
partialBirth = parse_parts(
birth.get("year"), birth.get("month"), birth.get("day")
)
entity.add("birthDate", birth.get("birthdate"))
entity.add("birthDate", partialBirth)
address = parse_address(context, birth)
if address is not None:
entity.add("birthPlace", address.get("full"))
entity.add("country", address.get("country"))
for node in entry.findall("./citizenship"):
entity.add("nationality", node.get("countryIso2Code"), quiet=True)
entity.add("nationality", node.get("countryDescription"), quiet=True)
context.emit(entity, target=True, unique=True)
context.emit(sanction)
def crawl(context):
path = context.fetch_resource("source.xml", context.dataset.data.url)
context.export_resource(path, "text/xml", title=context.SOURCE_TITLE)
doc = context.parse_resource_xml(path)
doc = remove_namespace(doc)
for entry in doc.findall(".//sanctionEntity"):
parse_entry(context, entry)
|
tools/Sikuli/DoReplace.sikuli/DoReplace.py | marmyshev/vanessa-automation | 296 | 29288 | click(Pattern("Bameumbrace.png").similar(0.80))
sleep(1)
click("3abnb.png")
exit(0)
|
openfda/deploy/tests/adae/test_endpoint.py | hobochili/openfda | 388 | 29300 | <filename>openfda/deploy/tests/adae/test_endpoint.py
# coding=utf-8
import inspect
import sys
from openfda.tests.api_test_helpers import *
def test_nullified_records():
NULLIFIED = ['USA-FDACVM-2018-US-045311', 'USA-FDACVM-2018-US-048571', 'USA-FDACVM-2018-US-046672',
'USA-FDACVM-2017-US-070108', 'USA-FDACVM-2017-US-002864', 'USA-FDACVM-2017-US-002866',
'USA-FDACVM-2017-US-052458', 'USA-FDACVM-2017-US-055193', 'USA-FDACVM-2017-US-043931',
'USA-FDACVM-2018-US-002321', 'USA-FDACVM-2017-US-042492', 'USA-FDACVM-2018-US-044065'
]
for case_num in NULLIFIED:
meta, results = fetch(
'/animalandveterinary/event.json?search=unique_aer_id_number:' + case_num)
eq_(results, None)
def test_single_ae_record():
meta, results = fetch(
'/animalandveterinary/event.json?search=unique_aer_id_number:USA-USFDACVM-2015-US-094810')
eq_(len(results), 1)
ae = results[0]
eq_("USA-USFDACVM-2015-US-094810", ae["unique_aer_id_number"])
eq_(None, ae.get("@id"))
eq_("N141251", ae["report_id"])
eq_("20150126", ae["original_receive_date"])
eq_("Food and Drug Administration Center for Veterinary Medicine", ae["receiver"]["organization"])
eq_("7500 Standish Place (HFV-210) Room N403", ae["receiver"]["street_address"])
eq_("Rockville", ae["receiver"]["city"])
eq_("MD", ae["receiver"]["state"])
eq_("20855", ae["receiver"]["postal_code"])
eq_("USA", ae["receiver"]["country"])
eq_("Other", ae["primary_reporter"])
eq_("Safety Issue", ae["type_of_information"])
eq_("true", ae["serious_ae"])
eq_("1", ae["number_of_animals_treated"])
eq_("1", ae["number_of_animals_affected"])
eq_("Dog", ae["animal"]["species"])
eq_("Male", ae["animal"]["gender"])
eq_("Neutered", ae["animal"]["reproductive_status"])
eq_("NOT APPLICABLE", ae["animal"]["female_animal_physiological_status"])
eq_("1.00", ae["animal"]["age"]["min"])
eq_(None, ae["animal"]["age"].get("max"))
eq_("Year", ae["animal"]["age"]["unit"])
eq_("Measured", ae["animal"]["age"]["qualifier"])
eq_("38.419", ae["animal"]["weight"]["min"])
eq_(None, ae["animal"]["weight"].get("max"))
eq_("Kilogram", ae["animal"]["weight"]["unit"])
eq_("Measured", ae["animal"]["weight"]["qualifier"])
eq_("false", ae["animal"]["breed"]["is_crossbred"])
eq_("Retriever - Labrador", ae["animal"]["breed"]["breed_component"])
eq_("Recovered/Normal", ae["outcome"][0]["medical_status"])
eq_("1", ae["outcome"][0]["number_of_animals_affected"])
eq_("Good", ae["health_assessment_prior_to_exposure"]["condition"])
eq_("Veterinarian", ae["health_assessment_prior_to_exposure"]["assessed_by"])
eq_("20141222", ae["onset_date"])
eq_({'value': '4', 'unit': 'Week'}, ae.get("duration"))
eq_("11", ae["reaction"][0]["veddra_version"])
eq_("129", ae["reaction"][0]["veddra_term_code"])
eq_("Vocalisation", ae["reaction"][0]["veddra_term_name"])
eq_("1", ae["reaction"][0]["number_of_animals_affected"])
eq_("Actual", ae["reaction"][0]["accuracy"])
eq_("11", ae["reaction"][1]["veddra_version"])
eq_("960", ae["reaction"][1]["veddra_term_code"])
eq_("Pruritus", ae["reaction"][1]["veddra_term_name"])
eq_("1", ae["reaction"][1]["number_of_animals_affected"])
eq_("Actual", ae["reaction"][1]["accuracy"])
eq_(None, ae.get("time_between_exposure_and_onset"))
eq_("false", ae["treated_for_ae"])
eq_(1, len(ae["drug"]))
eq_("20141222", ae["drug"][0]["first_exposure_date"])
eq_("20141222", ae["drug"][0]["last_exposure_date"])
eq_("Animal Owner", ae["drug"][0]["administered_by"])
eq_("Topical", ae["drug"][0]["route"])
eq_("1", ae["drug"][0]["dose"]["numerator"])
eq_("tube", ae["drug"][0]["dose"]["numerator_unit"])
eq_("1", ae["drug"][0]["dose"]["denominator"])
eq_("dose", ae["drug"][0]["dose"]["denominator_unit"])
eq_('false', ae["drug"][0].get("used_according_to_label"))
eq_('Overdosed', ae["drug"][0].get("off_label_use"))
eq_("false", ae["drug"][0]["previous_exposure_to_drug"])
eq_(None, ae["drug"][0].get("previous_ae_to_drug"))
eq_(None, ae["drug"][0].get("ae_abated_after_stopping_drug"))
eq_(None, ae["drug"][0].get("ae_reappeared_after_resuming_drug"))
eq_(None, ae["drug"][0].get("manufacturing_date"))
eq_('KP09ECX KP09C4D', ae["drug"][0].get("lot_number"))
eq_('2017-01', ae["drug"][0].get("lot_expiration"))
eq_('000859-2339', ae["drug"][0].get("product_ndc"))
eq_("MSK", ae["drug"][0]["brand_name"])
eq_('Solution', ae["drug"][0]["dosage_form"])
eq_("MSK", ae["drug"][0]["manufacturer"]["name"])
eq_("USA-USFDACVM-N141251", ae["drug"][0]["manufacturer"]["registration_number"])
eq_(None, ae["drug"][0].get("number_of_defective_items"))
eq_(None, ae["drug"][0].get("number_of_items_returned"))
eq_("QP54AB52", ae["drug"][0]["atc_vet_code"])
eq_("Imidacloprid", ae["drug"][0]["active_ingredients"][0]["name"])
eq_("500", ae["drug"][0]["active_ingredients"][0]["dose"]["numerator"])
eq_("Milligram", ae["drug"][0]["active_ingredients"][0]["dose"]["numerator_unit"])
eq_("5", ae["drug"][0]["active_ingredients"][0]["dose"]["denominator"])
eq_("mL", ae["drug"][0]["active_ingredients"][0]["dose"]["denominator_unit"])
if __name__ == '__main__':
all_functions = inspect.getmembers(sys.modules[__name__], inspect.isfunction)
for key, func in all_functions:
if key.find("test_") > -1:
func()
|
Chapter08/chapter8_sflowtool_1.py | stavsta/Mastering-Python-Networking-Second-Edition | 107 | 29309 | #!/usr/bin/env python3
import sys, re
for line in iter(sys.stdin.readline, ''):
if re.search('agent ', line):
print(line.strip())
|
pkgs/nbconvert-4.1.0-py27_0/lib/python2.7/site-packages/nbconvert/filters/markdown.py | wangyum/anaconda | 652 | 29343 | <gh_stars>100-1000
"""Markdown filters
This file contains a collection of utility filters for dealing with
markdown within Jinja templates.
"""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
from __future__ import print_function
import os
import subprocess
from io import TextIOWrapper, BytesIO
try:
from .markdown_mistune import markdown2html_mistune
except ImportError as e:
# store in variable for Python 3
_mistune_import_error = e
def markdown2html_mistune(source):
"""mistune is unavailable, raise ImportError"""
raise ImportError("markdown2html requires mistune: %s" % _mistune_import_error)
from nbconvert.utils.pandoc import pandoc
from nbconvert.utils.exceptions import ConversionException
from nbconvert.utils.version import check_version
from ipython_genutils.py3compat import cast_bytes
__all__ = [
'markdown2html',
'markdown2html_pandoc',
'markdown2html_mistune',
'markdown2latex',
'markdown2rst',
]
def markdown2latex(source, markup='markdown', extra_args=None):
"""Convert a markdown string to LaTeX via pandoc.
This function will raise an error if pandoc is not installed.
Any error messages generated by pandoc are printed to stderr.
Parameters
----------
source : string
Input string, assumed to be valid markdown.
markup : string
Markup used by pandoc's reader
default : pandoc extended markdown
(see http://johnmacfarlane.net/pandoc/README.html#pandocs-markdown)
Returns
-------
out : string
Output as returned by pandoc.
"""
return pandoc(source, markup, 'latex', extra_args=extra_args)
def markdown2html_pandoc(source, extra_args=None):
"""Convert a markdown string to HTML via pandoc"""
extra_args = extra_args or ['--mathjax']
return pandoc(source, 'markdown', 'html', extra_args=extra_args)
# The mistune renderer is the default, because it's simple to depend on it
markdown2html = markdown2html_mistune
def markdown2rst(source, extra_args=None):
"""Convert a markdown string to ReST via pandoc.
This function will raise an error if pandoc is not installed.
Any error messages generated by pandoc are printed to stderr.
Parameters
----------
source : string
Input string, assumed to be valid markdown.
Returns
-------
out : string
Output as returned by pandoc.
"""
return pandoc(source, 'markdown', 'rst', extra_args=extra_args)
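# Usage sketch (not part of nbconvert itself): exercising the filters above.
# markdown2html uses mistune and needs no external tools; the pandoc-backed
# filters require a pandoc binary on PATH.
def _demo_markdown_filters():
    source = "# Title\n\nSome *markdown* text."
    html = markdown2html(source)
    rst = markdown2rst(source)
    return html, rst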
|
datasets/__init__.py | Masterchef365/pvcnn | 477 | 29365 | from datasets.s3dis import S3DIS
|
preprocess/conll_to_factors.py | thilakshiK/wmt16-scripts | 132 | 29398 | <reponame>thilakshiK/wmt16-scripts<filename>preprocess/conll_to_factors.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Author: <NAME>
# Distributed under MIT license
# take conll file, and bpe-segmented text, and produce factored output
import sys
import re
from collections import namedtuple
Word = namedtuple(
'Word',
['pos', 'word', 'lemma', 'tag', 'morph', 'head', 'func', 'proj_head', 'proj_func'])
def escape_special_chars(line):
line = line.replace('\'', ''') # xml
line = line.replace('"', '"') # xml
line = line.replace('[', '[') # syntax non-terminal
line = line.replace(']', ']') # syntax non-terminal
line = line.replace('|', '|')
return line
def read_sentences(fobj):
sentence = []
for line in fobj:
if line == "\n":
yield sentence
sentence = []
continue
try:
(
pos,
word,
lemma,
tag,
tag2,
morph,
head,
func,
proj_head,
proj_func,
) = line.split()
except ValueError: # Word may be unicode whitespace.
(
pos,
word,
lemma,
tag,
tag2,
morph,
head,
func,
proj_head,
proj_func,
) = re.split(' *\t*', line.strip())
word = escape_special_chars(word)
lemma = escape_special_chars(lemma)
morph = morph.replace('|',',')
if proj_head == '_':
proj_head = head
proj_func = func
sentence.append(
Word(
int(pos), word, lemma, tag2, morph, int(head), func, int(proj_head),
proj_func))
def get_factors(sentence, idx):
word = sentence[idx]
factors = [word.lemma, word.tag, word.func]
return factors
#text file that has been preprocessed and split with BPE
bpe_file = open(sys.argv[1])
#conll file with annotation of original corpus; mapping is done by index, so number of sentences and words (before BPE) must match
conll_file = open(sys.argv[2])
conll_sentences = read_sentences(conll_file)
for line in bpe_file:
state = "O"
i = 0
sentence = conll_sentences.next()
for word in line.split():
factors = get_factors(sentence, i)
if word.endswith('@@'):
if state == "O" or state == "E":
state = "B"
elif state == "B" or state == "I":
state = "I"
else:
i += 1
if state == "B" or state == "I":
state = "E"
else:
state = "O"
sys.stdout.write('|'.join([word, state] + factors) + ' ')
sys.stdout.write('\n')
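# Worked example (hypothetical token, not from a real corpus): for a BPE piece
# "Haus@@" at the start of a word whose CoNLL row carries lemma "Haus", tag "NN"
# and dependency function "SB", the loop above writes the factored token
# "Haus@@|B|Haus|NN|SB".
def _example_factored_token():
    word, state, factors = 'Haus@@', 'B', ['Haus', 'NN', 'SB']
    return '|'.join([word, state] + factors)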
|
Interview Preparation Kit/01 - Warm-up Challenges/04 - Repeated String.py | srgeyK87/Hacker-Rank-30-days-challlenge | 275 | 29420 | # ========================
# Information
# ========================
# Direct Link: https://www.hackerrank.com/challenges/repeated-string/problem
# Difficulty: Easy
# Max Score: 20
# Language: Python
# ========================
# Solution
# ========================
import os
# Complete the repeatedString function below.
def repeatedString(s, n):
count_1 = n//len(s) * s.count('a')
remained_string = n%len(s)
count_2 = s[:remained_string].count('a')
return count_1 + count_2
if __name__ == '__main__':
fptr = open(os.environ['OUTPUT_PATH'], 'w')
s = input()
n = int(input())
result = repeatedString(s, n)
fptr.write(str(result) + '\n')
fptr.close()
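# Quick self-check (not part of the HackerRank I/O harness above): "aba"
# repeated to length 10 is "abaabaabaa"; 3 full copies contribute 2 'a' each
# and the remaining prefix "a" adds one more, giving 7.
def _self_check():
    assert repeatedString("aba", 10) == 7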
|
docs/sample_code/debugging_info/src/dataset.py | mindspore-ai/docs | 288 | 29434 | # Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""dataset
Custom dataset.
"""
import numpy as np
from mindspore import dataset as ds
def get_data(num, img_size=(1, 32, 32), num_classes=10, is_onehot=True):
for _ in range(num):
img = np.random.randn(*img_size)
target = np.random.randint(0, num_classes)
target_ret = np.array([target]).astype(np.float32)
if is_onehot:
target_onehot = np.zeros(shape=(num_classes,))
target_onehot[target] = 1
target_ret = target_onehot.astype(np.float32)
yield img.astype(np.float32), target_ret
def create_train_dataset(num_data=32768, batch_size=32, repeat_size=1):
input_data = ds.GeneratorDataset(list(get_data(num_data)), column_names=['data', 'label'])
input_data = input_data.batch(batch_size, drop_remainder=True)
input_data = input_data.repeat(repeat_size)
return input_data
def create_eval_dataset(num_data=2048, batch_size=2048, repeat_size=1):
input_data = ds.GeneratorDataset(list(get_data(num_data)), column_names=['data', 'label'])
input_data = input_data.batch(batch_size)
input_data = input_data.repeat(repeat_size)
return input_data
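# Usage sketch (not part of the tutorial source): build the random training set
# and pull one batch to confirm the shapes produced by the generator above.
def _demo_one_batch():
    train_ds = create_train_dataset(num_data=64, batch_size=32)
    for data, label in train_ds.create_tuple_iterator():
        return data.shape, label.shape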
|
.setup/bin/input_forum_data.py | zeez2030/Submitty | 411 | 29445 | #!/usr/bin/env python3
import os
import sys
import json
from datetime import datetime
from submitty_utils import dateutils
def generatePossibleDatabases():
current = dateutils.get_current_semester()
pre = 'submitty_' + current + '_'
path = "/var/local/submitty/courses/" + current
return [pre + name for name in sorted(os.listdir(path)) if os.path.isdir(path + "/" + name)]
if(__name__ == "__main__"):
num_args = len(sys.argv)
possible_databases = generatePossibleDatabases()
database = possible_databases[0]
if(num_args > 2):
print('Too many arguments. Use --help for help.')
sys.exit()
elif(num_args == 2):
if(sys.argv[1] == '--help' or sys.argv[1] == '-h'):
print('This tool can be used to test forum scalability -- pg_dump after execution to save the test data which can be sourced later.')
print('This tool takes in an optional argument: database, so an example usage is: `python3 input_forum_data.py submitty_f18_blank`')
print('Note this will delete forum data in the database you specify. The database will default to `submitty_f18_blank` if not specified.')
sys.exit()
elif(sys.argv[1] not in possible_databases):
print('Unknown argument: {:s}, use --help or -h for help.'.format(sys.argv[1]))
sys.exit()
database = sys.argv[1]
threads = abs(int(input("Enter number of threads (i.e. 1000): ").strip()))
posts = abs(int(input("Enter number of posts per thread (i.e. 20): ").strip()))
usr_path = "/usr/local/submitty"
settings = json.load(open(os.path.join(usr_path, ".setup", "submitty_conf.json")))
print("WARNING: This tool is going to delete data from the following tables:\n\tthreads\n\tposts\n\tforum_posts_history\n\tstudent_favorites\n\tviewed_responses\n\tthread_categories\n\tcategories_list")
answer = input("Do you agree for this data to be removed from {:s}? [yes/no]: ".format(database)).strip()
if(answer.lower() != "yes"):
print("Exiting...")
sys.exit()
variables = (settings['database_password'], settings['database_host'], settings['database_user'], database)
os.system("""PGPASSWORD='{}' psql --host={} --username={} --dbname={} -c \"TRUNCATE TABLE threads RESTART IDENTITY CASCADE\" > /dev/null""".format(*variables))
os.system("""PGPASSWORD='{}' psql --host={} --username={} --dbname={} -c \"DELETE FROM thread_categories\" > /dev/null""".format(*variables))
os.system("""PGPASSWORD='{}' psql --host={} --username={} --dbname={} -c \"TRUNCATE TABLE categories_list RESTART IDENTITY CASCADE\" > /dev/null""".format(*variables))
os.system("""PGPASSWORD='{}' psql --host={} --username={} --dbname={} -c \"INSERT INTO categories_list (category_desc) VALUES ('TESTDATA')\" > /dev/null""".format(*variables))
print()
for i in range(threads):
if((i+1) % 10 == 0):
print("Completed: {:d}/{:d}".format(i+1, threads))
os.system("""PGPASSWORD='{}' psql --host={} --username={} --dbname={} -c \"INSERT INTO threads (title, created_by, pinned, deleted, merged_thread_id, merged_post_id, is_visible) VALUES (\'{:s}\', \'{:s}\', false, false, -1, -1, true)\" > /dev/null""".format(*variables, "Thread{:d}".format(i+1), "aphacker"))
os.system("""PGPASSWORD='{}' psql --host={} --username={} --dbname={} -c \"INSERT INTO thread_categories (thread_id, category_id) VALUES ({:d}, 1)\" > /dev/null""".format(*variables, i+1))
for pid in range(posts):
os.system("""PGPASSWORD='{}' psql --host={} --username={} --dbname={} -c \"INSERT INTO posts (thread_id, parent_id, author_user_id, content, timestamp, anonymous, deleted, type, has_attachment) VALUES ({}, {}, {}, {}, \'{}\', false, false, 0, false)\" > /dev/null""".format(*variables, i+1, -1 if pid == 0 else i*posts + pid, "'aphacker'", "'Post{:d}'".format(i*posts + pid+1), datetime.now()))
|
webspider/utils/log.py | chem2099/webspider | 256 | 29461 | # coding: utf-8
import os
import logging.config
from webspider import setting
LOG_FILE_PATH = os.path.join(setting.BASE_DIR, 'log', 'spider_log.txt')
LOGGING_CONFIG = {
'version': 1,
'disable_existing_loggers': True,
'formatters': {
'default': {
'format': '%(asctime)s- %(module)s:%(lineno)d [%(levelname)1.1s] %(name)s: %(message)s',
'datefmt': '%Y/%m/%d %H:%M:%S'
},
},
'handlers': {
'console': {
'level': 'DEBUG',
'formatter': 'default',
'class': 'logging.StreamHandler'
},
'smtp': {
'level': 'ERROR',
'class': 'logging.handlers.SMTPHandler',
'formatter': 'default',
'mailhost': (setting.SMTP_CONF['host'], setting.SMTP_CONF['port']),
'fromaddr': setting.SMTP_CONF['from_email'],
'toaddrs': [setting.SMTP_CONF['to_email'], ],
'subject': 'An exception occurred in the crawler system',
'credentials': (setting.MAIL_CONF['username'], setting.MAIL_CONF['password'])
},
'file': {
'level': 'ERROR',
'formatter': 'default',
'class': 'logging.handlers.RotatingFileHandler',
'filename': LOG_FILE_PATH,
'encoding': 'utf8'
},
},
'loggers': {
'': {
'handlers': ['console', 'file'],
'level': 'DEBUG',
'propagate': False,
},
'webspider': {
'handlers': ['console', 'file'],
'level': 'DEBUG',
'propagate': False,
},
'tornado': {
'handlers': ['console', 'file'],
'level': 'DEBUG',
'propagate': False,
},
'tornado.access': {
'handlers': ['console', 'file'],
'level': 'INFO',
'propagate': False,
},
'tornado.application': {
'handlers': ['console', 'file'],
'level': 'INFO',
'propagate': False,
},
'tornado.general': {
'handlers': ['console', 'file'],
'propagate': False,
'level': 'INFO',
},
'sqlalchemy.engine': {
'handlers': ['console', 'file'],
'level': 'INFO',
'propagate': False,
},
'gunicorn': {
'handlers': ['console', 'file'],
'level': 'INFO',
'propagate': False,
},
'celery': {
'handlers': ['console', 'file'],
'level': 'DEBUG',
'propagate': False,
},
},
}
def config_logging():
    """Configure logging."""
logging.config.dictConfig(LOGGING_CONFIG)
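# Usage sketch: call config_logging() once at process start-up, then log through
# the standard logging module as usual.
def _demo_logging():
    config_logging()
    logging.getLogger('webspider').info('logging configured')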
|
apps/show_plots.py | avdmitry/convnet | 293 | 29507 | import glob
import matplotlib.pyplot as plt
import numpy as np
import sys
plt.ion()
data_files = list(glob.glob(sys.argv[1]+'/mnist_net_*_train.log'))
valid_data_files = list(glob.glob(sys.argv[1]+'/mnist_net_*_valid.log'))
for fname in data_files:
data = np.loadtxt(fname).reshape(-1, 3)
name = fname.split('/')[-1]
plt.plot(data[:, 0], 1-data[:, 2], label=name)
for fname in valid_data_files:
data = np.loadtxt(fname).reshape(-1, 2)
name = fname.split('/')[-1]
plt.plot(data[:, 0], 1-data[:, 1], label=name)
plt.legend(loc=1)
raw_input('Press Enter.')
|