max_stars_repo_path (string, length 4-245) | max_stars_repo_name (string, length 7-115) | max_stars_count (int64, 101-368k) | id (string, length 2-8) | content (string, length 6-1.03M) |
---|---|---|---|---|
anuga/utilities/parse.py | samcom12/anuga_core | 136 | 12672094 | from .data_audit import license_file_is_valid
import sys
def print_tree(n, indent=0):
while n:
print(" "*indent, n)
print_tree(n.firstChild, indent+4)
n = n.nextSibling
fid = open(sys.argv[1])
license_file_is_valid(fid, '.')
fid.close()
|
test/integration/expected_out_single_line/CantAffordActiveException.py | Inveracity/flynt | 487 | 12672099 |
from exceptions import PydolonsError
class CantAffordActiveError(PydolonsError):
def __init__(self, active, missing):
assert missing in ["mana", "stamina", "health"]
self.active = active
self.missing = missing
def __repr__(self):
return f"Need more {self.missing} to activate {self.active}" |
galileo/framework/tf/python/convolutions/sage_layer.py | YaoPu2021/galileo | 115 | 12672101 | # Copyright 2020 JD.com, Inc. Galileo Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import tensorflow as tf
from tensorflow.keras.layers import Layer, Dropout
from galileo.platform.export import export
from galileo.framework.python.base_message_passing import BaseMessagePassing
from galileo.framework.tf.python.layers.aggregators import get_aggregator
@export('galileo.tf')
class SAGELayer(Layer, BaseMessagePassing):
r'''
\brief graphSAGE convolution tf layer
`"Inductive Representation Learning on Large Graphs"
<https://arxiv.org/abs/1706.02216>`
'''
def __init__(self,
output_dim: int,
aggregator_name: str = 'mean',
use_concat_in_aggregator: bool = True,
bias: bool = True,
dropout_rate: float = 0.0,
activation=None,
normalization=None,
**kwargs):
r'''
\param output_dim output dim of layer
\param aggregator_name aggregator name, one of
"mean, gcn, meanpool, maxpool, lstm"
\param use_concat_in_aggregator concat if True else sum when aggregate
\param bias bias of layer
\param dropout_rate feature dropout rate
\param activation callable, apply activation to
the updated vertices features
\param normalization callable, apply normalization to
the updated vertices features
'''
        # TensorFlow replaces the base class with base_layer_v1.Layer when an
        # Estimator is used, so Layer.__init__() cannot be called directly here
self.__class__.__bases__[0].__init__(self, **kwargs)
BaseMessagePassing.__init__(self)
self.output_dim = output_dim
self.aggregator_name = aggregator_name
self.use_concat_in_aggregator = use_concat_in_aggregator
self.bias = bias
self.dropout_rate = dropout_rate
self.activation = activation
self.normalization = normalization
aggregator_class = get_aggregator(aggregator_name)
self.aggregator = aggregator_class(output_dim,
use_concat_in_aggregator, bias)
self.feature_dropout = Dropout(dropout_rate)
def call(self, inputs, training=None):
r'''
\param inputs
tensors of inputs shape (batch_size, *, fanouts, feature_dim)
'''
return BaseMessagePassing.__call__(self, inputs, training=training)
def message(self, inputs, training=None):
src = inputs['src_feature']
dst = inputs['dst_feature']
edge_weight = inputs.get('edge_weight')
if edge_weight is not None:
dst = dst * edge_weight
src = self.feature_dropout(src, training=training)
dst = self.feature_dropout(dst, training=training)
return dict(src_feature=src, dst_feature=dst)
def aggregate(self, inputs):
src = inputs['src_feature']
dst = inputs['dst_feature']
# dst -> src is direction of aggregation
return self.aggregator((src, dst))
def update(self, inputs):
if callable(self.activation):
inputs = self.activation(inputs)
if callable(self.normalization):
inputs = self.normalization(inputs)
return inputs
def get_config(self):
config = super().get_config()
config.update(
dict(
output_dim=self.output_dim,
aggregator_name=self.aggregator_name,
use_concat_in_aggregator=self.use_concat_in_aggregator,
bias=self.bias,
dropout_rate=self.dropout_rate,
activation=self.activation,
normalization=self.normalization,
))
return config
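
# A minimal usage sketch (not part of the original file; the exact call protocol
# depends on BaseMessagePassing, which is not shown here). Based on message(),
# the layer expects a dict with 'src_feature', 'dst_feature' and an optional
# 'edge_weight', e.g.:
#   layer = SAGELayer(output_dim=64, aggregator_name='mean',
#                     dropout_rate=0.1, activation=tf.nn.relu)
#   out = layer(dict(src_feature=src, dst_feature=dst), training=True)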
|
nflows/version.py | Tennessee-Wallaceh/nflows | 522 | 12672147 | __version__ = "0.14"
VERSION = __version__
|
src/embedding/conf.py | mykiscool/DeepCamera | 914 | 12672161 | # -*- coding: UTF-8 -*-
# run: $gunicorn -c conf.py upload_api:app
import sys
import os
import multiprocessing
sys.path.append(os.path.abspath('upload_api.py'))
sys.path.append('.')
sys.path.append('..')
from upload_api import crons_start
path_of_current_file = os.path.abspath(__file__)
path_of_current_dir = os.path.split(path_of_current_file)[0]
_file_name = os.path.basename(__file__)
bind = '0.0.0.0:5000'
workers = 3
# workers = multiprocessing.cpu_count() * 2 + 1
worker_class = 'sync'
timeout = 100
# debug=True
loglevel = 'debug'
# pidfile = '%s/run/%s.pid' % (path_of_current_dir, _file_name)
errorlog = '%s/logs/%s_error.log' % (path_of_current_dir, _file_name)
accesslog = '%s/logs/%s_access.log' % (path_of_current_dir, _file_name)
def on_starting(server):
    # things to run before the gunicorn master process starts
crons_start()
|
eeauditor/tests/test_Amazon_EFS_Auditor.py | kbhagi/ElectricEye | 442 | 12672176 | #This file is part of ElectricEye.
#SPDX-License-Identifier: Apache-2.0
#Licensed to the Apache Software Foundation (ASF) under one
#or more contributor license agreements. See the NOTICE file
#distributed with this work for additional information
#regarding copyright ownership. The ASF licenses this file
#to you under the Apache License, Version 2.0 (the
#"License"); you may not use this file except in compliance
#with the License. You may obtain a copy of the License at
#http://www.apache.org/licenses/LICENSE-2.0
#Unless required by applicable law or agreed to in writing,
#software distributed under the License is distributed on an
#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
#KIND, either express or implied. See the License for the
#specific language governing permissions and limitations
#under the License.
import datetime
import os
import pytest
import sys
import botocore
from botocore.stub import Stubber, ANY
from . import context
from auditors.aws.Amazon_EFS_Auditor import (
efs_filesys_encryption_check,
describe_file_systems,
efs_filesys_policy_check,
efs
)
describe_file_systems = {
"FileSystems": [{
"FileSystemId": "MyEFS",
"OwnerId": "Owner12345",
"CreationToken": 'egCreationToken',
"CreationTime": '2015-01-01',
"LifeCycleState": 'available',
"NumberOfMountTargets": 1,
"SizeInBytes": {'Value': 123,'Timestamp': '2015-01-01','ValueInIA': 123,'ValueInStandard': 123},
"PerformanceMode": "generalPurpose",
"Encrypted": True,
"Tags": [{'Key': 'EgKey', 'Value': 'EgValue'}]
}]
}
describe_file_systems_blank = {
"FileSystems": []
}
describe_file_systems_enc_false = {
"FileSystems": [{
"FileSystemId": "MyEFS",
"OwnerId": "Owner12345",
"CreationToken": 'egCreationToken',
"CreationTime": '2015-01-01',
"LifeCycleState": 'available',
"NumberOfMountTargets": 1,
"SizeInBytes": {'Value': 123,'Timestamp': '2015-01-01','ValueInIA': 123,'ValueInStandard': 123},
"PerformanceMode": "generalPurpose",
"Encrypted": False,
"Tags": [{'Key': 'EgKey', 'Value': 'EgValue'}]
}]
}
file_system_policy = {
"FileSystemId": 'MyEFS',
"Policy": '{"Version": "2012-10-17", \
"Id": "ExamplePolicy01", \
"Statement": [ \
{ "Sid": "ExampleSatement01", \
"Effect": "Allow", \
"Principal": { \
"AWS": "arn:aws:iam::111122223333:user/CarlosSalazar"}, \
"Action": [ \
"elasticfilesystem:ClientMount", \
"elasticfilesystem:ClientWrite"], \
"Resource": "arn:aws:elasticfilesystem:us-east-2:111122223333:file-system/MyEFS", \
"Condition": {"Bool": {"aws:SecureTransport": "true"}}}]}'
}
@pytest.fixture(scope="function")
def efs_stubber():
efs_stubber = Stubber(efs)
efs_stubber.activate()
yield efs_stubber
efs_stubber.deactivate()
def test_efs_encryption_true(efs_stubber):
efs_stubber.add_response("describe_file_systems", describe_file_systems)
results = efs_filesys_encryption_check(
cache={}, awsAccountId="012345678901", awsRegion="us-east-1", awsPartition="aws"
)
for result in results:
if "MyEFS" in result["Id"]:
assert result["RecordState"] == "ARCHIVED"
else:
assert False
efs_stubber.assert_no_pending_responses()
def test_efs_encryption_false(efs_stubber):
efs_stubber.add_response("describe_file_systems", describe_file_systems_enc_false)
results = efs_filesys_encryption_check(
cache={}, awsAccountId="012345678901", awsRegion="us-east-1", awsPartition="aws"
)
for result in results:
if "MyEFS" in result["Id"]:
assert result["RecordState"] == "ACTIVE"
else:
assert False
efs_stubber.assert_no_pending_responses()
def test_efs_policy(efs_stubber):
efs_stubber.add_response("describe_file_systems", describe_file_systems)
efs_stubber.add_response("describe_file_system_policy", file_system_policy)
results = efs_filesys_policy_check(
cache={}, awsAccountId="012345678901", awsRegion="us-east-1", awsPartition="aws"
)
for result in results:
if "MyEFS" in result["Id"]:
assert result["RecordState"] == "ARCHIVED"
else:
assert False
efs_stubber.assert_no_pending_responses()
def test_efs_no_policy(efs_stubber):
efs_stubber.add_response("describe_file_systems", describe_file_systems)
efs_stubber.add_client_error("describe_file_system_policy", 'FileSystemNotFound')
results = efs_filesys_policy_check(
cache={}, awsAccountId="012345678901", awsRegion="us-east-1", awsPartition="aws"
)
for result in results:
if "MyEFS" in result["Id"]:
assert result["RecordState"] == "ACTIVE"
else:
assert False
efs_stubber.assert_no_pending_responses()
def test_efs_no_fs(efs_stubber):
efs_stubber.add_response("describe_file_systems", describe_file_systems_blank)
results = efs_filesys_policy_check(
cache={}, awsAccountId="012345678901", awsRegion="us-east-1", awsPartition="aws"
)
assert len(list(results)) == 0
efs_stubber.assert_no_pending_responses() |
apps/yolo/pycocoEvalDemo.py | Saums/ml-suite | 334 | 12672194 | #!/usr/bin/env python
#
# // SPDX-License-Identifier: BSD-3-CLAUSE
#
# (C) Copyright 2018, Xilinx, Inc.
#
import os,sys
## THIS FILE CANNOT BE USED UNLESS THE USER CLONES the COCOAPI parallel to the MLSUITE
## AND THEY DOWNLOAD THE VAL2014 annotations and images
## Also, the PythonAPI directory needs to be built
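## Illustrative layout implied by the relative paths used below (directory names
## are assumptions, not verified):
##   <workspace>/cocoapi/PythonAPI                          <- built COCO Python API
##   <workspace>/cocoapi/annotations/instances_val2014.json
##   <workspace>/ml-suite/apps/yolo/pycocoEvalDemo.py       <- this script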
# Bring in the COCO API for managing the coco dataset
sys.path.insert(0,os.path.abspath("../../../cocoapi/PythonAPI"))
from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval
import numpy as np
annType = ['segm','bbox','keypoints']
annType = annType[1] #specify type here
prefix = 'person_keypoints' if annType=='keypoints' else 'instances'
print 'Running demo for *%s* results.'%(annType)
#initialize COCO ground truth api
dataDir='../../../cocoapi'
dataType='val2014'
annFile = '%s/annotations/%s_%s.json'%(dataDir,prefix,dataType)
cocoGt=COCO(annFile)
#initialize COCO detections api
#resFile='%s/results/%s_%s_fake%s100_results.json'
#resFile = resFile%(dataDir, prefix, dataType, annType)
resFile = './results.json'
cocoDt=cocoGt.loadRes(resFile)
imgIds=sorted(cocoGt.getImgIds())
# running evaluation
cocoEval = COCOeval(cocoGt,cocoDt,annType)
cocoEval.params.imgIds = imgIds
cocoEval.evaluate()
cocoEval.accumulate()
cocoEval.summarize()
|
OrderedDict_tutorial.py | twtrubiks/python-notes | 106 | 12672303 |
# Python 3.6 introduced a new implementation of dict.
# dict now keeps its items ordered as well.
# An OrderedDict is a dictionary subclass that remembers the order in which its contents are added.
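# For comparison (illustrative, not part of the original note): since Python 3.7
# a plain dict is guaranteed to preserve insertion order as well, e.g.
#   d = {}
#   d['a'] = 'A'
#   d['b'] = 'B'
#   print(list(d))  # ['a', 'b']
# OrderedDict remains useful for popitem(last=...) and move_to_end(), shown below.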
from collections import OrderedDict
if __name__ == "__main__":
order_dic = OrderedDict()
order_dic['a'] = 'A'
order_dic['b'] = 'B'
order_dic['c'] = 'C'
order_dic['d'] = 'D'
order_dic['e'] = 'E'
for k, v in order_dic.items():
print(k, v)
# https://docs.python.org/3/library/collections.html#collections.OrderedDict.popitem
# last=True -> LIFO
# last=False -> FIFO
print(
order_dic.popitem(last=True) # -> ('e', 'E')
)
print(
order_dic.popitem(last=False) # -> ('a', 'A')
)
# https://docs.python.org/3/library/collections.html#collections.OrderedDict.move_to_end
# last=True -> The item is moved to the right end
# last=False -> The item is moved to the beginning
print('origin:', order_dic) # OrderedDict([('b', 'B'), ('c', 'C'), ('d', 'D')])
order_dic.move_to_end('b', last=True)
print(order_dic) # OrderedDict([('c', 'C'), ('d', 'D'), ('b', 'B')])
order_dic.move_to_end('b', last=False)
print(order_dic) # OrderedDict([('b', 'B'), ('c', 'C'), ('d', 'D')]) |
mutpy/test_runners/__init__.py | f-str/mutpy | 284 | 12672355 |
from .unittest_runner import UnittestTestRunner
def pytest_installed():
import importlib
pytest_loader = importlib.find_loader('pytest')
return pytest_loader is not None
class TestRunnerNotInstalledException(Exception):
pass
def __pytest_not_installed(*args, **kwargs):
raise TestRunnerNotInstalledException(
'Pytest is not installed. Please run "pip install pytest" to resolve this issue.'
)
if pytest_installed():
from .pytest_runner import PytestTestRunner
else:
PytestTestRunner = __pytest_not_installed
|
tests/__init__.py | sjoerdk/sitdown | 101 | 12672371 |
from pathlib import Path
BASE_PATH = Path(__file__).parent.absolute()
RESOURCE_PATH = BASE_PATH / "resources"
|
peering/migrations/0019_router_netbox_device_id.py | schiederme/peering-manager | 173 | 12672426 | # Generated by Django 2.1.3 on 2018-11-08 15:45
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [("peering", "0018_auto_20181014_1612")]
operations = [
migrations.AddField(
model_name="router",
name="netbox_device_id",
field=models.PositiveIntegerField(blank=True, default=0),
)
]
|
reudom/testdata/aes.py | BarryYBL/reudom | 393 | 12672442 | import requests
aes_url = 'http://tool.chacuo.net/cryptaes'
header = {
'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) '
'AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.67 Safari/537.36'
}
ECB = 'ecb'
PKC = 'pkcs5'
BLOCK = '128'
PWD = '<PASSWORD>'
IV = '123456'
O = '0'
S = 'gb2312'
T = '0'
ARG = 'm=' + ECB + '_pad=' + PKC + '_block=' + BLOCK + '_p=' + PWD + '_i=' + IV + '_o=' + O + '_s=' + S + '_t=' + T  # use the O, S, T variables instead of stray literals
print(ARG)
data = {
'data': '土豆',
'type': 'aes',
'arg': ARG
}
result = requests.post(url=aes_url, headers=header, data=data)
print(result.text)
|
vendor/cloud.google.com/go/httpreplay/cmd/httpr/examples/python/httpr-demo.py | maxnordlund/transfer.sh | 3,723 | 12672460 | from __future__ import print_function
import sys
from google.auth.credentials import AnonymousCredentials
from google.cloud import storage
if len(sys.argv)-1 != 3:
print('args: PROJECT BUCKET record|replay')
sys.exit(1)
project = sys.argv[1]
bucket_name = sys.argv[2]
mode = sys.argv[3]
if mode == 'record':
creds = None # use default creds for demo purposes; not recommended
client = storage.Client(project=project)
elif mode == 'replay':
creds = AnonymousCredentials()
else:
print('want record or replay')
sys.exit(1)
client = storage.Client(project=project, credentials=creds)
bucket = client.get_bucket(bucket_name)
print('bucket %s created %s' %(bucket.id, bucket.time_created))
|
benchmarks/20_quantity.py | KOLANICH-libs/pint | 1,545 | 12672465 | import itertools as it
import operator
import pint
from . import util
units = ("meter", "kilometer", "second", "minute", "angstrom")
all_values = ("int", "float", "complex")
all_values_q = tuple(
"%s_%s" % (a, b) for a, b in it.product(all_values, ("meter", "kilometer"))
)
op1 = (operator.neg, operator.truth)
op2_cmp = (operator.eq,) # operator.lt)
op2_math = (operator.add, operator.sub, operator.mul, operator.truediv)
ureg = None
data = {}
def setup(*args):
global ureg, data
data["int"] = 1
data["float"] = 1.0
data["complex"] = complex(1, 2)
ureg = pint.UnitRegistry(util.get_tiny_def())
for key in all_values:
data[key + "_meter"] = data[key] * ureg.meter
data[key + "_kilometer"] = data[key] * ureg.kilometer
def time_build_by_mul(key):
data[key] * ureg.meter
time_build_by_mul.params = all_values
def time_op1(key, op):
op(data[key])
time_op1.params = [all_values_q, op1]
def time_op2(keys, op):
key1, key2 = keys
op(data[key1], data[key2])
time_op2.params = [tuple(it.product(all_values_q, all_values_q)), op2_math + op2_cmp]
|
src/genie/libs/parser/iosxe/tests/ShowIPAlias/cli/equal/golden_output3_expected.py | balmasea/genieparser | 204 | 12672466 | expected_output = {
"vrf": {
"L3VPN-1538": {
"index": {1: {"address_type": "Interface", "ip_address": "192.168.10.254"}}
}
}
}
|
windows_build/generate_ver_info.py | hugmyndakassi/hvmi | 677 | 12672467 |
#
# Copyright (c) 2020 Bitdefender
# SPDX-License-Identifier: Apache-2.0
#
import argparse
import pathlib
import subprocess
import sys
import platform
from build_info import write_build_info
def get_argparser():
parser = argparse.ArgumentParser()
parser.add_argument(
"--meta-file", help="Path to the project-meta-info.in file", required=True
)
parser.add_argument("--build", help="Built number", required=False, default=None)
parser.add_argument("--out", help="Path to the output JSON file", required=True)
parser.add_argument(
"--overwrite",
help="Overwrite curent ver.h file. Default is False",
action="store_true",
default=False,
)
return parser
def get_version(metainfo):
with open(metainfo, mode="r") as metafile:
for line in metafile:
if "project_version" in line:
raw_ver = line.strip().split()[-1]
ver_components = raw_ver.split(".")
if len(ver_components) != 3:
return None
major = ver_components[0]
minor = ver_components[1]
revision = ver_components[2].replace(")", "")
return (major, minor, revision)
return None
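
# Note: the real format of project-meta-info.in is not shown in this file.
# From the parsing above, get_version() only assumes that the line containing
# "project_version" ends with a MAJOR.MINOR.REVISION token, optionally followed
# by ')', e.g. something roughly like:
#   set (project_version 1.2.3)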
def get_git_branch():
return (
subprocess.check_output(["git", "rev-parse", "--abbrev-ref", "HEAD"])
.decode("utf-8")
.strip()
)
def get_git_revision():
return (
subprocess.check_output(["git", "rev-parse", "--short", "HEAD"])
.decode("utf-8")
.strip()
)
def get_git_commit_count():
return (
subprocess.check_output(["git", "rev-list", "--count", "HEAD"])
.decode("utf-8")
.strip()
)
def get_machine_name():
return platform.node()
def main(argv):
args = get_argparser().parse_args(argv)
out_file = pathlib.Path(args.out)
if out_file.exists():
if not args.overwrite:
print(f"Version file at {out_file} already exists! Will not overwrite it")
return 0
print(f"Version file at {out_file} already exists! Will overwrite it")
ver = get_version(args.meta_file)
if not ver:
print(f"ERROR: Coult not extract version info from {args.meta_file}")
return 1
(major, minor, revision) = ver
build = args.build
if build is None:
# "Sensible" default for local builds
build = get_git_commit_count()
branch = get_git_branch()
changeset = get_git_revision()
build_machine = get_machine_name()
build_info = {}
build_info["major"] = major
build_info["minor"] = minor
build_info["revision"] = revision
build_info["build"] = build
build_info["changeset"] = changeset
build_info["branch"] = branch
build_info["build_machine"] = build_machine
write_build_info(out_file, build_info)
return 0
if __name__ == "__main__":
sys.exit(main(sys.argv[1:]))
|
lightbus/internal_apis.py | gcollard/lightbus | 178 | 12672600 | from lightbus.api import Api, Event
class LightbusStateApi(Api):
"""The API for the state plugin"""
worker_started = Event(
parameters=[
"service_name",
"process_name",
"metrics_enabled",
"api_names",
"listening_for",
"timestamp",
"ping_interval",
]
)
worker_ping = Event(
parameters=[
"service_name",
"process_name",
"metrics_enabled",
"api_names",
"listening_for",
"timestamp",
"ping_interval",
]
)
worker_stopped = Event(parameters=["process_name", "timestamp"])
class Meta:
name = "internal.state"
internal = True
class LightbusMetricsApi(Api):
"""The API for the metrics plugin"""
rpc_call_sent = Event(
parameters=[
"service_name",
"process_name",
"id",
"api_name",
"procedure_name",
"kwargs",
"timestamp",
]
)
rpc_call_received = Event(
parameters=["service_name", "process_name", "id", "api_name", "procedure_name", "timestamp"]
)
rpc_response_sent = Event(
parameters=[
"service_name",
"process_name",
"id",
"api_name",
"procedure_name",
"result",
"timestamp",
]
)
rpc_response_received = Event(
parameters=["service_name", "process_name", "id", "api_name", "procedure_name", "timestamp"]
)
event_fired = Event(
parameters=[
"service_name",
"process_name",
"event_id",
"api_name",
"event_name",
"kwargs",
"timestamp",
]
)
event_received = Event(
parameters=[
"service_name",
"process_name",
"event_id",
"api_name",
"event_name",
"kwargs",
"timestamp",
]
)
event_processed = Event(
parameters=[
"service_name",
"process_name",
"event_id",
"api_name",
"event_name",
"kwargs",
"timestamp",
]
)
class Meta:
name = "internal.metrics"
internal = True
|
py2deb/cli.py | arrikto/py2deb | 309 | 12672607 | # Command line interface for the `py2deb' program.
#
# Author: <NAME> <<EMAIL>>
# Last Change: May 22, 2017
# URL: https://py2deb.readthedocs.io
"""
Usage: py2deb [OPTIONS] ...
Convert Python packages to Debian packages according to the given
command line options (see below). The command line arguments are the
same as accepted by the `pip install' command because py2deb invokes
pip during the conversion process. This means you can name the
package(s) to convert on the command line but you can also use
`requirement files' if you prefer.
If you want to pass command line options to pip (e.g. because you want
to use a custom index URL or a requirements file) then you will need
to tell py2deb where the options for py2deb stop and the options for
pip begin. In such cases you can use the following syntax:
$ py2deb -r /tmp -- -r requirements.txt
So the `--' marker separates the py2deb options from the pip options.
Supported options:
-c, --config=FILENAME
Load a configuration file. Because the command line arguments are processed
in the given order, you have the choice and responsibility to decide if
command line options override configuration file options or vice versa.
Refer to the documentation for details on the configuration file format.
The default configuration files /etc/py2deb.ini and ~/.py2deb.ini are
automatically loaded if they exist. This happens before environment
variables and command line options are processed.
Can also be set using the environment variable $PY2DEB_CONFIG.
-r, --repository=DIRECTORY
Change the directory where *.deb archives are stored. Defaults to
the system wide temporary directory (which is usually /tmp). If
this directory doesn't exist py2deb refuses to run.
Can also be set using the environment variable $PY2DEB_REPOSITORY.
--use-system-package=PYTHON_PACKAGE_NAME,DEBIAN_PACKAGE_NAME
Exclude a Python package (the name before the comma) from conversion and
replace references to the Python package with a specific Debian package
name. This allows you to use system packages for specific Python
requirements.
--name-prefix=PREFIX
Set the name prefix used during the name conversion from Python to
Debian packages. Defaults to `python'. The name prefix and package
names are always delimited by a dash.
Can also be set using the environment variable $PY2DEB_NAME_PREFIX.
--no-name-prefix=PYTHON_PACKAGE_NAME
Exclude a Python package from having the name prefix applied
during the package name conversion. This is useful to avoid
awkward repetitions.
--rename=PYTHON_PACKAGE_NAME,DEBIAN_PACKAGE_NAME
Override the package name conversion algorithm for the given pair
of package names. Useful if you don't agree with the algorithm :-)
--install-prefix=DIRECTORY
Override the default system wide installation prefix. By setting
this to anything other than `/usr' or `/usr/local' you change the
way py2deb works. It will build packages with a file system layout
similar to a Python virtual environment, except there will not be
a Python executable: The packages are meant to be loaded by
modifying Python's module search path. Refer to the documentation
for details.
Can also be set using the environment variable $PY2DEB_INSTALL_PREFIX.
--install-alternative=LINK,PATH
Use Debian's `update-alternatives' system to add an executable
that's installed in a custom installation prefix (see above) to
the system wide executable search path. Refer to the documentation
for details.
--python-callback=EXPRESSION
Set a Python callback to be called during the conversion process. Refer to
the documentation for details about the use of this feature and the syntax
of EXPRESSION.
Can also be set using the environment variable $PY2DEB_CALLBACK.
--report-dependencies=FILENAME
Add the Debian relationships needed to depend on the converted
package(s) to the given control file. If the control file already
contains relationships the additional relationships will be added
to the control file; they won't overwrite existing relationships.
-y, --yes
Instruct pip-accel to automatically install build time dependencies
where possible. Refer to the pip-accel documentation for details.
Can also be set using the environment variable $PY2DEB_AUTO_INSTALL.
-v, --verbose
Make more noise :-).
-h, --help
Show this message and exit.
"""
# Standard library modules.
import getopt
import logging
import os
import sys
# External dependencies.
import coloredlogs
from deb_pkg_tools.control import patch_control_file
from humanfriendly.terminal import usage, warning
# Modules included in our package.
from py2deb.converter import PackageConverter
# Initialize a logger.
logger = logging.getLogger(__name__)
def main():
"""Command line interface for the ``py2deb`` program."""
# Configure terminal output.
coloredlogs.install()
try:
# Initialize a package converter.
converter = PackageConverter()
# Parse and validate the command line options.
options, arguments = getopt.getopt(sys.argv[1:], 'c:r:yvh', [
'config=', 'repository=', 'use-system-package=', 'name-prefix=',
'no-name-prefix=', 'rename=', 'install-prefix=',
'install-alternative=', 'python-callback=', 'report-dependencies=',
'yes', 'verbose', 'help',
])
control_file_to_update = None
for option, value in options:
if option in ('-c', '--config'):
converter.load_configuration_file(value)
elif option in ('-r', '--repository'):
converter.set_repository(value)
elif option == '--use-system-package':
python_package_name, _, debian_package_name = value.partition(',')
converter.use_system_package(python_package_name, debian_package_name)
elif option == '--name-prefix':
converter.set_name_prefix(value)
elif option == '--no-name-prefix':
converter.rename_package(value, value)
elif option == '--rename':
python_package_name, _, debian_package_name = value.partition(',')
converter.rename_package(python_package_name, debian_package_name)
elif option == '--install-prefix':
converter.set_install_prefix(value)
elif option == '--install-alternative':
link, _, path = value.partition(',')
converter.install_alternative(link, path)
elif option == '--python-callback':
converter.set_python_callback(value)
elif option == '--report-dependencies':
control_file_to_update = value
if not os.path.isfile(control_file_to_update):
msg = "The given control file doesn't exist! (%s)"
raise Exception(msg % control_file_to_update)
elif option in ('-y', '--yes'):
converter.set_auto_install(True)
elif option in ('-v', '--verbose'):
coloredlogs.increase_verbosity()
elif option in ('-h', '--help'):
usage(__doc__)
return
else:
assert False, "Unhandled option!"
except Exception as e:
warning("Failed to parse command line arguments: %s", e)
sys.exit(1)
# Convert the requested package(s).
try:
if arguments:
archives, relationships = converter.convert(arguments)
if relationships and control_file_to_update:
patch_control_file(control_file_to_update, dict(depends=relationships))
else:
usage(__doc__)
except Exception:
logger.exception("Caught an unhandled exception!")
sys.exit(1)
|
support/go-NN-master/engine/SelfPlay.py | sjkim04/AlphaGOZero-python-tensorflow | 325 | 12672651 |
# Self play games as used by DeepMind to train AlphaGo's value network. Play a
# policy against itself, but insert a single random move somewhere in the game.
# Use the position immediately after the random move together with the final
# game result as a single training example for the value network.

import numpy as np  # needed for np.random.randint() below

# NOTE: Board, RandomPolicy, Features, Color and color_names are used below but
# not imported in this snippet; they are assumed to come from the surrounding
# go-NN engine package.

def run_self_play_game_with_random_move(engine, N=19, komi=7.5):
board = Board(N)
engine.clear_board()
engine.set_board_size(N)
engine.set_komi(komi)
random_policy = RandomPolicy()
example_feature_planes = None
example_color_to_play = None
    random_move_num = np.random.randint(0, 450)  # was np.randint, which does not exist
print "random_move_num = ", random_move_num
move_num = 0
consecutive_passes = 0
result = None
while consecutive_passes < 2:
if move_num == random_move_num:
move = random_policy.pick_move(board)
board.play_move(move)
engine.move_was_played(move)
example_color_to_play = board.color_to_play
print "chose random move (%d,%d) for %s on move #%d" % (move.x, move.y, color_names[example_color_to_play], move_num)
example_feature_planes = Features.make_feature_planes_stones_3liberties_4history_ko(board, example_color_to_play)
else:
move = engine.generate_move(board)
if move.is_resign():
result = "B+Resign" if board.color_to_play == Color.Black else "W+Resign"
break
elif move.is_pass():
consecutive_passes += 1
else:
consecutive_passes = 0
board.play_move(move)
move_num += 1
    if result is None:
result = engine.final_score()
print "self play game finished. result is", result
    if example_feature_planes is not None:  # 'is not None' avoids elementwise comparison on the numpy array
winner = Color.Black if "B+" in result else Color.White
example_outcome = +1 if winner == example_color_to_play else -1
print "produced example with example_outcome = %d" % example_outcome
return (example_feature_planes, example_outcome)
else:
print "game didn't go long enough: no example produced."
return None
|
vimfiles/bundle/vim-python/submodules/pylint/tests/functional/a/arguments_differ_py3.py | ciskoinch8/vimrc | 463 | 12672669 | # pylint: disable=missing-docstring,too-few-public-methods
class AbstractFoo:
def kwonly_1(self, first, *, second, third):
"Normal positional with two positional only params."
def kwonly_2(self, *, first, second):
"Two positional only parameter."
def kwonly_3(self, *, first, second):
"Two positional only params."
def kwonly_4(self, *, first, second=None):
"One positional only and another with a default."
def kwonly_5(self, *, first, **kwargs):
"Keyword only and keyword variadics."
def kwonly_6(self, first, second, *, third):
"Two positional and one keyword"
class Foo(AbstractFoo):
def kwonly_1(self, first, *, second): # [arguments-differ]
"One positional and only one positional only param."
def kwonly_2(self, first): # [arguments-differ]
"Only one positional parameter instead of two positional only parameters."
def kwonly_3(self, first, second): # [arguments-differ]
"Two positional params."
def kwonly_4(self, first, second): # [arguments-differ]
"Two positional params."
def kwonly_5(self, *, first): # [arguments-differ]
"Keyword only, but no variadics."
def kwonly_6(self, *args, **kwargs): # valid override
"Positional and keyword variadics to pass through parent params"
class Foo2(AbstractFoo):
def kwonly_6(self, first, *args, **kwargs): # valid override
"One positional with the rest variadics to pass through parent params"
|
coders/nyc_parks.py | susannahsoon/oldperth | 302 | 12672700 |
#!/usr/bin/python
#
# Look for well-known NYC parks.
from collections import defaultdict
import fileinput
import re
import sys
import json
if __name__ == '__main__':
sys.path += (sys.path[0] + '/..')
import coders.registration
import record
parks = {
'Bronx Park': (40.856389, -73.876667),
'Claremont Park': (40.840546, -73.907469),
'Crotona Park': (40.8388, -73.8952),
# 'Fulton Park': None,
'Morris Park Race Track': (40.85, -73.855556),
'Poe Park': (40.865278, -73.894444),
'Pulaski Park': (40.805239, -73.924409),
'Starlight Park': (40.834176, -73.881968),
'Highland Park': (40.688370, -73.887480),
'Marine Park': (40.59804, -73.92083),
'Prospect Park Plaza': (40.6743, -73.9702),
'Prospect Park': (40.66143, -73.97035), # BIG!
'Battery Park': (40.703717, -74.016094),
'Bryant Park': (40.753792, -73.983607),
'Central Park': (40.782865, -73.965355), # BIG!
'Colonial Park': (40.824293, -73.942172),
'Cooper Park': (40.716014, -73.937268),
'Jefferson Park': (40.793366, -73.935247),
'Morningside Park': (40.805093, -73.959127),
'Riverside Park': (40.801234, -73.972310),
'Astoria Park': (40.775934, -73.925275),
'Baisley Park': (40.677778, -73.784722),
'Chisholm Park': (40.792833, -73.851857),
'Rainey Park': (40.766070, -73.940758),
'Barrett Park': (40.6251, -74.1157),
'Flushing Meadow Park': (40.739714, -73.840785),
'City Hall Park': (40.713160, -74.006389),
'Pelham Bay Park': (40.861500, -73.797200),
'Van Cortlandt Park': (40.894709, -73.890918),
'Inwood Hill Park': (40.871542, -73.925695),
'Carl Schurz Park': (40.775130, -73.943697),
'Jacob Riis Park': (40.566623, -73.876081),
'High Bridge Park': (40.843104, -73.932910),
'Fort Tryon Park': (40.861619, -73.933622),
'Fort Greene Park': (40.690344, -73.973833),
'Morris Park': (40.852201, -73.850728), # Neighborhood
'Fort Washington Park': (40.849475, -73.946673),
'Washington Square Park': (40.730823, -73.997332),
'Mount Morris Park': (40.804508, -73.944046),
'Union Square Park': (40.735708, -73.990442),
'Stuyvesant Square Park': (40.733611, -73.984000),
'Juniper Valley Park': (40.720101, -73.881488),
'Starlight Amusement Park': (40.834176, -73.881968),
'Seton Falls Park': (40.886753, -73.838231),
'Madison Square Park': (40.742216, -73.988036),
'Golden City Park': (40.629194, -73.883929),
'Golden City Amusement Park': (40.629194, -73.883929),
'Corlears Hook Park': (40.711697, -73.979697),
'College Point Park': (40.785778, -73.846501),
'Marine Park at Marine Park': (40.595700, -73.921198),
'Hamilton Fish Park': (40.720029, -73.981559),
'Garden City Amusement Park': (40.629194, -73.883929),
# 'Fulton Park': (),
'Fort Green Park': (40.690344, -73.973833),
'Canarsie Beach Park': (40.629194, -73.883929)
}
central_park = {
'The Pond': (40.766014, -73.974004),
'Pond in winter': (40.766014, -73.974004),
'The Lake': (40.776223, -73.973085),
'Reservoirs - Lower reservoir': (40.781289, -73.966664),
'Reservoirs - Upper reservoir': (40.785719, -73.963902),
# 'Pathways': (),
'The Mall': (40.772352, -73.971590),
# 'Playgrounds': (),
# 'Transverse roads': (),
# 'Miscellaneous': (),
'Bridal path': (40.796840, -73.957826),
'[View of the Arsenal Building]': (40.767618, -73.971311),
# 'The Seal Pool': (),
'The Obelisk': (40.779638, -73.965400),
'Transportation of the Obelisk': (40.779638, -73.965400),
'Terrace Fountain and the Lake': (40.753982, -73.984127),
# 'looking west from Fifth Avenue apartment': (),
# 'Sailboat pond': (),
# 'Rustic Arbor': (),
'<NAME>': (40.796464, -73.951596),
# 'West Drive': (),
# 'The Sailboat Pool': (),
# 'Drives': (),
# 'Cliffs': (),
}
islands = {
'Barren Island': (40.592778, -73.893056),
'Barren': (40.592778, -73.893056),
'Bedloe\'s Island': (40.690050, -74.045068),
'City Island': (40.846820, -73.787498),
'City': (40.846820, -73.787498),
'Coney Island beach': (40.572130, -73.979330),
'Coney Island pier': (40.571413, -73.983822),
'Coney Island': (40.574926, -73.985941),
'Coney': (40.574926, -73.985941),
'Ellis Island': (40.699472, -74.039560),
'Ellis': (40.699472, -74.039560),
'Governor\'s Island': (40.689450, -74.016792),
'Governors Island': (40.689450, -74.016792),
'Governors': (40.689450, -74.016792),
'Hart\'s Island': (40.853627, -73.770585),
'High Island': (40.859525, -73.785639),
'Hoffman Island': (40.578873, -74.053688),
'Hoffman': (40.578873, -74.053688),
'Hunter Island': (40.875028, -73.790219),
'Hunter': (40.875028, -73.790219),
'North Brother Island': (40.800720, -73.898137),
'North Brothers Island': (40.800720, -73.898137),
'North Brothers': (40.800720, -73.898137),
'North Brother': (40.800720, -73.898137),
'Plumb Island': (40.584722, -73.915000),
'Randall\'s Island': (40.793227, -73.921286),
'Randalls Island': (40.793227, -73.921286),
'Rikers Island': (40.793128, -73.886010),
'Shooters Island': (40.643333, -74.159722),
'South Brother Island': (40.796402, -73.898137),
'South Brother': (40.796402, -73.898137),
'Ward\'s Island': (40.793227, -73.921286),
'Welfare Island': (40.762161, -73.949964),
'Welfare': (40.762161, -73.949964)
}
bridges = {
'Brooklyn Bridge': (40.706096, -73.996823),
'Triborough Bridge': (40.788232, -73.927871),
'triborough Bridge': (40.788232, -73.927871),
'Triborough Bridge and': (40.788232, -73.927871),
'Queensboro Bridge': (40.756732, -73.954224),
'Queensborough Bridge': (40.756732, -73.954224),
'Manhattan Bridge': (40.707471, -73.990774),
'George Washington Bridge': (40.850425, -73.945942),
'Washington Bridge': (40.846944, -73.928056),
'Hell Gate Bridge': (40.782596, -73.921913),
'Williamsburg Bridge': (40.713690, -73.972616),
'Harlem River Bridges': (40.815139, -73.933096),
'Bayonne Bridge': (40.639706, -74.142963),
'Kill Van Kull Bridge': (40.639706, -74.142963),
'High Bridge': (40.842308, -73.930277),
'Penny Bridge': (40.72777, -73.9292),
'Washington Bridge over Harlem River': (40.846944, -73.928056),
'Verrazano Narrows Bridge': (40.606589, -74.044648),
'Triborough and Hell Gate Bridge': (40.788232, -73.927871),
'Northern Boulevard Bridge': (40.763428, -73.751743),
'Marine Parkway Bridge': (40.573697, -73.885145),
'Lemon Creek Bridge': (40.521727, -74.202524),
'Kosciusko Bridge': (40.72777, -73.9292),
'Henry Hudson Bridge': (40.877713, -73.922302),
'Gowanus Canal Bridge': (40.674106, -73.996503),
'Ninth Street drawbridge Bridge': (40.674106, -73.996503),
'Vernon Boulevard Bridge': (40.760673, -73.943330),
'Triborough and Hell Gate Bridges': (40.788232, -73.927871),
'Henry Hudson Memorial Bridge': (40.877713, -73.922302),
'Goethals Bridge': (40.635381, -74.195978),
# 2 'Flushing Creek Bridge': (),
# 2 'City Island Bridge': (),
# 2 'Broadway Bridge': (),
# 2 'Bridge over Coney Island': ()
# 1 Throgs Neck Bridge
# 1 Strongs Causeway Bridge
# 1 Pelham Bridge
# 1 Outerbridge Crossing site Bridge
# 1 Outerbridge Crossing Bridge
# 1 New York and Putnam Railroad Bridge
# 1 New York Central Railroad Bridge
# 1 Metropolitan Avenue Bridge
# 1 Madison Avenue Bridge
# 1 Kosciusko Bridge over Newtown Creek in
# 1 Kings Bridge
# 1 Hells Gate Bridge
# 1 Hell Gate and Triborough Bridge
# 1 Harlem River Bridge
# 1 Flushing River Bridge
# 1 Farmers Bridge
# 1 East River Bridge
# 1 Cross Bay Veterans Memorial Bridge
# 1 Cross Bay Boulevard Bridge
# 1 Brroklyn Bridge
# 1 Brooklyn and Manhattan Bridges over East
# 1 Baltimore and Ohio Railroad Bridge
}
beaches = {
}
# Bridges
# "East River - River scenes - View of Brooklyn Bridge and financial district from Manhattan Bridge"
# "East River - River scenes - Brooklyn Bridge -Early shipping."
# Beaches
# - Midland Beach, Staten Island, NY
boros_re = '(?:New York|Manhattan|Brooklyn|Bronx|Queens|Staten Island)'
park_re = r'^%s: ([A-Za-z ]+ Park)(?: |$)' % boros_re
non_parks_re = r'Park (?:Avenue|West|East|North|South|Court|Place|Row|Terrace|Blvd|Boulevard)'
island_re = r'^Islands - ([A-Za-z ]+) '
bridge_re = r'^Bridges - ([A-Za-z ]+) '
missing_parks = defaultdict(int)
missing_islands = defaultdict(int)
missing_bridges = defaultdict(int)
class NycParkCoder:
def __init__(self):
pass
def codeRecord(self, r):
if r.source() != 'Milstein Division': return None
title = re.sub(r'\.$', '', r.title())
m = re.search(park_re, title)
if m:
park = m.group(1)
if not re.search(non_parks_re, title):
if park not in parks:
missing_parks[park] += 1
else:
latlon = None
if park == 'Central Park':
for place in central_park:
if ('Central Park - %s' % place) in title:
latlon = central_park[place]
if not latlon:
latlon = parks[park]
return {
'address': '@%s,%s' % latlon,
'source': m.group(0),
'type': 'point_of_interest'
}
m = re.search(island_re, title)
if m:
island = m.group(1)
if island not in islands:
missing_islands[island] += 1
else:
latlon = islands[island]
return {
'address': '@%s,%s' % latlon,
'source': m.group(0),
'type': 'point_of_interest'
}
m = re.search(bridge_re, title)
if m:
bridge = m.group(1)
            if 'Bridge' not in bridge or 'bridge' in bridge:
bridge += ' Bridge'
if bridge not in bridges:
missing_bridges[bridge] += 1
else:
latlon = bridges[bridge]
return {
'address': '@%s,%s' % latlon,
'source': m.group(0),
'type': 'point_of_interest'
}
return None
def _getLatLonFromGeocode(self, geocode, data):
for result in geocode['results']:
# data['type'] is something like 'address' or 'intersection'.
if data['type'] in result['types']:
loc = result['geometry']['location']
return (loc['lat'], loc['lng'])
def getLatLonFromGeocode(self, geocode, data, r):
latlon = self._getLatLonFromGeocode(geocode, data)
if not latlon:
return None
return latlon
def finalize(self):
for missing in [missing_parks, missing_islands, missing_bridges]:
vs = [(v, k) for k, v in missing.iteritems()]
for v, k in reversed(sorted(vs)):
sys.stderr.write('%4d\t%s\n' % (v, k))
def name(self):
return 'nyc-parks'
coders.registration.registerCoderClass(NycParkCoder)
# For fast iteration
if __name__ == '__main__':
coder = NycParkCoder()
r = record.Record()
num_ok, num_bad = 0, 0
for line in fileinput.input():
addr = line.strip()
if not addr: continue
r.tabular = {
'i': ['PHOTO_ID'],
'a': ['Milstein Division'],
't': [addr]
}
result = coder.codeRecord(r)
if result:
num_ok += 1
print '"%s" -> %s' % (addr, result)
else:
num_bad += 1
coder.finalize()
sys.stderr.write('Parsed %d / %d = %.4f records\n' % (
num_ok, num_ok + num_bad, 1. * num_ok / (num_ok + num_bad)))
|
task11_kaggle/create_submission.py | Rojanson/stepik-dl-nlp | 120 | 12672704 | import argparse
import pandas as pd
import numpy as np
import string
import pickle
from nltk.util import ngrams
def generate_csv(input_file='predicted_titles.csv', output_file='submission.csv', voc_file='vocs.pkl'):
'''
    Generates a file in the format required for submitting results to Kaggle
Parameters:
input_file (str) : path to csv file with your predicted titles.
Should have two fields: abstract and title
output_file (str) : path to output submission file
voc_file (str) : path to voc.pkl file
'''
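    # Example call (the default file names from the signature above):
    #   generate_csv('predicted_titles.csv', 'submission.csv', 'vocs.pkl')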
data = pd.read_csv(input_file)
with open(voc_file, 'rb') as voc_file:
vocs = pickle.load(voc_file)
with open(output_file, 'w') as res_file:
res_file.write('Id,Predict\n')
output_idx = 0
for row_idx, row in data.iterrows():
trg = row['title']
trg = trg.translate(str.maketrans('', '', string.punctuation)).lower().split()
trg.extend(['_'.join(ngram) for ngram in list(ngrams(trg, 2)) + list(ngrams(trg, 3))])
VOCAB_stoi = vocs[row_idx]
trg_intersection = set(VOCAB_stoi.keys()).intersection(set(trg))
trg_vec = np.zeros(len(VOCAB_stoi))
for word in trg_intersection:
trg_vec[VOCAB_stoi[word]] = 1
with open(output_file, 'a') as res_file:
for is_word in trg_vec:
res_file.write('{0},{1}\n'.format(output_idx, int(is_word)))
output_idx += 1
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'--input_file',
help='Path to input .csv file (abstract, title)',
type=str,
)
parser.add_argument(
'--output_file',
help='Path to kaggle submission file',
type=str,
)
parser.add_argument(
'--voc_file',
help='Path to voc.pkl file',
type=str,
)
args = parser.parse_args()
generate_csv(args.input_file, args.output_file, args.voc_file)
|
sdk/formrecognizer/azure-ai-formrecognizer/samples/v3.2-beta/sample_authentication.py | rsdoherty/azure-sdk-for-python | 2,728 | 12672714 | # coding: utf-8
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
"""
FILE: sample_authentication.py
DESCRIPTION:
This sample demonstrates how to authenticate to the Form Recognizer service.
There are two supported methods of authentication:
1) Use a Form Recognizer API key with AzureKeyCredential from azure.core.credentials
2) Use a token credential from azure-identity to authenticate with Azure Active Directory
See more details about authentication here:
https://docs.microsoft.com/azure/cognitive-services/authentication
USAGE:
python sample_authentication.py
Set the environment variables with your own values before running the sample:
1) AZURE_FORM_RECOGNIZER_ENDPOINT - the endpoint to your Form Recognizer resource.
2) AZURE_FORM_RECOGNIZER_KEY - your Form Recognizer API key
3) AZURE_CLIENT_ID - the client ID of your active directory application.
4) AZURE_TENANT_ID - the tenant ID of your active directory application.
5) AZURE_CLIENT_SECRET - the secret of your active directory application.
"""
import os
url = "https://raw.githubusercontent.com/Azure/azure-sdk-for-python/main/sdk/formrecognizer/azure-ai-formrecognizer/tests/sample_forms/forms/Form_1.jpg"
def authentication_with_api_key_credential_document_analysis_client():
# [START create_da_client_with_key]
from azure.core.credentials import AzureKeyCredential
from azure.ai.formrecognizer import DocumentAnalysisClient
endpoint = os.environ["AZURE_FORM_RECOGNIZER_ENDPOINT"]
key = os.environ["AZURE_FORM_RECOGNIZER_KEY"]
document_analysis_client = DocumentAnalysisClient(endpoint, AzureKeyCredential(key))
# [END create_da_client_with_key]
poller = document_analysis_client.begin_analyze_document_from_url(
"prebuilt-layout", url
)
result = poller.result()
def authentication_with_azure_active_directory_document_analysis_client():
# [START create_da_client_with_aad]
"""DefaultAzureCredential will use the values from these environment
variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, AZURE_CLIENT_SECRET
"""
from azure.ai.formrecognizer import DocumentAnalysisClient
from azure.identity import DefaultAzureCredential
endpoint = os.environ["AZURE_FORM_RECOGNIZER_ENDPOINT"]
credential = DefaultAzureCredential()
document_analysis_client = DocumentAnalysisClient(endpoint, credential)
# [END create_da_client_with_aad]
poller = document_analysis_client.begin_analyze_document_from_url(
"prebuilt-layout", url
)
result = poller.result()
def authentication_with_api_key_credential_document_model_admin_client():
# [START create_dt_client_with_key]
from azure.core.credentials import AzureKeyCredential
from azure.ai.formrecognizer import DocumentModelAdministrationClient
endpoint = os.environ["AZURE_FORM_RECOGNIZER_ENDPOINT"]
key = os.environ["AZURE_FORM_RECOGNIZER_KEY"]
document_model_admin_client = DocumentModelAdministrationClient(endpoint, AzureKeyCredential(key))
# [END create_dt_client_with_key]
info = document_model_admin_client.get_account_info()
def authentication_with_azure_active_directory_document_model_admin_client():
# [START create_dt_client_with_aad]
"""DefaultAzureCredential will use the values from these environment
variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, AZURE_CLIENT_SECRET
"""
from azure.ai.formrecognizer import DocumentModelAdministrationClient
from azure.identity import DefaultAzureCredential
endpoint = os.environ["AZURE_FORM_RECOGNIZER_ENDPOINT"]
credential = DefaultAzureCredential()
document_model_admin_client = DocumentModelAdministrationClient(endpoint, credential)
# [END create_dt_client_with_aad]
info = document_model_admin_client.get_account_info()
if __name__ == "__main__":
authentication_with_api_key_credential_document_analysis_client()
authentication_with_azure_active_directory_document_analysis_client()
authentication_with_api_key_credential_document_model_admin_client()
authentication_with_azure_active_directory_document_model_admin_client()
|
nntts/models/__init__.py | entn-at/efficient_tts | 111 | 12672730 |
from .efficient_tts import EfficientTTSCNN
from .duration_model import DurationModel
|
vimfiles/bundle/vim-python/submodules/pylint/tests/functional/b/blacklisted_name.py | ciskoinch8/vimrc | 463 | 12672746 |
# pylint: disable=missing-docstring
def baz(): # [disallowed-name]
pass
|
dirigible/sheet/tests/test_dirigible_datetime.py | EnoX1/dirigible-spreadsheet | 168 | 12672775 |
# Copyright (c) 2010 Resolver Systems Ltd, PythonAnywhere LLP
# See LICENSE.md
#
try:
import unittest2 as unittest
except ImportError:
import unittest
import datetime
from sheet.dirigible_datetime import DateTime
from dirigible.test_utils import ResolverTestCase
class DateTimeTest(ResolverTestCase):
def test_DateTime_subclasses_datetime_dot_datetime(self):
self.assertTrue(isinstance(
DateTime(1979, 10, 8),
datetime.datetime))
|
tests/integrations/subprocess/conftest.py | chuckyQ/briefcase | 917 | 12672794 | from unittest.mock import MagicMock
import pytest
from briefcase.integrations.subprocess import Subprocess
@pytest.fixture
def mock_sub():
command = MagicMock()
command.verbosity = 0
sub = Subprocess(command)
sub._subprocess = MagicMock()
return sub
|
venv/Lib/site-packages/nipype/interfaces/cat12/tests/test_auto_ExtractROIBasedSurfaceMeasures.py | richung99/digitizePlots | 585 | 12672834 | # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT
from ..surface import ExtractROIBasedSurfaceMeasures
def test_ExtractROIBasedSurfaceMeasures_inputs():
input_map = dict(
lh_roi_atlas=dict(
copyfile=False,
field="rdata",
mandatory=True,
),
lh_surface_measure=dict(
copyfile=False,
field="cdata",
mandatory=True,
),
matlab_cmd=dict(),
mfile=dict(
usedefault=True,
),
paths=dict(),
rh_roi_atlas=dict(
copyfile=False,
mandatory=False,
),
rh_surface_measure=dict(
copyfile=False,
mandatory=False,
),
surface_files=dict(
copyfile=False,
mandatory=False,
),
use_mcr=dict(),
use_v8struct=dict(
min_ver="8",
usedefault=True,
),
)
inputs = ExtractROIBasedSurfaceMeasures.input_spec()
for key, metadata in list(input_map.items()):
for metakey, value in list(metadata.items()):
assert getattr(inputs.traits()[key], metakey) == value
def test_ExtractROIBasedSurfaceMeasures_outputs():
output_map = dict(
label_files=dict(),
)
outputs = ExtractROIBasedSurfaceMeasures.output_spec()
for key, metadata in list(output_map.items()):
for metakey, value in list(metadata.items()):
assert getattr(outputs.traits()[key], metakey) == value
|
adbui/adb_ext.py | hao1032/adbui | 136 | 12672903 | # coding=utf-8
import os
import re
import time
import base64
import logging
import tempfile
class AdbExt(object):
def __init__(self, util):
self.util = util
self.is_helper_ready = False
self.width, self.height = None, None
        self.dir_path = os.path.dirname(os.path.abspath(__file__))  # absolute path of the directory containing this file
self.temp_device_dir_path = '/data/local/tmp'
def init_device_size(self):
if self.width and self.height:
return
out = self.util.shell('wm size') # out like 'Physical size: 1080x1920'
out = re.findall(r'\d+', out)
self.width = int(out[0])
self.height = int(out[1])
def dump(self):
for i in range(5):
xml_str = self.__dump_xml()
if xml_str:
return xml_str
time.sleep(1)
raise NameError('dump xml fail!')
def __dump_xml(self):
        # first try to get the xml via the helper
xml_str = self.run_helper_cmd('layout')
        # fall back to uiautomator dump in compressed mode
if not xml_str:
xml_str = self.util.adb('exec-out uiautomator dump --compressed /dev/tty', encoding='')
        # fall back to uncompressed mode
if not xml_str:
xml_str = self.util.adb('exec-out uiautomator dump /dev/tty', encoding='')
if isinstance(xml_str, bytes):
xml_str = xml_str.decode('utf-8')
if 'hierarchy' in xml_str:
start = xml_str.find('<hierarchy')
end = xml_str.rfind('>') + 1
xml_str = xml_str[start: end].strip()
return xml_str
def run_helper_cmd(self, cmd):
"""
        Run a command through the helper; the helper currently supports dump xml and screenshot
:param cmd:
:return:
"""
if not self.is_helper_ready:
file_names = self.util.shell('ls {}'.format(self.temp_device_dir_path))
if 'adbui' not in file_names:
helper_path = os.path.join(self.dir_path, 'static', 'adbui')
self.push(helper_path, self.temp_device_dir_path)
self.is_helper_ready = True
arg = 'app_process -Djava.class.path=/data/local/tmp/adbui /data/local/tmp com.ysbing.yadb.Main -{}'.format(cmd)
return self.util.shell(arg)
def delete_from_device(self, path):
self.util.shell('rm -rf {}'.format(path))
def screenshot(self, pc_path=None):
out = self.run_helper_cmd('screenshot')
if out and len(out) > 50:
out = base64.b64decode(out)
        else:  # the helper screenshot failed, fall back to screencap
            logging.warning('helper screenshot failed')
arg = 'exec-out screencap -p'.format(self.util.sn)
out = self.util.adb(arg, encoding=None) # 这里是 png bytes string
        # save the screenshot if a PC path was given
if pc_path:
if self.util.is_py2:
pc_path = pc_path.decode('utf-8')
            if os.path.exists(pc_path):  # remove any existing file on the PC first
os.remove(pc_path)
with open(pc_path, 'wb') as f:
f.write(out)
return pc_path
return out
def pull(self, device_path=None, pc_path=None):
return self.util.adb('pull "{}" "{}"'.format(device_path, pc_path))
def push(self, pc_path=None, device_path=None):
return self.util.adb('push "{}" "{}"'.format(pc_path, device_path))
def click(self, x, y):
self.util.shell('input tap {} {}'.format(x, y))
def long_click(self, x, y, duration=''):
"""
        Long press
        :param x: x coordinate
        :param y: y coordinate
        :param duration: duration of the press (ms)
:return:
"""
self.util.shell('input touchscreen swipe {} {} {} {} {}'.format(x, y, x, y, duration))
def start(self, pkg):
"""
        Start an app via monkey; only the package name is needed
:param pkg:
:return:
"""
self.util.shell('monkey -p {} 1'.format(pkg))
def stop(self, pkg):
self.util.shell('am force-stop {}'.format(pkg))
def input(self, text):
self.util.shell('input text "{}"'.format(text.replace('&', '\&')))
def back(self, times=1):
while times:
self.util.shell('input keyevent 4')
times -= 1
def home(self):
self.util.shell('input keyevent 3')
def enter(self, times=1):
while times:
self.util.shell('input keyevent 66')
times -= 1
def swipe(self, e1=None, e2=None, start_x=None, start_y=None, end_x=None, end_y=None, duration=" "):
"""
        Swipe event; duration (ms) is optional on Android 4.4+
usage: swipe(e1, e2)
swipe(e1, end_x=200, end_y=500)
swipe(start_x=0.5, start_y=0.5, e2)
"""
self.init_device_size()
if e1 is not None:
start_x = e1[0]
start_y = e1[1]
if e2 is not None:
end_x = e2[0]
end_y = e2[1]
if 0 < start_x < 1:
start_x = start_x * self.width
if 0 < start_y < 1:
start_y = start_y * self.height
if 0 < end_x < 1:
end_x = end_x * self.width
if 0 < end_y < 1:
end_y = end_y * self.height
self.util.shell('input swipe %s %s %s %s %s' % (str(start_x), str(start_y), str(end_x), str(end_y), str(duration)))
def clear(self, pkg):
"""
        Reset the app (clear its data)
:param pkg:
:return:
"""
self.util.shell('pm clear {}'.format(pkg))
def wake_up(self):
"""
        Wake up the screen
:return:
"""
self.util.shell('input keyevent KEYCODE_WAKEUP')
def unlock(self):
"""
        Unlock the screen
:return:
"""
self.util.shell('input keyevent 82')
def grant(self, pkg, permission):
"""
        Grant a permission to an app, e.g. adb shell pm grant [PACKAGE_NAME] android.permission.PACKAGE_USAGE_STATS
:return:
"""
self.util.shell('pm grant {} {}'.format(pkg, permission))
def install(self, apk_path, with_g=True, with_r=False, user=None):
"""
        Install an apk
        :param apk_path:
        :param with_g: -g auto-grants permissions on some devices, default True
        :param with_r: -r reinstall (overwrite an existing install), default False
:param user:
:return:
"""
arg = 'install'
if user:
arg = arg + ' -user {}'.format(user)
if with_g:
arg = arg + ' -g'
if with_r:
arg = arg + ' -r'
self.util.adb('{} "{}"'.format(arg, apk_path), timeout=60 * 5) # 安装较大的包可能比较耗时
def uninstall(self, pkg):
"""
        Uninstall a package
:param pkg:
:return:
"""
self.util.adb('uninstall {}'.format(pkg))
def get_name(self, remove_blank=False):
name = self.util.shell('getprop ro.config.marketing_name').strip()
if not name:
name = self.util.shell('getprop ro.product.nickname').strip()
if remove_blank:
name = name.replace(' ', '')
return name
def switch_user(self, user_id, wait_time=5):
self.util.shell('am switch-user {}'.format(user_id))
time.sleep(wait_time)
def list_packages(self, system=False):
"""
        Return the packages installed on the device
        :param system: whether to include system packages
:return:
"""
with_system = '' if system else '-3'
return self.util.shell('pm list packages {}'.format(with_system))
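
# A minimal usage sketch (illustrative only; constructing the util object depends
# on the rest of adbui, which is not shown here). AdbExt only relies on a util
# exposing shell()/adb()/is_py2/sn, as used above:
#   ext = AdbExt(util)
#   ext.wake_up()
#   ext.screenshot('/tmp/screen.png')
#   ext.click(100, 200)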
|
tests/typing/test_fixp_inst.py | bogdanvuk/pygears | 120 | 12672908 |
from math import ceil, floor
from pygears.typing import Fixp, Ufixp, Uint, Int
def test_abs():
uq2_3 = Ufixp[2, 3]
q2_3 = Fixp[2, 3]
q3_4 = Fixp[3, 4]
assert abs(uq2_3.max) == uq2_3.max
assert abs(q2_3.min) == q3_4(abs(float(q2_3.min)))
def test_add():
uq2_3 = Ufixp[2, 3]
uq2_4 = Ufixp[2, 4]
uq3_4 = Ufixp[3, 4]
uq3_5 = Ufixp[3, 5]
uq4_5 = Ufixp[4, 5]
uq4_6 = Ufixp[4, 6]
q2_3 = Fixp[2, 3]
q2_4 = Fixp[2, 4]
q3_4 = Fixp[3, 4]
q3_5 = Fixp[3, 5]
q4_5 = Fixp[4, 5]
q4_6 = Fixp[4, 6]
q5_6 = Fixp[5, 6]
q5_7 = Fixp[5, 7]
assert uq2_3.quant + uq3_4.quant == uq4_5(float(uq2_3.quant) + float(uq3_4.quant))
assert uq2_3.max + uq3_4.max == uq4_5(11.0)
assert uq3_4.max + uq3_4.max == uq4_5(15.0)
assert uq2_4.quant + uq3_4.quant == uq4_6(float(uq2_4.quant) + float(uq3_4.quant))
assert uq2_4.max + uq3_4.max == uq4_6(11.25)
assert uq3_4.max + uq3_5.max == uq4_6(15.25)
assert q2_3.quant + q3_4.quant == q4_5(float(q2_3.quant) + float(q3_4.quant))
assert q2_3.max + q3_4.max == q4_5(5.0)
assert q3_4.max + q3_4.max == q4_5(7.0)
assert q2_4.quant + q3_4.quant == q4_6(float(q2_4.quant) + float(q3_4.quant))
assert q2_4.max + q3_4.max == q4_6(5.25)
assert q3_4.max + q3_5.max == q4_6(7.25)
assert uq2_3.quant + q3_4.quant == q4_5(float(uq2_3.quant) + float(q3_4.quant))
assert uq2_3.max + q3_4.max == q4_5(7.0)
assert q2_3.max + uq3_4.max == q5_6(9.0)
assert uq3_4.max + q3_4.max == q5_6(11.0)
assert uq2_4.quant + q3_4.quant == q4_6(float(uq2_4.quant) + float(q3_4.quant))
assert uq2_4.max + q3_4.max == q4_6(7.25)
assert uq3_4.max + q3_5.max == q5_7(11.25)
assert q2_4.max + uq3_4.max == q5_7(9.25)
assert q2_3.min + q3_4.max == q4_5(1.5)
assert q3_4.min + q3_4.max == q4_5(-0.5)
assert q2_4.min + q3_4.max == q4_6(1.5)
assert q3_4.min + q3_5.max == q4_6(-0.25)
assert uq2_3.max + q3_4.min == q4_5(-0.5)
assert q2_3.min + uq3_4.max == q5_6(5.5)
assert uq3_4.max + q3_4.min == q5_6(3.5)
assert uq2_4.max + q3_4.min == q4_6(-0.25)
assert uq3_4.max + q3_5.min == q5_7(3.5)
assert q2_4.min + uq3_4.max == q5_7(5.5)
def test_ceil():
uq2_4 = Ufixp[2, 4]
q2_3 = Fixp[2, 3]
uq4_4 = Ufixp[4, 4]
q6_3 = Fixp[6, 3]
assert ceil(uq2_4.max) == Ufixp[3, 5](4.0)
assert ceil(uq2_4(3.25)) == Ufixp[3, 5](4.0)
assert ceil(q2_3.min) == Fixp[3, 4](-2.0)
assert ceil(q2_3(-1.5)) == Fixp[3, 4](-1.0)
assert ceil(uq4_4.max) == uq4_4.max
assert ceil(q6_3.min) == q6_3.min
def test_floor():
uq2_4 = Ufixp[2, 4]
q2_3 = Fixp[2, 3]
uq4_4 = Ufixp[4, 4]
q6_3 = Fixp[6, 3]
assert floor(uq2_4.max) == uq2_4(3.0)
assert floor(uq2_4(3.25)) == uq2_4(3.0)
assert floor(q2_3.min) == q2_3(-2.0)
assert floor(q2_3(-1.5)) == q2_3(-2.0)
assert floor(uq4_4.max) == uq4_4.max
assert floor(q6_3.min) == q6_3.min
def test_ge():
uq2_3 = Ufixp[2, 3]
q2_3 = Fixp[2, 3]
assert uq2_3(1.5) >= q2_3(1.5)
assert q2_3(1.5) >= uq2_3(1.5)
assert uq2_3.max >= q2_3.min
assert q2_3.max >= uq2_3.min
def test_gt():
uq2_3 = Ufixp[2, 3]
q2_3 = Fixp[2, 3]
assert uq2_3(2.0) > q2_3(1.5)
assert q2_3(1.5) > uq2_3(1.0)
assert uq2_3.max > q2_3.min
assert q2_3.max > uq2_3.min
def test_le():
uq2_3 = Ufixp[2, 3]
q2_3 = Fixp[2, 3]
assert uq2_3(1.5) <= q2_3(1.5)
assert q2_3(1.5) <= uq2_3(1.5)
assert uq2_3.min <= q2_3.max
assert q2_3.min <= uq2_3.max
def test_lt():
uq2_3 = Ufixp[2, 3]
q2_3 = Fixp[2, 3]
assert uq2_3(1.0) < q2_3(1.5)
assert q2_3(1.0) < uq2_3(1.5)
assert uq2_3.min < q2_3.max
assert q2_3.min < uq2_3.max
def test_lshift():
uq2_3 = Ufixp[2, 3]
uq4_3 = Ufixp[4, 3]
q2_3 = Fixp[2, 3]
q4_3 = Fixp[4, 3]
assert uq2_3.max << 2 == uq4_3(14.0)
assert q2_3.min << 2 == q4_3.min
assert uq2_3.max << 0 == uq2_3.max
assert q2_3.min << 0 == q2_3.min
def test_neg():
uq2_3 = Ufixp[2, 3]
q2_3 = Fixp[2, 3]
q3_4 = Fixp[3, 4]
assert -uq2_3.max == q3_4(-float(uq2_3.max))
assert -q2_3.min == q3_4(-float(q2_3.min))
def test_rshift():
uq2_3 = Ufixp[2, 3]
uq4_3 = Ufixp[4, 3]
q2_3 = Fixp[2, 3]
q4_3 = Fixp[4, 3]
assert uq4_3(14.0) >> 2 == uq2_3.max
assert q4_3.min >> 2 == q2_3.min
assert uq2_3.max >> 0 == uq2_3.max
assert q2_3.min >> 0 == q2_3.min
def test_round():
uq2_4 = Ufixp[2, 4]
q2_3 = Fixp[2, 3]
uq4_4 = Ufixp[4, 4]
q6_3 = Fixp[6, 3]
assert round(uq2_4.max) == Ufixp[3, 5](4.0)
assert round(uq2_4(3.25)) == Ufixp[3, 5](3.0)
assert round(q2_3.min) == Fixp[3, 4](-2.0)
assert round(q2_3(-1.5)) == Fixp[3, 4](-1.0)
assert round(uq4_4.max) == uq4_4.max
assert round(q6_3.min) == q6_3.min
def test_sub_val():
uq2_3 = Ufixp[2, 3]
uq2_4 = Ufixp[2, 4]
uq3_4 = Ufixp[3, 4]
uq3_5 = Ufixp[3, 5]
q2_3 = Fixp[2, 3]
q2_4 = Fixp[2, 4]
q3_4 = Fixp[3, 4]
q3_5 = Fixp[3, 5]
q4_5 = Fixp[4, 5]
q4_6 = Fixp[4, 6]
q5_6 = Fixp[5, 6]
q5_7 = Fixp[5, 7]
assert uq2_3.quant - uq3_4.quant == q4_5(0.0)
assert uq2_3.min - uq3_4.max == q4_5(-7.5)
assert uq2_4.quant - uq3_4.quant == q4_6(float(uq2_4.quant) - float(uq3_4.quant))
assert uq2_4.min - uq3_4.max == q4_6(-7.5)
assert uq3_4.min - uq3_5.max == q4_6(-7.75)
assert q2_3.quant - q3_4.quant == q4_5(0.0)
assert q2_3.min - q3_4.max == q4_5(-5.5)
assert q3_4.min - q3_4.max == q4_5(-7.5)
assert q3_4.max - q3_4.min == q4_5(7.5)
assert q2_4.quant - q3_4.quant == q4_6(float(q2_4.quant) - float(q3_4.quant))
assert q2_4.min - q3_4.max == q4_6(-5.5)
assert q2_4.max - q3_4.min == q4_6(5.75)
assert q3_4.min - q3_5.max == q4_6(-7.75)
assert q3_4.max - q3_5.min == q4_6(7.5)
assert uq2_3.quant - q3_4.quant == q4_5(0.0)
assert uq2_3.max - q3_4.min == q4_5(7.5)
assert q2_3.min - uq3_4.max == q5_6(-9.5)
assert uq3_4.max - q3_4.min == q5_6(11.5)
assert q3_4.min - uq3_4.max == q5_6(-11.5)
assert uq2_4.quant - q3_4.quant == q4_6(float(uq2_4.quant) - float(q3_4.quant))
assert uq2_4.max - q3_4.min == q4_6(7.75)
assert uq3_4.max - q3_5.min == q5_7(11.5)
assert q2_4.min - uq3_4.max == q5_7(-9.5)
|
utils/visualize_mel.py | BaoLocPham/hum2song | 108 | 12672941 | import numpy as np
import yaml
import argparse
import os
import random
import matplotlib.pyplot as plt
def save_img(path, spec_song=None, spec_hum=None):
if spec_song is None or spec_hum is None:
if spec_song is not None:
plt.imshow(spec_song, origin="lower")
plt.title("song", fontsize="medium")
plt.ylim(0, spec_song.shape[0])
if spec_hum is not None:
plt.imshow(spec_hum, origin="lower")
plt.title("hum", fontsize="medium")
plt.ylim(0, spec_hum.shape[0])
else:
fig, axes = plt.subplots(2, 1, squeeze=False)
axes[0, 0].imshow(spec_song, origin="lower")
axes[0, 0].set_title("song", fontsize="medium")
axes[0, 0].set_ylim(0, spec_song.shape[0])
axes[1, 0].imshow(spec_hum, origin="lower")
axes[1, 0].set_title("hum", fontsize="medium")
axes[1, 0].set_ylim(0, spec_hum.shape[0])
plt.savefig(path)
plt.close()
def visualize(dataset, in_dir, out_dir, num):
random.seed(1234)
files = os.listdir(os.path.join(in_dir, dataset, "hum"))
random.shuffle(files)
files = random.sample(files, k=min(num, len(files)))
os.makedirs(os.path.join(out_dir, dataset), exist_ok=True)
if dataset == "train" or dataset == "val":
for file in files:
spec_hum = np.load(os.path.join(in_dir, dataset, "hum", file))
spec_song = np.load(os.path.join(in_dir, dataset, "song", file))
save_img(os.path.join(out_dir, dataset, file[:-4] + ".jpg"), spec_song.T, spec_hum.T)
elif dataset == "public_test":
os.makedirs(os.path.join(out_dir, dataset, "hum"), exist_ok=True)
for file in files:
spec_hum = np.load(os.path.join(in_dir, dataset, "hum", file))
save_img(os.path.join(out_dir, dataset, "hum", file[:-4] + ".jpg"), spec_hum=spec_hum.T)
files = os.listdir(os.path.join(in_dir, dataset, "full_song"))
random.shuffle(files)
files = random.sample(files, k=min(num, len(files)))
os.makedirs(os.path.join(out_dir, dataset, "full_song"), exist_ok=True)
for file in files:
spec_song = np.load(os.path.join(in_dir, dataset, "full_song", file))
save_img(os.path.join(out_dir, dataset, "full_song", file[:-4] + ".jpg"), spec_song=spec_song.T)
def main(config, num):
in_dir = config["path"]["preprocessed_path"]
out_dir = config["path"]["visualization_path"]
dataset = ["train", "val", "public_test"]
os.makedirs(out_dir, exist_ok=True)
for data in dataset:
visualize(data, in_dir, out_dir, num)
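# Example invocation (added for clarity; the script path is an assumption):
#   python utils/visualize_mel.py --config config/preprocess.yaml --num 5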
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--config", type=str, required=False,
default="config/preprocess.yaml",
help="path to preprocess.yaml")
parser.add_argument("--indir", type=str, required=False, help="path to input")
parser.add_argument("--outdir", type=str, required=False, help="path to output")
parser.add_argument("--num", type=int, required=False, default=5, help="num of samples")
args = parser.parse_args()
config = yaml.load(open(args.config, "r"), Loader=yaml.FullLoader)
if args.indir is not None:
config["path"]["preprocessed_path"] = args.indir
if args.outdir is not None:
config["path"]["visualization_path"] = args.outdir
main(config, num=args.num) |
examples/cloudml-churn-prediction/trainer/trainer/metadata.py | ruchirjain86/professional-services | 2,116 | 12672947 | <gh_stars>1000+
# Copyright 2019 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Define metadata constants."""
LABEL_COLUMN = 'labelArray'
KEY_COLUMN = 'fullVisitorId'
# columns to omit from model features
NON_FEATURE_COLUMNS = [LABEL_COLUMN, KEY_COLUMN]
NUM_INTERVALS = 4 # number of bounded churn duration intervals
SEED = 123
|
src/tests/error_tests.py | sanders41/pydocstyle | 776 | 12672961 | """Tests for the violations.Error class."""
import pytest
import collections
import textwrap
from pydocstyle.violations import Error
MockDefinition = collections.namedtuple('MockDefinition', ['source', 'start'])
def test_message_without_context():
"""Test a simple error message without parameters."""
error = Error('CODE', 'an error', None)
assert error.message == 'CODE: an error'
def test_message_with_context():
"""Test an error message with parameters."""
error = Error('CODE', 'an error', 'got {}', 0)
assert error.message == 'CODE: an error (got 0)'
def test_message_with_insufficient_parameters():
"""Test an error message with invalid parameter invocation."""
error = Error('CODE', 'an error', 'got {}')
with pytest.raises(IndexError):
assert error.message
def test_lines():
"""Test proper printing of source lines, including blank line trimming."""
error = Error('CODE', 'an error', None)
definition = MockDefinition(source=['def foo():\n',
' """A docstring."""\n',
'\n',
' pass\n',
'\n',
'\n'],
start=424)
error.set_context(definition, None)
print(error.lines)
assert error.lines == textwrap.dedent('''\
424: def foo():
425: """A docstring."""
426:
427: pass
''')
|
Validation/RecoParticleFlow/python/pfTauBenchmarkElecRejection_cfi.py | ckamtsikis/cmssw | 852 | 12672976 | import FWCore.ParameterSet.Config as cms
pfTauBenchmarkElecRejection = cms.EDAnalyzer("PFTauElecRejectionBenchmarkAnalyzer",
OutputFile = cms.untracked.string('tauBenchmarkElecRejection.root'),
InputTruthLabel = cms.InputTag('generatorSmeared'),
BenchmarkLabel = cms.string('PFTauElecRejection'),
minRecoPt = cms.double(15.0),
maxRecoAbsEta = cms.double(2.5),
minMCPt = cms.double(10.0),
maxMCAbsEta = cms.double(2.5),
maxDeltaR = cms.double(0.3),
PFTauProducer = cms.InputTag('shrinkingConePFTauProducer'),
PFTauDiscriminatorByIsolationProducer = cms.InputTag('shrinkingConePFTauDiscriminationByIsolation'),
PFTauDiscriminatorAgainstElectronProducer = cms.InputTag('shrinkingConePFTauDiscriminationAgainstElectron'),
ApplyEcalCrackCut = cms.bool(True),
GenMatchObjectLabel = cms.string('tau') # match with hadronic 'tau' or electron "e"
)
|
Chapter_16/ch16_ex9.py | pauldevos/Mastering-Object-Oriented-Python-Second-Edition | 108 | 12673029 | <filename>Chapter_16/ch16_ex9.py<gh_stars>100-1000
#!/usr/bin/env python3.7
"""
Mastering Object-Oriented Python 2e
Code Examples for Mastering Object-Oriented Python 2nd Edition
Chapter 16. Example 9.
"""
import logging
import logging.config
import logging.handlers
import yaml
import time
# Producer/Consumer
# ==========================
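# Summary (added for readability): each producer process attaches a
# logging.handlers.QueueHandler to its logger, so its LogRecords land on a shared
# multiprocessing.Queue; a single consumer process pops the records and dispatches
# them through one "combined" logger configured from consumer_config below.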
# The Consumer
consumer_config = """
version: 1
disable_existing_loggers: False
handlers:
console:
class: logging.StreamHandler
stream: ext://sys.stderr
formatter: basic
formatters:
basic:
style: "{"
format: "{levelname:s}:{name:s}:{message:s}"
loggers:
combined:
handlers: [console]
formatter: detail
level: INFO
propagate: False
root:
handlers: [console]
level: INFO
"""
import collections
import logging
import multiprocessing
class Log_Consumer_1(multiprocessing.Process):
"""In effect, an instance of QueueListener."""
def __init__(self, queue):
self.source = queue
super().__init__()
        logging.config.dictConfig(yaml.safe_load(consumer_config))
self.combined = logging.getLogger(f"combined.{self.__class__.__qualname__}")
self.log = logging.getLogger(self.__class__.__qualname__)
self.counts = collections.Counter()
def run(self):
self.log.info("Consumer Started")
while True:
log_record = self.source.get()
            if log_record is None: break
self.combined.handle(log_record)
self.counts[log_record.getMessage()] += 1
self.log.info("Consumer Finished")
self.log.info(self.counts)
# The Producers
class Log_Producer(multiprocessing.Process):
handler_class = logging.handlers.QueueHandler
def __init__(self, proc_id, queue):
self.proc_id = proc_id
self.destination = queue
super().__init__()
self.log = logging.getLogger(
f"{self.__class__.__qualname__}.{self.proc_id}")
self.log.handlers = [self.handler_class(self.destination)]
self.log.setLevel(logging.INFO)
def run(self):
self.log.info(f"Started")
for i in range(100):
self.log.info(f"Message {i:d}")
time.sleep(0.001)
self.log.info(f"Finished")
def demo():
# The Queue
import multiprocessing
# size = 10 # Too small.
size = 30 # Better
queue1: multiprocessing.Queue = multiprocessing.Queue(size)
# The consumer process
consumer = Log_Consumer_1(queue1)
consumer.start()
# The producers
producers = []
for i in range(10):
proc = Log_Producer(i, queue1)
proc.start()
producers.append(proc)
# Normal termination
for p in producers:
p.join()
queue1.put(None)
consumer.join()
logging.shutdown()
__test__ = {name: value for name, value in locals().items() if name.startswith("test_")}
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=False)
demo()
|
configs/mmcls/classification_onnxruntime_dynamic.py | aegis-rider/mmdeploy | 746 | 12673048 | _base_ = ['./classification_dynamic.py', '../_base_/backends/onnxruntime.py']
|
pyjobs/profiler/management/commands/linkedin_user_stats.py | Mdslino/PyJobs | 132 | 12673050 | import os
from django.conf import settings
from django.core.management.base import BaseCommand
from django.contrib.auth.models import User
from pyjobs.profiler.models import ProfilerData
from django.db import IntegrityError
from linkedin_scraper import Person, actions
from pprint import pprint
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from tqdm import tqdm
from time import sleep
def driver_factory():
chrome_options = Options()
chrome_options.add_argument("--headless")
driver = webdriver.Chrome(options=chrome_options)
actions.login(driver, settings.LINKEDIN_EMAIL, settings.LINKEDIN_PASSWORD)
return driver
class Command(BaseCommand):
def handle(self, *args, **options):
driver = driver_factory()
for user in tqdm(
User.objects.filter(profile__linkedin__isnull=False).exclude(
profile__linkedin__in=["", " ", "\n"]
)
):
user_data = {
"name": "",
"about": "",
"experiences": [],
"education": [],
"interests": [],
}
try:
person = Person(
user.profile.linkedin,
contacts=[],
driver=driver,
close_on_complete=False,
)
user_data["name"] = person.name
user_data["about"] = person.about
for experience in person.experiences:
user_data["experiences"].append(
{
"description": experience.description,
"position_title": experience.position_title.replace(
"Nome da empresa\n", ""
),
"duration": experience.duration,
}
)
for education in person.educations:
                    user_data["education"].append(
{
"from_date": education.from_date,
"to_date": education.to_date,
"degree": education.degree,
"company": education.company,
}
)
user_data["interests"] = [
interest.title for interest in person.interests
]
ProfilerData.objects.get_or_create(user=user, linkedin_data=user_data)
except Exception as e:
pass
driver.close()
|
examples/python.tornado/nats_client.py | ariasheets-wk/frugal | 144 | 12673062 | <reponame>ariasheets-wk/frugal
import logging
import sys
import uuid
from nats.io.client import Client as NATS
from tornado import ioloop, gen
from thrift.protocol import TBinaryProtocol
from thrift.transport.TTransport import TTransportException
from frugal.context import FContext
from frugal.protocol import FProtocolFactory
from frugal.provider import FServiceProvider
from frugal.tornado.transport import FNatsTransport
sys.path.append('gen-py.tornado')
from v1.music.f_Store import Client as FStoreClient # noqa
from v1.music.ttypes import Album # noqa
root = logging.getLogger()
root.setLevel(logging.DEBUG)
ch = logging.StreamHandler(sys.stdout)
ch.setLevel(logging.DEBUG)
formatter = logging.Formatter(
'%(asctime)s - %(levelname)s - %(message)s')
ch.setFormatter(formatter)
root.addHandler(ch)
@gen.coroutine
def main():
# Declare the protocol stack used for serialization.
# Protocol stacks must match between clients and servers.
prot_factory = FProtocolFactory(TBinaryProtocol.TBinaryProtocolFactory())
# Open a NATS connection to send requests
nats_client = NATS()
options = {
"verbose": True,
"servers": ["nats://127.0.0.1:4222"]
}
yield nats_client.connect(**options)
# Create a nats transport using the connected client
# The transport sends data on the music-service NATS topic
nats_transport = FNatsTransport(nats_client, "music-service")
try:
yield nats_transport.open()
except TTransportException as ex:
root.error(ex)
raise gen.Return()
# Using the configured transport and protocol, create a client
# to talk to the music store service.
store_client = FStoreClient(FServiceProvider(nats_transport, prot_factory),
middleware=logging_middleware)
album = yield store_client.buyAlbum(FContext(),
str(uuid.uuid4()),
"ACT-12345")
root.info("Bought an album %s\n", album)
yield store_client.enterAlbumGiveaway(FContext(),
"<EMAIL>",
"Kevin")
yield nats_transport.close()
yield nats_client.close()
def logging_middleware(next):
def handler(method, args):
service = '%s.%s' % (method.im_self.__module__,
method.im_class.__name__)
print '==== CALLING %s.%s ====' % (service, method.im_func.func_name)
ret = next(method, args)
print '==== CALLED %s.%s ====' % (service, method.im_func.func_name)
return ret
return handler
if __name__ == '__main__':
# Since we can exit after the client calls use `run_sync`
ioloop.IOLoop.instance().run_sync(main)
|
magma/ref.py | leonardt/magma | 167 | 12673083 | <filename>magma/ref.py<gh_stars>100-1000
import abc
import typing
import weakref
from magma.compatibility import IntegerTypes
class Ref:
@abc.abstractmethod
def __str__(self):
raise NotImplementedError()
def __repr__(self):
return self.qualifiedname()
@abc.abstractmethod
def qualifiedname(self, sep="."):
raise NotImplementedError()
@abc.abstractmethod
def anon(self):
raise NotImplementedError()
def parent(self):
return self
def root(self) -> typing.Optional['Ref']:
parent = self.parent()
if parent is self:
return self
return parent.root()
class AnonRef(Ref):
def __init__(self):
self.name = None
def __str__(self):
return f"AnonymousValue_{id(self)}"
def qualifiedname(self, sep='.'):
return f"AnonymousValue_{id(self)}"
def anon(self):
return True
class NamedRef(Ref):
def __init__(self, name, value=None):
if not isinstance(name, (str, int)):
raise TypeError("Expected string or int")
self.name = name
self._value = value if value is None else weakref.ref(value)
def __str__(self):
return self.name
def qualifiedname(self, sep="."):
return self.name
def anon(self):
return False
def value(self):
return self._value if self._value is None else self._value()
class TempNamedRef(NamedRef):
pass
class InstRef(NamedRef):
def __init__(self, inst, name):
super().__init__(name)
if not inst:
raise ValueError(f"Bad inst: {inst}")
self.inst = inst
def qualifiedname(self, sep="."):
name = self.name
if isinstance(self.name, IntegerTypes):
# Hack, Hack, Hack!
# NOTE: This is used for verilog instances that don't use named
# port (wired by index instead), so the ports are referred to by
# index instead of name and we use the array indexing syntax to
# represent them
# See mantle's generic verilog target for example use case
if sep == ".":
return f"{self.inst.name}[{self.name}]"
return self.inst.name + sep + str(name)
class LazyInstRef(InstRef):
def __init__(self, name):
self.name = name
self._inst = None
@property
def inst(self):
if self._inst is not None:
return self._inst
return LazyCircuit
def qualifiedname(self, sep="."):
return super().qualifiedname(sep)
def set_inst(self, inst):
if self._inst is not None:
raise Exception("Can only set definition of LazyInstRef once")
self._inst = inst
class DefnRef(NamedRef):
def __init__(self, defn, name):
super().__init__(name)
if not defn:
raise ValueError(f"Bad defn: {defn}")
self.defn = defn
def qualifiedname(self, sep="."):
if sep == ".":
return self.defn.__name__ + sep + self.name
return self.name
class LazyCircuit:
name = ""
class LazyDefnRef(DefnRef):
def __init__(self, name):
self.name = name
self._defn = None
@property
def defn(self):
if self._defn is not None:
return self._defn
return LazyCircuit
def qualifiedname(self, sep="."):
return super().qualifiedname(sep)
def set_defn(self, defn):
if self._defn is not None:
raise Exception("Can only set definition of LazyDefnRef once")
self._defn = defn
class ArrayRef(Ref):
def __init__(self, array, index):
self.array = array
self.index = index
def __str__(self):
return self.qualifiedname()
def qualifiedname(self, sep="."):
return f"{self.array.name.qualifiedname(sep=sep)}[{self.index}]"
def anon(self):
return self.array.name.anon()
def parent(self):
return self.array.name
class TupleRef(Ref):
def __init__(self, tuple, index):
self.tuple = tuple
self.index = index
def __str__(self):
return self.qualifiedname()
def qualifiedname(self, sep="."):
try:
int(self.index)
return (self.tuple.name.qualifiedname(sep=sep) +
"[" + str(self.index) + "]")
except ValueError:
return (self.tuple.name.qualifiedname(sep=sep) +
sep + str(self.index))
def anon(self):
return self.tuple.name.anon()
def parent(self):
return self.tuple.name
class PortViewRef(Ref):
"""
Used for values that are connection references to a hierarchical value
(using the view logic)
"""
def __init__(self, view):
self.view = view
def qualifiedname(self, sep="."):
return self.view.port.name.qualifiedname(sep)
def anon(self):
return self.view.port.anon()
def __str__(self):
return str(self.view.port.name)
def root(self):
return self.view.root()
def get_ref_inst(ref):
"""
If value is part of a port on an instance, return that instance,
otherwise None.
"""
root = ref.root()
if not isinstance(root, InstRef):
return None
return root.inst
def get_ref_defn(ref):
"""
If value is part of a port on an definition, return that definition,
otherwise None.
"""
root = ref.root()
if not isinstance(root, DefnRef):
return None
return root.defn
def is_temp_ref(ref):
root = ref.root()
return isinstance(root, (TempNamedRef, AnonRef))
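# Illustrative note (added; not in the original module): qualifiedname() composes
# hierarchical names, e.g. an ArrayRef over a value named "arr" with index 3
# renders as "arr[3]", and a DefnRef renders as "MyCircuit.port" when sep == ".".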
|
rlbench/tasks/put_books_on_bookshelf.py | vonHartz/RLBench | 619 | 12673098 | from typing import List, Tuple
from pyrep.objects import Dummy
from pyrep.objects.shape import Shape
from pyrep.objects.proximity_sensor import ProximitySensor
from rlbench.backend.task import Task
from rlbench.backend.conditions import DetectedCondition, NothingGrasped
class PutBooksOnBookshelf(Task):
def init_task(self) -> None:
self._success_sensor = ProximitySensor('success')
self._books = [Shape('book2'), Shape('book1'), Shape('book0')]
self._waypoints_idxs = [5, 11, -1]
self.register_graspable_objects(self._books)
def init_episode(self, index: int) -> List[str]:
self.register_success_conditions([
DetectedCondition(
b, self._success_sensor) for b in self._books[:index+1]
])
self.register_stop_at_waypoint(self._waypoints_idxs[index])
return ['put %d books on bookshelf' % (index + 1),
'pick up %d books and place them on the top shelf' % (index + 1),
'stack %d books up on the top shelf' % (index + 1)]
def variation_count(self) -> int:
return 3
def base_rotation_bounds(self) -> Tuple[List[float], List[float]]:
return [0.0, 0.0, -3.14/2], [0.0, 0.0, 3.14/2]
|
vilya/models/ngit/patch.py | mubashshirjamal/code | 1,582 | 12673153 | # -*- coding: utf-8 -*-
from __future__ import absolute_import
from itertools import groupby
from vilya.libs.generated import Generated
from vilya.libs.text import is_image
from vilya.models.consts import LINECOMMENT_INDEX_EMPTY
from vilya.models.ngit.hunk import Hunk
MAX_PATCH_MOD_LINES = 2000
INVALID_OID = b'0' * 40
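# 40 zeros is the null object id git/libgit2 report when one side of the diff is missing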
class Patch(object):
# libgit2 status definition
# GIT_DELTA_ADDED: code = 'A'
# GIT_DELTA_DELETED: code = 'D'
# GIT_DELTA_MODIFIED: code = 'M'
# GIT_DELTA_RENAMED: code = 'R'
# GIT_DELTA_COPIED: code = 'C'
# GIT_DELTA_IGNORED: code = 'I'
# GIT_DELTA_UNTRACKED: code = '?'
# default: code = ' '
def __init__(self, repo, diff, patch, linecomments=[],
is_limit_lines=True):
self.repo = repo
self.diff = diff
self._patch = patch
self._old_file_length = None
self._new_file_length = None
# patch sha == diff sha
        # FIXME: old_sha seems to be None for commit diffs
self.old_sha = patch['old_sha']
self.new_sha = patch['new_sha']
# oids # an oid encoded in hex (40 bytes) # invalid oid = '0000...'
self.old_file_sha = patch['old_oid']
self.new_file_sha = patch['new_oid']
self.status = patch['status']
self.old_file_path = patch['old_file_path']
self.new_file_path = patch['new_file_path']
# TODO: remove self.filepath
self.filepath = self.old_file_path
self.additions = patch['additions']
self.deletions = patch['deletions']
self.similarity = patch['similarity']
self.binary = patch['binary']
self._generated = None
# TODO: move to def init_comment_groups
def func_filter(l):
if l.has_oids:
return l.from_oid == self.old_file_sha \
and l.to_oid == self.new_file_sha
else:
return l.from_sha == self.new_sha
self.linecomments = filter(func_filter, linecomments)
self.linecomments_has_linenum = []
self.linecomments_has_pos = []
for l in self.linecomments:
(self.linecomments_has_pos,
self.linecomments_has_linenum)[l.has_linenum].append(l)
if is_limit_lines and self.additions + self.deletions > \
MAX_PATCH_MOD_LINES:
self.is_toobig = True
self.hunks = []
else:
self.is_toobig = False
self.init_comment_groups()
self.init_hunks(patch)
def init_comment_groups(self):
        # TODO: filter linecomments by oids
keyfunc_pos = lambda x: x.position
keyfunc_line = lambda x: x.linenum
self.comments_by_pos = {}
self.comments_by_line = {}
if self.linecomments_has_pos:
self.linecomments_has_pos.sort(key=keyfunc_pos)
self.comments_by_pos = dict((k, list(v))
for k, v in groupby(
self.linecomments_has_pos, key=keyfunc_pos))
if self.linecomments_has_linenum:
self.linecomments_has_linenum.sort(key=keyfunc_line)
self.comments_by_line = dict((k, list(v))
for k, v in groupby(
self.linecomments_has_linenum, key=keyfunc_line))
# TODO: refactor this! T^T
def init_hunks(self, raw_patch):
        ''' init hunks; add extra contexts when there are linecomments not
        covered by the existing hunks '''
EXTRE_CONTEXT_LINES = 3
def expand_hunk(hunk, last_hunk_old_end, type,
MAX_LINE_NUM=99999,
MIN_LINE_NUM=0):
if type == 'up':
min_old_not_involved = MAX_LINE_NUM
for linecomment in self.linecomments_has_linenum:
not_involved = False
old, new = linecomment.linenum
if old != LINECOMMENT_INDEX_EMPTY \
and new != LINECOMMENT_INDEX_EMPTY:
not_involved = last_hunk_old_end < old \
and old < hunk.old_start
if not_involved:
min_old_not_involved = min(min_old_not_involved, old)
if min_old_not_involved != MAX_LINE_NUM:
contexts = self.get_contexts(
min_old_not_involved - EXTRE_CONTEXT_LINES,
hunk.old_start)
if contexts:
hunk.expand_top_contexts(contexts)
elif type == 'bottom':
max_old_not_involved = MIN_LINE_NUM
for linecomment in self.linecomments_has_linenum:
not_involved = False
old, new = linecomment.linenum
if old != LINECOMMENT_INDEX_EMPTY \
and new != LINECOMMENT_INDEX_EMPTY:
not_involved = last_hunk_old_end < old
if not_involved:
max_old_not_involved = max(max_old_not_involved, old)
if max_old_not_involved != MIN_LINE_NUM:
contexts = self.get_contexts(
hunk.old_end + 1,
max_old_not_involved + 1 + EXTRE_CONTEXT_LINES)
if contexts:
hunk.expand_bottom_contexts(contexts)
self.hunks = [Hunk(self, h) for h in raw_patch['hunks']]
if not self.hunks:
return
        # TODO: further split into pull/new pull/discussion compare, etc.?
if self.linecomments_has_linenum and self.repo.provide('project'):
last_hunk_old_end = 0
for hunk in self.hunks:
expand_hunk(hunk, last_hunk_old_end, type='up')
last_hunk_old_end = hunk.old_end
expand_hunk(hunk, last_hunk_old_end, type='bottom')
if self.repo.provide('project'):
first_hunk = self.hunks[0]
last_hunk = self.hunks[-1]
# add top_hunk
if first_hunk.old_start > EXTRE_CONTEXT_LINES + 1:
contexts = self.get_contexts(1, EXTRE_CONTEXT_LINES + 1)
if contexts:
top_hunk = Hunk(self,
old_start=1,
new_start=1,
old_lines=EXTRE_CONTEXT_LINES,
new_lines=EXTRE_CONTEXT_LINES,
contexts=contexts)
self.hunks.insert(0, top_hunk)
elif first_hunk.old_start > 1:
contexts = self.get_contexts(1, first_hunk.old_start)
if contexts:
first_hunk.expand_top_contexts(contexts)
# add bottom_hunk
if last_hunk.old_end + EXTRE_CONTEXT_LINES < self.old_file_length:
bottom_hunk_old_start = self.old_file_length - EXTRE_CONTEXT_LINES + 1 # noqa
bottom_hunk_new_start = self.new_file_length - EXTRE_CONTEXT_LINES + 1 # noqa
contexts = self.get_contexts(bottom_hunk_old_start,
self.old_file_length + 1)
if contexts:
bottom_hunk = Hunk(self,
old_start=bottom_hunk_old_start,
new_start=bottom_hunk_new_start,
old_lines=EXTRE_CONTEXT_LINES,
new_lines=EXTRE_CONTEXT_LINES,
contexts=contexts)
self.hunks.append(bottom_hunk)
elif last_hunk.old_end < self.old_file_length:
contexts = self.get_contexts(last_hunk.old_end + 1,
self.old_file_length + 1)
if contexts:
last_hunk.expand_bottom_contexts(contexts)
# update hunks
pos = 1
for i, hunk in enumerate(self.hunks):
hunk.start_pos = pos
pos += hunk.n_lines + 1 # +1 means hunk_heading
if i > 0:
last = self.hunks[i - 1]
hunk.skipped_old_start = last.old_end + 1
hunk.skipped_new_start = last.new_end + 1
hunk.skipped_old_end = hunk.old_start - 1
hunk.skipped_new_end = hunk.new_start - 1
def get_contexts(self, start, end):
''' get patch's context lines in [start, end) '''
if self.old_file_sha == INVALID_OID:
ref = self.new_sha
elif self.new_file_sha == INVALID_OID:
ref = self.old_sha
else:
ref = self.old_sha or self.new_sha
contexts = self.repo.get_contexts(ref, self.old_file_path,
start, end)
return contexts
@property
def old_file_length(self):
if self._old_file_length is not None:
return self._old_file_length
if self.old_file_sha == INVALID_OID:
self._old_file_length = 0
return self._old_file_length
ref = self.old_sha or self.new_sha
self._old_file_length = self.repo.get_file_n_lines(
ref, self.old_file_path)
return self._old_file_length
@property
def new_file_length(self):
if self._new_file_length is not None:
return self._new_file_length
if self.new_file_sha == INVALID_OID:
self._new_file_length = 0
return self._new_file_length
ref = self.new_sha
self._new_file_length = self.repo.get_file_n_lines(
ref, self.new_file_path)
return self._new_file_length
@property
def image(self):
return is_image(self.old_file_path)
@property
def generated(self):
        # FIXME: performance issue with the generated-file check
if self._generated is not None:
return self._generated
def get_data():
data = ''
try:
if self.status == 'D':
blob = self.repo.get_file(self.old_sha, self.old_file_path)
else:
blob = self.repo.get_file(self.new_sha, self.new_file_path)
if blob:
data = blob.data
except: # very first commit ??
data = ''
return data
generated = Generated.is_generated(self.new_file_path, get_data)
self._generated = generated
return generated
#@property
# def n_lines(self):
# return sum([hunk.n_lines for hunk in self.hunks])
# TODO: remove this
@property
def content(self):
content = []
for h in self.hunks:
content.append(h.heading)
for l in h.lines:
content.append(l)
return content
# TODO: rewrite
# FIXME: more explanation
def smart_slice(self, num):
content = self.content[:num + 1]
if len(content) > 15:
tip_pos = 0
for idx, line in enumerate(content):
if line.old is None and line.new is None:
tip_pos = idx
content = content[tip_pos:]
if len(content) > 25:
return content[-25:]
else:
return content
return content
|
libs/omninet/cnp/SubLayers.py | kyteinsky/OmniNet | 525 | 12673156 | <reponame>kyteinsky/OmniNet
#
# Copyright 2019 <NAME>, <NAME>, <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ======================================================================
"""
Authors: <NAME>
OmniNet transformer sub layers
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
class ScaledDotProductAttention(nn.Module):
''' Scaled Dot-Product Attention '''
def __init__(self, temperature, attn_dropout=0.1):
super().__init__()
self.temperature = temperature
self.dropout = nn.Dropout(attn_dropout)
self.softmax = nn.Softmax(dim=2)
def forward(self, q, k, v, mask=None,k_gate=None):
attn = torch.bmm(q, k.transpose(1, 2))
attn = attn / self.temperature
if k_gate is not None:
attn=torch.mul(attn,k_gate)
if mask is not None:
attn = attn.masked_fill(mask, -np.inf)
attn = self.softmax(attn)
attn = self.dropout(attn)
output = torch.bmm(attn, v)
return output, attn
class MultiHeadAttention(nn.Module):
''' Multi-Head Attention module '''
def __init__(self, n_head, d_model, d_k, d_v, dropout=0.1):
super().__init__()
self.n_head = n_head
self.d_k = d_k
self.d_v = d_v
self.w_qs = nn.Linear(d_model, n_head * d_k)
self.w_ks = nn.Linear(d_model, n_head * d_k)
self.w_vs = nn.Linear(d_model, n_head * d_v)
nn.init.normal_(self.w_qs.weight, mean=0, std=np.sqrt(2.0 / (d_model + d_k)))
nn.init.normal_(self.w_ks.weight, mean=0, std=np.sqrt(2.0 / (d_model + d_k)))
nn.init.normal_(self.w_vs.weight, mean=0, std=np.sqrt(2.0 / (d_model + d_v)))
self.attention = ScaledDotProductAttention(temperature=np.power(d_k, 0.5))
self.layer_norm = nn.LayerNorm(d_model)
self.fc = nn.Linear(n_head * d_v, d_model)
nn.init.xavier_normal_(self.fc.weight)
self.dropout = nn.Dropout(dropout)
def forward(self, q, k, v, mask=None,k_gate=None):
d_k, d_v, n_head = self.d_k, self.d_v, self.n_head
sz_b, len_q, _ = q.size()
sz_b, len_k, _ = k.size()
sz_b, len_v, _ = v.size()
if k_gate is not None:
k_gate = k_gate.transpose(0, 1)
k_gate=k_gate.reshape(n_head*sz_b,len_q,len_v)
residual = q
q = self.w_qs(q).view(sz_b, len_q, n_head, d_k)
k = self.w_ks(k).view(sz_b, len_k, n_head, d_k)
v = self.w_vs(v).view(sz_b, len_v, n_head, d_v)
#A Weighting score for the keys is provided
q = q.permute(2, 0, 1, 3).contiguous().view(-1, len_q, d_k) # (n*b) x lq x dk
k = k.permute(2, 0, 1, 3).contiguous().view(-1, len_k, d_k) # (n*b) x lk x dk
v = v.permute(2, 0, 1, 3).contiguous().view(-1, len_v, d_v) # (n*b) x lv x dv
if mask is not None:
mask = mask.repeat(n_head, 1, 1) # (n*b) x .. x ..
output, attn = self.attention(q, k, v, mask=mask,k_gate=k_gate)
output = output.view(n_head, sz_b, len_q, d_v)
output = output.permute(1, 2, 0, 3).contiguous().view(sz_b, len_q, -1) # b x lq x (n*dv)
output = self.dropout(self.fc(output))
output = self.layer_norm(output + residual)
attn=attn.view(n_head,sz_b,len_q,len_v).transpose(0,1)
return output, attn
class PositionwiseFeedForward(nn.Module):
''' A two-feed-forward-layer module '''
def __init__(self, d_in, d_hid, dropout=0.1):
super().__init__()
self.w_1 = nn.Linear(d_in, d_hid) # position-wise
self.w_2 = nn.Linear(d_hid, d_in) # position-wise
self.layer_norm = nn.LayerNorm(d_in)
self.dropout = nn.Dropout(dropout)
def forward(self, x):
residual = x
output = x
output = self.w_2(F.relu(self.w_1(output)))
output = self.dropout(output)
output = self.layer_norm(output + residual)
return output
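# Illustrative usage sketch (added; shapes inferred from the code above, treat as
# an assumption rather than part of the original module):
#   mha = MultiHeadAttention(n_head=8, d_model=512, d_k=64, d_v=64)
#   x = torch.rand(2, 10, 512)    # (batch, seq_len, d_model)
#   out, attn = mha(x, x, x)      # out: (2, 10, 512), attn: (2, 8, 10, 10)
#   ffn = PositionwiseFeedForward(d_in=512, d_hid=2048)
#   y = ffn(out)                  # (2, 10, 512)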
|
client/fcImgThread.py | shpoellet/telecine | 138 | 12673157 | <gh_stars>100-1000
import cv2 #needed for histogram plotting and preview window display
#need to build and install opencv version 3 to support frame blending
import threading
import struct
import logging
import config
import numpy as np
import io
from time import sleep
from fractions import Fraction
from PyQt5 import QtCore as qtcore
from PyQt5 import QtGui
mask_pct = .8 #this determines what (center) portion of the image is used for histogram calculations. (Avoid using black borders)
blender=cv2.createMergeMertens() #COMMENT OUT IF NOT USING opencv version 3+ and bracketing
def thefilename(i,suffix=""):
fname=str(config.folder) + "/img%.5d%s.jpg" % (i,suffix)
logging.debug(fname)
return fname
def subDims(amt, fraction): #sub dimensions: Pass it a width or height and get the origin and width/height of the center portion
return(int(amt*(1-fraction)/2),int(amt*(1+fraction)/2))
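# e.g. subDims(1000, 0.8) -> (100, 900): the central 80% of a 1000-pixel dimension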
def getMask(img, fraction):
mask = np.zeros(img.shape[:2], np.uint8)
(x,x2,y,y2)=subDims(img.shape[0],fraction)+subDims(img.shape[1],fraction)
mask[x:x2, y:y2]=255
return mask
def saveable_img(img):
return np.array(img,dtype=float)*float(255)
def adjustable_img(img):
return cv2.convertScaleAbs(img, alpha=255)
#return np.array(img*float(250),dtype=np.uint8)
def saveable_255_img(img):
return np.array(img,dtype=float)
def quickBrightness(img):
#make a thumbnail, convert to grayscale, get avg value
brt=cv2.mean(cv2.cvtColor(cv2.resize(img, (120,90)),cv2.COLOR_BGR2GRAY))
brt=int(brt[0]*100)
logging.debug("Brt="+str(brt))
return brt
def correctLens(img, w, h):
distCoeff = np.zeros((4,1),np.float64)
# TODO: add your coefficients here!
k1 = config.lensCorrValue # negative to remove barrel distortion
k2 = 0.0;
p1 = 0.0;
p2 = 0.0;
distCoeff[0,0] = k1;
distCoeff[1,0] = k2;
distCoeff[2,0] = p1;
distCoeff[3,0] = p2;
# assume unit matrix for camera
cam = np.eye(3,dtype=np.float32)
cam[0,2] = w/2.0 # define center x
cam[1,2] = h/2.0 # define center y
cam[0,0] = 100. # define focal length x
cam[1,1] = 100. # define focal length y
# here the undistortion will be computed
dst = cv2.undistort(img,cam,distCoeff)
return dst
def adjustLevels(img):
h,w=img.shape[:2]
#perform lens correction if selected
logging.debug("AdjustingLevels")
logging.debug(np.shape(img))
logging.debug(np.shape(config.flatFieldImg))
if config.antiVignetting and (len(config.flatFieldImg) > 0) and (np.shape(img)[0]==np.shape(config.flatFieldImg)[0]):
img=flatFieldCorrection(img)
if config.lensCorr:
img=correctLens(img, w, h)
#perform rotation if selected
if config.rotation:
M = cv2.getRotationMatrix2D((w/2,h/2),config.rotationValue,1)
img = cv2.warpAffine(img,M,(w,h))
#perform cropping if selected
if config.cropping:
img=img[config.cropT:h-config.cropB, config.cropL:w-config.cropR]
return img# cv2.LUT(img, config.lut)
#def fadj(img, ff):
# img/ff*exp((max(0,img-128)+128)/256)
def flatFieldCorrection(img):
logging.debug("Performing Flat Field Correction")
logging.debug(np.shape(img))
logging.debug(img[1000][1000])
img=img/config.flatFieldImg
img=np.clip(img,0,255)
img=img.astype(np.uint8)
return img
def genFlatFieldImg():
logging.debug("Generating Flat Field Image")
avgimg=np.mean(config.calibrationImages, axis=0)
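    # Average the calibration frames, normalise by the global mean so the correction
    # map has mean ~1, and clip to [0.5, 2.0] to avoid extreme gains;
    # flatFieldCorrection() divides each captured frame by this map.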
config.flatFieldImg=np.clip(np.true_divide(avgimg,np.mean(avgimg)), .5, 2.0)
#config.flatFieldImg=np.clip(np.true_divide(config.calibrationImages[0],np.mean(config.calibrationImages[0])), .8, 1.2)
class imgThread (qtcore.QThread):#(threading.Thread):
updateFrameNumSig = qtcore.pyqtSignal(int)
updateSSSig = qtcore.pyqtSignal(int, int, int)
updateGainsSig = qtcore.pyqtSignal (int, int)
updateStatusSig = qtcore.pyqtSignal (str)
plotHistogramSig = qtcore.pyqtSignal (list, np.ndarray, float)
displayWashoutsSig = qtcore.pyqtSignal (list, float, float)
displayImgSig = qtcore.pyqtSignal (np.ndarray, str)
def __init__(self, connection, app):
qtcore.QThread.__init__(self, parent=app)
self.threadID = 1
self.name = "ImgThread"
self.conn = connection
def updateFrameNum(self,i):
self.updateFrameNumSig.emit(i)
#self.emit(qtcore.SIGNAL("updateFrameNum(int)"), i)
def updateSS(self, ss, again, dgain):
self.updateSSSig.emit(ss,again,dgain)
#self.emit(qtcore.SIGNAL("updateSS(int, int, int)"), ss, again, dgain)
def updateGains(self, r, b):
self.updateGainsSig.emit(r,b)
#self.emit(qtcore.SIGNAL("updateGains(int, int)"), r, b)
def updateStatus(self,status):
self.updateStatusSig.emit(status)
#self.emit(qtcore.SIGNAL("updateStatus(QString)"), status)
def blendImgList(self,imList,show, fnum):
logging.debug("Starting blend Thread")
cvimg=blender.process(imList)
logging.debug("Done Blending")
cvimg=adjustLevels(adjustable_img(cvimg))
#cvimg=adjustable_img(cvimg)
title=thefilename(fnum)
if config.wait_for_test:
title="TEST"
else:
cv2.imwrite(thefilename(fnum),cvimg, [int(cv2.IMWRITE_JPEG_QUALITY), 97])
if config.wait_for_test or show:
self.showImage(cvimg,title)
self.plothist(cvimg)#,True)
def plothist(self, img, fScale=False): #perhaps this should be called in a separate thread?
bins=256
rangetop=1.0 if fScale else 256
imgsize=img.shape
mask = getMask(img, mask_pct)
bwimg=cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
bhist = cv2.calcHist([bwimg],[0],mask,[256],[0,rangetop])
bhist[0]=0
over = [0,0,0,0]
over[3]=sum(bhist[:10])
px = imgsize[0]*imgsize[1]*mask_pct*mask_pct
ylim=px/128 #arbitrary value to keep y limit consistent and reasonable
hists=[]
for i in range(0,3):
hist = cv2.calcHist([img],[i],mask,[256],[0,rangetop])
over[i]=sum(hist[252:])
hists.append(hist)
avg=int(cv2.mean(bwimg)[0]*100.0/rangetop)
#logging.debug("Sending Signal")
self.plotHistogramSig.emit(hists,bhist,px)
self.displayWashoutsSig.emit(over, px, avg)
#self.emit(qtcore.SIGNAL("plotHistogram(PyQt_PyObject, PyQt_PyObject, float)"), hists, bhist, px)
#self.emit(qtcore.SIGNAL("displayWashouts(PyQt_PyObject, float, float)"), over, px, avg)
def showImage(self, im, title="Image"):
im2=cv2.cvtColor(im, cv2.COLOR_RGB2BGR)
self.displayImgSig.emit(im2, title)
#self.emit(qtcore.SIGNAL("displayImg(PyQt_PyObject, QString)"), im2, title)
def run(self):
logging.debug("Imgthread running fn")
image_stream = io.BytesIO()
imglist = []
pframe=0 #counter for preview frames, so we only generate histogram 1 in 10 times
try:
while not config.exitFlag:
logging.debug("looping")
while config.prevOn or config.captureOn:
logging.debug("waiting on img")
imgflag = self.conn.read(1)
imgflag = imgflag.decode("utf-8")
logging.debug(imgflag)
if imgflag == "q":
break
if imgflag == "f":
ss = struct.unpack('<L', self.conn.read(struct.calcsize('<L')))[0]
again = struct.unpack('<L', self.conn.read(struct.calcsize('<L')))[0]
dgain = struct.unpack('<L', self.conn.read(struct.calcsize('<L')))[0]
logging.debug("SS:"+str(ss))
self.updateSS(ss,again,dgain)
elif imgflag == "g":
r = struct.unpack('<L', self.conn.read(struct.calcsize('<L')))[0]
b = struct.unpack('<L', self.conn.read(struct.calcsize('<L')))[0]
logging.debug("Gains "+str(r)+" "+str(b))
self.updateGains(r,b)
elif imgflag == "t":
config.prevOn = False
self.showImage(cvimg2, "TEST")
break
else:
if imgflag == "s" or imgflag == "b":
self.updateFrameNum(config.frame_number)
image_len = struct.unpack('<L', self.conn.read(struct.calcsize('<L')))[0]
logging.debug("Image:"+str(image_len))
if not image_len:
logging.debug("Quit Signal (0 Length image) received from client")
break
logging.debug(imgflag+str(image_len))
image_stream.write(self.conn.read(image_len))
image_stream.seek(0)
if imgflag == "s": #single image
cvimg=cv2.imdecode(np.fromstring(image_stream.read(image_len), dtype=np.uint8),1)
if config.flatFieldCalibration == True:
config.calibrationImages.append(cvimg)
logging.debug("Collecting CalibrationData")
if len(config.calibrationImages)>4:
genFlatFieldImg()
config.flatFieldCalibration=False
continue
cvimg2=adjustLevels(cvimg)
#tmp=image_stream.read(image_len)
if config.wait_for_test:
self.showImage(cvimg2,"TEST")
#else:
# process_for_brightness(cvimg)
self.plothist(cvimg2)
#logging.debug("Single Shown")
if not config.wait_for_test:
filename = thefilename(config.frame_number)
with open(filename, 'w') as imfile:
self.showImage(cvimg2, filename)
cv2.imwrite(filename, cvimg2, [int(cv2.IMWRITE_JPEG_QUALITY), 97])
self.updateFrameNum(config.frame_number)
config.frame_number+=1
#logging.debug("Single Written to "+filename)
if imgflag == "p": #preview image
cvimg=cv2.imdecode(np.fromstring(image_stream.read(image_len), dtype=np.uint8),1)
logging.debug(cvimg.dtype)
cvimg2=adjustLevels(cvimg)
self.showImage(cvimg2,"Live Preview")
pframe+=1
if pframe>10:
pframe=0
self.plothist(cvimg2)
if imgflag == "a": #one of several blended images
#save image data in variable, dont increment or update display
logging.debug('start a')
imglist.append(cv2.imdecode(np.fromstring(image_stream.read(image_len), dtype=np.uint8),1))
if imgflag == "b": #the last of several blended images
#logging.debug('start read final')
imglist.append(cv2.imdecode(np.fromstring(image_stream.read(image_len), dtype=np.uint8),1))
self.updateStatus(str(config.frame_number)+' '+' '.join(map(str,list(map(quickBrightness,imglist)))))
thd=threading.Thread(target=self.blendImgList, args=(imglist[:],True,config.frame_number)) #colon in brackets makes new copy of list
thd.start() #tried this using multiprocessing, but it hung when processing merge_mertens
imglist=[]
if not config.wait_for_test:
config.frame_number+=1
image_stream.seek(0)
image_stream.truncate()
#logging.debug("Waiting for prevOn...")
sleep(1)
finally:
logging.debug("Thread closing %.1d"%config.exitFlag)
cv2.destroyAllWindows()
self.conn.close()
|
tests/core/test_indexed_array.py | DataLab-CQU/stellargraph | 2,428 | 12673203 | # -*- coding: utf-8 -*-
#
# Copyright 2020 Data61, CSIRO
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import pytest
from stellargraph import IndexedArray
def test_indexed_array_empty():
frame = IndexedArray()
assert frame.index == range(0)
np.testing.assert_array_equal(frame.values, np.empty((0, 0)))
def test_indexed_array_non_empty():
list_ids = ["a", "b", "c"]
array_ids = np.array([10, -1, 2])
range_ids = range(106, 100, -2)
values = np.random.rand(3, 4, 5)
# this test uses 'is' checks to validate that there's no copying of data
frame = IndexedArray(values)
assert frame.index == range(3)
assert frame.values is values
frame = IndexedArray(values, index=list_ids)
assert frame.index is list_ids
assert frame.values is values
frame = IndexedArray(values, index=array_ids)
assert frame.index is array_ids
assert frame.values is values
frame = IndexedArray(values, index=range_ids)
assert frame.index is range_ids
assert frame.values is values
def test_indexed_array_invalid():
values = np.random.rand(3, 4, 5)
with pytest.raises(TypeError, match="values: expected a NumPy array .* found int"):
IndexedArray(123)
with pytest.raises(
ValueError,
match=r"values: expected an array with shape .* found shape \(\) of length 0",
):
IndexedArray(np.zeros(()))
with pytest.raises(
ValueError,
match=r"values: expected an array with shape .* found shape \(123,\) of length 1",
):
IndexedArray(np.zeros(123))
# check that the index `len`-failure works with or without index inference
with pytest.raises(TypeError, match="index: expected a sequence .* found int"):
IndexedArray(index=0)
with pytest.raises(TypeError, match="index: expected a sequence .* found int"):
IndexedArray(values, index=123)
with pytest.raises(
ValueError, match="values: expected the index length 2 .* found 3 rows"
):
IndexedArray(values, index=range(0, 3, 2))
|
2017/async-socket-server/server-test.py | mikiec84/code-for-blog | 1,199 | 12673316 | # Tests a concurrent server, by connecting multiple clients sending pre-set
# messages, and comparing the echoes with expected values.
#
# Run with -h for full usage.
#
# <NAME> [http://eli.thegreenplace.net]
# This code is in the public domain.
import argparse
import itertools
import logging
import queue
import socket
import subprocess
import sys
import threading
import time
def server_runner(path, args, stop_event):
"""Runs the server as a subprocess until stop is requested.
Run this function in a separate thread!
path is the path to the server to run, with the given args. If 'path' ends
with .py, a python interpreter is prepended. The args have to be a (possibly
empty) iterable.
stop_event is a threading.Event object; when it's set, the subprocess is
killed and this function returns.
"""
runcmd = ['python3.6', '-u', path] if path.endswith('.py') else [path]
runcmd.extend(args)
logging.info('server_runner: executing subprocess "{0}"'.format(runcmd))
proc = subprocess.Popen(runcmd)
logging.info('server_runner waiting for stop event')
stop_event.wait()
logging.info('server_runner sending kill to subprocess')
proc.terminate()
try:
proc.wait(timeout=0.2)
except subprocess.TimeoutExpired:
logging.info('server_runner: subprocess did not die within timeout')
def socket_reader(sockobj, outq, exit_event):
"""Reads from sockobj, 1 byte at a time; places results in outq.
This function runs in a loop until the sockobj connection is closed or until
exit_event is set.
"""
while not exit_event.is_set():
try:
buf = sockobj.recv(1)
if len(buf) < 1:
break
outq.put(buf)
except socket.timeout:
continue
except OSError:
break
def assert_queue_contains(q, val, timeout=0.1):
try:
v = q.get(timeout=timeout)
assert v == val
except queue.Empty:
assert False, f'queue was empty with timeout={timeout}'
def assert_queue_empty(q, wait=0.1):
time.sleep(wait)
assert q.empty(), 'queue had {0} with wait={1}'.format(q.get(), wait)
def client_thread_runner(client_body_func, port, initial_timeout=0.1):
"""Abstracts the function running within a client thread.
Connects to the port with a socket, launches a reading thread and makes sure
to shut down properly. client_body_func is the actual interaction with a
socket, once connected.
"""
sockobj = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sockobj.settimeout(initial_timeout)
sockobj.connect(('localhost', port))
logging.info('{0} connected to server'.format(client_body_func.__name__))
readq = queue.Queue()
exit_event = threading.Event()
tread = threading.Thread(
target=socket_reader,
args=(sockobj, readq, exit_event))
tread.start()
try:
client_body_func(sockobj, readq, initial_timeout)
finally:
# Closing the socket before killing the server helps the bound socket be
# fully released on the server side; otherwise it may be kept alive by
# the kernel for a while after the server process exits.
sockobj.shutdown(socket.SHUT_RDWR)
sockobj.close()
exit_event.set()
tread.join()
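# Protocol the clients below assume (inferred from the assertions, not specified
# elsewhere in this file): the server greets each connection with '*', and inside
# a '^'...'$' frame it echoes every received byte incremented by one
# ('f' -> 'g', '1' -> '2'); bytes outside a frame are not echoed.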
def client0(sock, readq, initial_timeout):
assert_queue_contains(readq, b'*', timeout=initial_timeout)
assert_queue_empty(readq)
def client1(sock, readq, initial_timeout):
assert_queue_contains(readq, b'*', timeout=initial_timeout)
sock.send(b'abcdef')
assert_queue_empty(readq)
sock.send(b'^')
assert_queue_empty(readq)
sock.send(b'f')
assert_queue_contains(readq, b'g')
sock.send(b'1234')
assert_queue_contains(readq, b'2')
assert_queue_contains(readq, b'3')
assert_queue_contains(readq, b'4')
assert_queue_contains(readq, b'5')
sock.send(b'$')
assert_queue_empty(readq)
sock.send(b'1234')
assert_queue_empty(readq)
sock.send(b'^')
sock.send(b'xy')
assert_queue_contains(readq, b'y')
assert_queue_contains(readq, b'z')
def client2(sock, readq, initial_timeout):
assert_queue_contains(readq, b'*', timeout=initial_timeout)
sock.send(b'^ab$^kl$^80$50')
for b in [b'b', b'c', b'l', b'm', b'9', b'1']:
assert_queue_contains(readq, b)
assert_queue_empty(readq)
def client3(sock, readq, initial_timeout):
assert_queue_contains(readq, b'*', timeout=initial_timeout)
sock.send(b'^$^$^$^$^$^$$^$$$$foobarjoemoedoe^$$')
assert_queue_empty(readq)
def test_main():
argparser = argparse.ArgumentParser('Server test')
argparser.add_argument('server_path', help='path to the server executable')
argparser.add_argument('-p', '--server-port', default=9090, type=int,
help='the server listens on this port')
argparser.add_argument('--timeout-bump', default=0.0, type=float,
help='amount of time (in sec) by which to bump the '
'timeout between consecutive clients')
argparser.add_argument('-n', '--num-clients', default=2, type=int,
help='number of clients to launch simultaneously; ')
argparser.add_argument('--loop', default=1, type=int,
help='launch test in a loop')
args = argparser.parse_args()
assert args.num_clients >= 1
logging.basicConfig(
level=logging.DEBUG,
format='%(levelname)s:%(asctime)s:%(message)s')
# Launch the server in a thread, listening on the port.
stop_event = threading.Event()
server_thread = threading.Thread(
target=server_runner,
args=(args.server_path, [str(args.server_port)], stop_event))
server_thread.start()
time.sleep(0.3)
TIMEOUT = 0.5 + (args.num_clients - 1) * args.timeout_bump
for i in range(args.loop):
logging.info('** Test iteration {}'.format(i))
client_iter = itertools.cycle([client0, client1, client2, client3])
threads = []
for i in range(args.num_clients):
tester_thread = threading.Thread(
target=client_thread_runner,
args=(next(client_iter), args.server_port, TIMEOUT))
tester_thread.start()
threads.append(tester_thread)
time.sleep(TIMEOUT)
for thread in threads:
thread.join()
stop_event.set()
if __name__ == '__main__':
test_main()
|
tests/plugins/test_buffer.py | augusto-herrmann/frictionless-py | 247 | 12673321 | <gh_stars>100-1000
from frictionless import Resource
# Loader
def test_buffer_loader():
source = b"header1,header2\nvalue1,value2\nvalue3,value4"
with Resource(source, format="csv") as resource:
assert resource.header == ["header1", "header2"]
assert resource.read_rows() == [
{"header1": "value1", "header2": "value2"},
{"header1": "value3", "header2": "value4"},
]
def test_buffer_loader_recursion_error_issue_647():
with open("data/issue-647.csv.txt", "rb") as file:
with Resource(file.read(), format="csv", encoding="iso-8859-1") as resource:
assert len(resource.read_lists()) == 883
def test_buffer_loader_write():
source = Resource("data/table.csv")
target = source.write(Resource(scheme="buffer", format="csv"))
assert target.data == "id,name\r\n1,english\r\n2,中国人\r\n".encode("utf-8")
|
emails/testsuite/smtp_servers.py | MrTango/python-emails | 348 | 12673327 | # encoding: utf-8
import os
import platform
import datetime
import random
import time
from emails.compat import to_unicode
DEFAULT_FROM = os.environ.get('SMTP_TEST_FROM_EMAIL') or '<EMAIL>'
SUBJECT_SUFFIX = os.environ.get('SMTP_TEST_SUBJECT_SUFFIX')
def as_bool(value, default=False):
if value is None:
return default
return value.lower() in ('1', 'yes', 'true', 'on')
"""
Take environment variables if exists and send test letter
SMTP_TEST_SETS=GMAIL,OUTLOOK,YAMAIL
SMTP_TEST_GMAIL_TO=<EMAIL>
SMTP_TEST_GMAIL_USER=myuser
SMTP_TEST_GMAIL_PASSWORD=<PASSWORD>
SMTP_TEST_GMAIL_WITH_TLS=true
SMTP_TEST_GMAIL_WITHOUT_TLS=false
SMTP_TEST_GMAIL_HOST=alt1.gmail-smtp-in.l.google.com
SMTP_TEST_GMAIL_PORT=25
...
"""
def smtp_server_from_env(name='GMAIL'):
def _var(param, default=None):
v = os.environ.get('SMTP_TEST_{}_{}'.format(name, param), default)
return v
def _valid_smtp(data):
return data['host']
smtp_info = dict(
from_email=_var("FROM", default=DEFAULT_FROM),
to_email=_var("TO"),
host=_var('HOST'),
port=_var('PORT', default=25),
user=_var('USER'),
password=_var('PASSWORD')
)
if _valid_smtp(smtp_info):
if as_bool(_var('WITH_TLS')):
smtp_info['tls'] = True
sys_name = '{}_WITH_TLS'.format(name)
yield sys_name, smtp_info
if as_bool(_var('WITHOUT_TLS')):
smtp_info['tls'] = False
sys_name = '{}_WITHOUT_TLS'.format(name)
yield sys_name, smtp_info
class SMTPTestParams(object):
subject_prefix = '[python-emails]'
def __init__(self, from_email=None, to_email=None, defaults=None, **kw):
params = {'fail_silently': False, 'debug': 1, 'timeout': 25}
params.update(defaults or {})
params.update(kw)
self.params = params
self.from_email = from_email
self.to_email = to_email
def patch_message(self, message):
"""
Some SMTP requires from and to emails
"""
if self.from_email:
message.mail_from = (message.mail_from[0], self.from_email)
if self.to_email:
message.mail_to = self.to_email
# TODO: this code breaks template in subject; fix it
        if not to_unicode(message.subject).startswith(self.subject_prefix):
message.subject = " ".join([self.subject_prefix, message.subject,
'// %s' % SUBJECT_SUFFIX])
message._headers['X-Test-Date'] = datetime.datetime.utcnow().isoformat()
message._headers['X-Python-Version'] = "%s/%s" % (platform.python_version(), platform.platform())
message._headers['X-Build-Data'] = SUBJECT_SUFFIX
return message
def __str__(self):
return u'SMTPTestParams({user}@{host}:{port})'.format(host=self.params.get('host'),
port=self.params.get('port'),
user=self.params.get('user', ''))
def sleep(self):
if 'mailtrap' in self.params.get('host', ''):
t = 2 + random.randint(0, 2)
else:
t = 0.5
time.sleep(t)
def get_servers():
names = os.environ.get('SMTP_TEST_SETS', None)
if names:
for name in names.split(','):
for sys_name, params in smtp_server_from_env(name):
yield sys_name, SMTPTestParams(**params)
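# A minimal usage sketch (not part of the original test suite): iterate the
# servers configured through the SMTP_TEST_* environment variables documented
# above and print their connection parameters. Any concrete values are assumed
# for illustration only.
if __name__ == '__main__':
    for sys_name, test_params in get_servers():
        # e.g. "GMAIL_WITH_TLS SMTPTestParams(myuser@alt1.gmail-smtp-in.l.google.com:25)"
        print(sys_name, test_params)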
|
t/unit/test_values.py | faheel/billiard | 5,079 | 12673328 | from __future__ import absolute_import
import pytest
from billiard import Value, RawValue, Lock, Process
class test_values:
codes_values = [
('i', 4343, 24234),
('d', 3.625, -4.25),
('h', -232, 234),
('c', 'x'.encode('latin'), 'y'.encode('latin'))
]
def test_issue_229(self):
"""Test fix for issue #229"""
a = Value('i', 0)
b = Value('i', 0)
a.value = 5
assert a.value == 5
assert b.value == 0
@classmethod
def _test(cls, values):
for sv, cv in zip(values, cls.codes_values):
sv.value = cv[2]
def test_value(self, raw=False):
if raw:
values = [RawValue(code, value)
for code, value, _ in self.codes_values]
else:
values = [Value(code, value)
for code, value, _ in self.codes_values]
for sv, cv in zip(values, self.codes_values):
assert sv.value == cv[1]
proc = Process(target=self._test, args=(values,))
proc.daemon = True
proc.start()
proc.join()
for sv, cv in zip(values, self.codes_values):
assert sv.value == cv[2]
def test_rawvalue(self):
self.test_value(raw=True)
def test_getobj_getlock(self):
val1 = Value('i', 5)
lock1 = val1.get_lock()
obj1 = val1.get_obj()
val2 = Value('i', 5, lock=None)
lock2 = val2.get_lock()
obj2 = val2.get_obj()
lock = Lock()
val3 = Value('i', 5, lock=lock)
lock3 = val3.get_lock()
obj3 = val3.get_obj()
assert lock == lock3
arr4 = Value('i', 5, lock=False)
assert not hasattr(arr4, 'get_lock')
assert not hasattr(arr4, 'get_obj')
with pytest.raises(AttributeError):
Value('i', 5, lock='navalue')
arr5 = RawValue('i', 5)
assert not hasattr(arr5, 'get_lock')
assert not hasattr(arr5, 'get_obj')
|
appengine/gallery_api/__init__.py | bharati-software/blockly-games-Kannada | 1,184 | 12673330 | from common import *
|
ckanapi/version.py | muccg/ckanapi | 128 | 12673369 | import pkg_resources
__version__ = pkg_resources.require("ckanapi")[0].version
|
nuitka/nodes/ImportHardNodes.py | byehack/Nuitka | 1,228 | 12673377 | # Copyright 2021, <NAME>, mailto:<EMAIL>
#
# Part of "Nuitka", an optimizing Python compiler that is compatible and
# integrates with CPython, but also works on its own.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
""" Nodes representing more trusted imports. """
from nuitka.importing.Importing import locateModule
from nuitka.utils.ModuleNames import ModuleName
from .ExpressionBases import ExpressionBase
class ExpressionImportHardBase(ExpressionBase):
# Base classes can be abstract, pylint: disable=abstract-method
#
__slots__ = ("module_name", "finding", "module_filename")
def __init__(self, module_name, source_ref):
ExpressionBase.__init__(self, source_ref=source_ref)
self.module_name = ModuleName(module_name)
self.finding = None
self.module_filename = None
_module_name, self.module_filename, self.finding = locateModule(
module_name=self.module_name,
parent_package=None,
level=0,
)
# Expect to find them and to match the name of course.
assert self.finding != "not-found", self.module_name
assert _module_name == self.module_name
def getUsedModule(self):
return self.module_name, self.module_filename, self.finding
class ExpressionImportModuleNameHardBase(ExpressionImportHardBase):
"""Hard import names base class."""
# Base classes can be abstract, pylint: disable=I0021,abstract-method
__slots__ = ("import_name", "finding", "module_filename")
def __init__(self, module_name, import_name, source_ref):
ExpressionImportHardBase.__init__(
self, module_name=module_name, source_ref=source_ref
)
self.import_name = import_name
# Derived ones have the same interface.
@staticmethod
def isExpressionImportModuleNameHard():
return True
def finalize(self):
del self.parent
def getDetails(self):
return {"module_name": self.module_name, "import_name": self.import_name}
def getModuleName(self):
return self.module_name
def getImportName(self):
return self.import_name
class ExpressionImportModuleNameHardMaybeExists(ExpressionImportModuleNameHardBase):
"""Hard coded import names, e.g. of "site.something"
    These are created for attributes of hard imported modules for which it is not
    known whether they exist.
"""
kind = "EXPRESSION_IMPORT_MODULE_NAME_HARD_MAYBE_EXISTS"
def computeExpressionRaw(self, trace_collection):
trace_collection.onExceptionRaiseExit(AttributeError)
return self, None, None
@staticmethod
def mayHaveSideEffects():
return True
@staticmethod
def mayRaiseException(exception_type):
return True
class ExpressionImportModuleNameHardExists(ExpressionImportModuleNameHardBase):
"""Hard coded import names, e.g. of "sys.stdout"
These are directly created for some Python mechanics.
"""
kind = "EXPRESSION_IMPORT_MODULE_NAME_HARD_EXISTS"
def computeExpressionRaw(self, trace_collection):
# As good as it gets.
return self, None, None
@staticmethod
def mayHaveSideEffects():
return False
@staticmethod
def mayRaiseException(exception_type):
return False
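# A minimal construction sketch, not part of Nuitka itself: "sys.stdout" is an
# attribute known to exist, so it would be represented by the "exists" variant.
# The ``source_ref`` argument is a placeholder for a real Nuitka source code
# reference object and is assumed here for illustration only.
def _example_hard_import_node(source_ref):
    return ExpressionImportModuleNameHardExists(
        module_name="sys", import_name="stdout", source_ref=source_ref
    )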
|
run.py | svenvs/awesome-video-chat-backgrounds | 113 | 12673382 | from os import listdir
from os.path import isfile, join, abspath
image_path = abspath('./images')
onlyfiles = [f for f in listdir(image_path) if isfile(join(image_path, f))]
readme = '''# awesome-video-chat-backgrounds
Just in case you're at home on a video call and you haven't had time to tidy up your REAL background, here are some awesome backgrounds to help you get through your next video chat.
## Contributing
* Please submit pull requests to add additional photos/images to this collection!
* Images should be minimum of 1080 (width) x 550 (height) pixels
## Image List
'''
for file in onlyfiles:
title = file.split('.')[0].replace('_',' ').title()
readme += '<a href="./images/{}" title="{}"> <img align="center" src="./images/{}" width="540px"/></a>\n'.format(file, title, file)
with open('README.md','w+') as f:
f.write(readme)
|
discrete_systems_time_domain/animation.py | spatialaudio/signals-and-systems-lecture | 243 | 12673387 | """Animations of common operations in signal processing."""
import matplotlib.pyplot as plt
from matplotlib.animation import FuncAnimation
def animate_discrete_convolution(x, h, y, k, kappa, interval=100):
def update_stem(stem, x, y):
stem.markerline.set_data(x, y)
for idx, stem_line in enumerate(stem.stemlines):
stem_line.set_data([x[idx], x[idx]], [0, y[idx]])
def animate(kappa_i):
update_stem(stem_x, k, x(k))
update_stem(stem_h, k, h(kappa_i - k))
dot.set_data(kappa_i, y[-k[0] + kappa_i])
# setup plot and define objects
default_figsize = plt.rcParams.get('figure.figsize')
fig, ax = plt.subplots(2, 1, figsize=(default_figsize[0],
1.5*default_figsize[1]))
fig.subplots_adjust(hspace=0.2)
plt.close() # suppresses empty plot in notebook
stem_x = ax[0].stem(k, x(k), linefmt='C0-', markerfmt='C0o',
basefmt=' ', label=r'$x[\kappa]$')
stem_h = ax[0].stem(k, h(kappa[0]-k), linefmt='C1-',
markerfmt='C1o', basefmt=' ', label=r'$h(k - \kappa)$')
ax[0].set_xlabel(r'$\kappa$')
ax[0].legend(loc='upper right')
ax[0].grid()
y = y(k)
ax[1].stem(k, y, linefmt='C2-', markerfmt='C2o', basefmt=' ',
label=r'$y[k]$')
dot, = ax[1].plot([], 'ro')
ax[1].set_xlabel(r'$k$')
ax[1].legend(loc='upper right')
ax[1].grid()
return FuncAnimation(fig, animate, kappa, interval=interval)
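# A minimal usage sketch, assuming NumPy is available and that the animation is
# displayed in a Jupyter notebook as in the rest of this repository. The two
# rectangular signals below are illustrative assumptions, not part of the module.
if __name__ == '__main__':
    import numpy as np

    k = np.arange(-5, 15)  # common index grid for x, h and y

    def x(k):
        return np.where((k >= 0) & (k < 4), 1.0, 0.0)  # rectangular signal

    def h(k):
        return np.where((k >= 0) & (k < 4), 0.5, 0.0)  # rectangular impulse response

    def y(k):
        # discrete convolution y[k] = sum_kappa x[kappa] * h[k - kappa]
        return np.array([np.sum(x(k) * h(ki - k)) for ki in k])

    ani = animate_discrete_convolution(x, h, y, k, kappa=np.arange(0, 8))
    # in a notebook: from IPython.display import HTML; HTML(ani.to_jshtml())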
|
octodns/processor/filter.py | CyberFlameGO/octodns | 1,865 | 12673395 | #
#
#
from __future__ import absolute_import, division, print_function, \
unicode_literals
from .base import BaseProcessor
class TypeAllowlistFilter(BaseProcessor):
def __init__(self, name, allowlist):
super(TypeAllowlistFilter, self).__init__(name)
self.allowlist = set(allowlist)
def _process(self, zone, *args, **kwargs):
for record in zone.records:
if record._type not in self.allowlist:
zone.remove_record(record)
return zone
process_source_zone = _process
process_target_zone = _process
class TypeRejectlistFilter(BaseProcessor):
def __init__(self, name, rejectlist):
super(TypeRejectlistFilter, self).__init__(name)
self.rejectlist = set(rejectlist)
def _process(self, zone, *args, **kwargs):
for record in zone.records:
if record._type in self.rejectlist:
zone.remove_record(record)
return zone
process_source_zone = _process
process_target_zone = _process
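# A minimal usage sketch (illustrative only, not part of the octodns sources):
# each processor takes a name plus the record types to allow or reject, and
# octodns invokes process_source_zone/process_target_zone with a populated
# zone. The zone object itself is assumed here, so only construction is shown.
if __name__ == '__main__':
    only_address_records = TypeAllowlistFilter('only-a-aaaa', ['A', 'AAAA'])
    drop_txt_records = TypeRejectlistFilter('no-txt', ['TXT'])
    # e.g. only_address_records.process_source_zone(zone) would remove every
    # record whose _type is not 'A' or 'AAAA' from the given zone.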
|
control/keyboard/vtol_keyboard_multi_control.py | CNRoboComp2020/XTDrone | 457 | 12673397 | import rospy
from geometry_msgs.msg import Pose, Twist
import sys, select, os
import tty, termios
from std_msgs.msg import String
MAX_LINEAR = 1000
MAX_ANG_VEL = 0.5
LINEAR_STEP_SIZE = 0.1
ANG_VEL_STEP_SIZE = 0.01
ctrl_leader = False
send_flag = False
transition_state = 'multirotor'
msg2all = """
Control Your XTDrone!
To all drones (press g to control the leader)
---------------------------
1 2 3 4 5 6 7 8 9 0
w r t y i
a s d g j k l
x v b n ,
w/x : increase/decrease north setpoint
a/d : increase/decrease east setpoint
i/, : increase/decrease upward setpoint
j/l : increase/decrease orientation
r : return home
t/y : arm/disarm
v/n : takeoff/land
b : offboard
s : hover(multirotor mode), loiter(plane mode)
k : hover(multirotor mode), idle(plane mode)
0~9 : extendable mission(eg.different formation configuration)
this will mask the keyboard control
g : control the leader
o : transition
CTRL-C to quit
"""
msg2leader = """
Control Your XTDrone!
To the leader (press g to control all drones)
---------------------------
1 2 3 4 5 6 7 8 9 0
w r t y i
a s d g j k l
x v b n ,
w/x : increase/decrease north setpoint
a/d : increase/decrease east setpoint
i/, : increase/decrease upward setpoint
j/l : increase/decrease orientation
r : return home
t/y : arm/disarm
v/n : takeoff/land
b : offboard
s : hover(multirotor mode), loiter(plane mode)
k : hover(multirotor mode), idle(plane mode)
0~9 : extendable mission(eg.different formation configuration)
g : control all drones
o : transition
CTRL-C to quit
"""
def getKey():
tty.setraw(sys.stdin.fileno())
rlist, _, _ = select.select([sys.stdin], [], [], 0.1)
if rlist:
key = sys.stdin.read(1)
else:
key = ''
termios.tcsetattr(sys.stdin, termios.TCSADRAIN, settings)
return key
def print_msg():
if ctrl_leader:
print(msg2leader)
else:
print(msg2all)
if __name__=="__main__":
settings = termios.tcgetattr(sys.stdin)
vehicle_type = sys.argv[1]
vehicle_num = int(sys.argv[2])
control_type = sys.argv[3]
rospy.init_node('vtol_keyboard_multi_control')
    multi_cmd_pose_enu_pub = [None]*vehicle_num
    multi_cmd_vel_flu_pub = [None]*vehicle_num
    multi_cmd_accel_flu_pub = [None]*vehicle_num
    multi_cmd_pub = [None]*vehicle_num
for i in range(vehicle_num):
multi_cmd_pose_enu_pub[i] = rospy.Publisher('/xtdrone/'+vehicle_type+'_'+str(i)+'/cmd_pose_enu', Pose, queue_size=10)
if control_type == 'vel':
multi_cmd_vel_flu_pub[i] = rospy.Publisher('/xtdrone/'+vehicle_type+'_'+str(i)+'/cmd_vel_flu', Twist, queue_size=10)
else:
            multi_cmd_accel_flu_pub[i] = rospy.Publisher('/xtdrone/'+vehicle_type+'_'+str(i)+'/cmd_accel_flu', Twist, queue_size=10)
multi_cmd_pub[i] = rospy.Publisher('/xtdrone/'+vehicle_type+'_'+str(i)+'/cmd',String,queue_size=10)
leader_cmd_pose_enu_pub = rospy.Publisher("/xtdrone/leader/cmd_pose_enu", Pose, queue_size=10)
if control_type == 'vel':
leader_cmd_vel_flu_pub = rospy.Publisher("/xtdrone/leader/cmd_vel_flu", Twist, queue_size=10)
else:
leader_cmd_accel_flu_pub = rospy.Publisher("/xtdrone/leader/cmd_accel_flu", Twist, queue_size=10)
leader_cmd_pub = rospy.Publisher("/xtdrone/leader_cmd", String, queue_size=10)
cmd= String()
pose = Pose()
twist = Twist()
forward = 0.0
leftward = 0.0
upward = 0.0
angular = 0.0
print_msg()
while(1):
key = getKey()
if key == 'w' :
forward = forward + LINEAR_STEP_SIZE
print_msg()
if control_type == 'vel':
print("currently:\t forward vel %.2f\t leftward vel %.2f\t upward vel %.2f\t angular %.2f " % (forward, leftward, upward, angular))
else:
print("currently:\t forward vel %.2f\t leftward vel %.2f\t upward vel %.2f\t angular %.2f " % (forward, leftward, upward, angular))
elif key == 'x' :
forward = forward - LINEAR_STEP_SIZE
print_msg()
if control_type == 'vel':
print("currently:\t forward vel %.2f\t leftward vel %.2f\t upward vel %.2f\t angular %.2f " % (forward, leftward, upward, angular))
else:
print("currently:\t forward vel %.2f\t leftward vel %.2f\t upward vel %.2f\t angular %.2f " % (forward, leftward, upward, angular))
elif key == 'a' :
leftward = leftward + LINEAR_STEP_SIZE
print_msg()
if control_type == 'vel':
print("currently:\t forward vel %.2f\t leftward vel %.2f\t upward vel %.2f\t angular %.2f " % (forward, leftward, upward, angular))
else:
print("currently:\t forward vel %.2f\t leftward vel %.2f\t upward vel %.2f\t angular %.2f " % (forward, leftward, upward, angular))
elif key == 'd' :
leftward = leftward - LINEAR_STEP_SIZE
print_msg()
if control_type == 'vel':
print("currently:\t forward vel %.2f\t leftward vel %.2f\t upward vel %.2f\t angular %.2f " % (forward, leftward, upward, angular))
else:
print("currently:\t forward vel %.2f\t leftward vel %.2f\t upward vel %.2f\t angular %.2f " % (forward, leftward, upward, angular))
elif key == 'i' :
upward = upward + LINEAR_STEP_SIZE
print_msg()
if control_type == 'vel':
print("currently:\t forward vel %.2f\t leftward vel %.2f\t upward vel %.2f\t angular %.2f " % (forward, leftward, upward, angular))
else:
print("currently:\t forward vel %.2f\t leftward vel %.2f\t upward vel %.2f\t angular %.2f " % (forward, leftward, upward, angular))
elif key == ',' :
upward = upward - LINEAR_STEP_SIZE
print_msg()
if control_type == 'vel':
print("currently:\t forward vel %.2f\t leftward vel %.2f\t upward vel %.2f\t angular %.2f " % (forward, leftward, upward, angular))
else:
print("currently:\t forward vel %.2f\t leftward vel %.2f\t upward vel %.2f\t angular %.2f " % (forward, leftward, upward, angular))
elif key == 'j':
angular = angular + ANG_VEL_STEP_SIZE
print_msg()
print("currently:\t forward vel %.2f\t leftward vel %.2f\t upward vel %.2f\t angular %.2f " % (forward, leftward, upward, angular))
elif key == 'l':
angular = angular - ANG_VEL_STEP_SIZE
print_msg()
print("currently:\t forward vel %.2f\t leftward vel %.2f\t upward vel %.2f\t angular %.2f " % (forward, leftward, upward, angular))
elif key == 'r':
cmd = 'AUTO.RTL'
print_msg()
print('Returning home')
elif key == 't':
cmd = 'ARM'
print_msg()
print('Arming')
elif key == 'y':
cmd = 'DISARM'
print_msg()
print('Disarming')
elif key == 'v':
cmd = 'AUTO.TAKEOFF'
print_msg()
print('AUTO.TAKEOFF')
elif key == 'b':
cmd = 'OFFBOARD'
print_msg()
print('Offboard')
elif key == 'n':
cmd = 'AUTO.LAND'
print_msg()
print('AUTO.LAND')
elif key == 'g':
ctrl_leader = not ctrl_leader
print_msg()
elif key == 's':
if transition_state == 'multirotor':
forward = 0.0
leftward = 0.0
upward = 0.0
angular = 0.0
cmd = 'HOVER'
else:
cmd = 'loiter'
print_msg()
print(cmd)
elif key == 'k' :
if transition_state == 'multirotor':
forward = 0.0
leftward = 0.0
upward = 0.0
angular = 0.0
cmd = 'HOVER'
else:
cmd = 'idle'
print_msg()
print(cmd)
elif key == 'o':
if transition_state == 'multirotor':
transition_state = 'plane'
cmd = transition_state
else:
transition_state = 'multirotor'
cmd = transition_state
print_msg()
print(cmd)
else:
for i in range(10):
if key == str(i):
cmd = 'mission'+key
print_msg()
print(cmd)
if (key == '\x03'):
break
if forward > MAX_LINEAR:
forward = MAX_LINEAR
elif forward < -MAX_LINEAR:
forward = -MAX_LINEAR
if leftward > MAX_LINEAR:
leftward = MAX_LINEAR
elif leftward < -MAX_LINEAR:
leftward = -MAX_LINEAR
if upward > MAX_LINEAR:
upward = MAX_LINEAR
elif upward < -MAX_LINEAR:
upward = -MAX_LINEAR
if angular > MAX_ANG_VEL:
angular = MAX_ANG_VEL
elif angular < -MAX_ANG_VEL:
angular = - MAX_ANG_VEL
if transition_state == 'plane':
pose.position.x = forward; pose.position.y = leftward; pose.position.z = upward
pose.orientation.x = 0.0; pose.orientation.y = 0.0; pose.orientation.z = angular
else:
twist.linear.x = forward; twist.linear.y = leftward; twist.linear.z = upward
twist.angular.x = 0.0; twist.angular.y = 0.0; twist.angular.z = angular
for i in range(vehicle_num):
if ctrl_leader:
if transition_state == 'plane':
leader_cmd_pose_enu_pub.publish(pose)
else:
if control_type == 'vel':
leader_cmd_vel_flu_pub.publish(twist)
else:
                        leader_cmd_accel_flu_pub.publish(twist)
leader_cmd_pub.publish(cmd)
else:
if transition_state == 'plane':
multi_cmd_pose_enu_pub[i].publish(pose)
else:
if control_type == 'vel':
multi_cmd_vel_flu_pub[i].publish(twist)
else:
multi_cmd_accel_flu_pub[i].publish(twist)
multi_cmd_pub[i].publish(cmd)
cmd = ''
termios.tcsetattr(sys.stdin, termios.TCSADRAIN, settings)
|
tests/bots/stocks/technical_analysis/test_aroon.py | tehcoderer/GamestonkTerminal | 255 | 12673399 | import pytest
try:
from bots.stocks.technical_analysis.aroon import aroon_command
except ImportError:
pytest.skip(allow_module_level=True)
@pytest.fixture(scope="module")
def vcr_config():
return {
"filter_headers": [("User-Agent", None)],
"filter_query_parameters": [
("period1", "MOCK_PERIOD_1"),
("period2", "MOCK_PERIOD_2"),
("date", "MOCK_DATE"),
],
}
@pytest.mark.vcr
@pytest.mark.bots
@pytest.mark.parametrize(
"start, end, extended, heikin, news",
[
("", "", False, False, False),
("2022-01-01", "2022-04-01", True, True, True),
],
)
def test_aroon_command(recorder, start, end, extended, heikin, news):
value = aroon_command(
"TSLA",
start=start,
end=end,
extended_hours=extended,
heikin_candles=heikin,
news=news,
)
value["imagefile"] = value["imagefile"][-4:]
recorder.capture(value)
@pytest.mark.vcr
@pytest.mark.bots
@pytest.mark.parametrize("ticker", ["", "ZZZZ"])
def test_aroon_invalid(ticker):
with pytest.raises(Exception):
aroon_command(ticker)
|
hail/python/test/hail/matrixtable/test_matrix_table.py | tdeboer-ilmn/hail | 789 | 12673412 | import math
import operator
import random
import pytest
import hail as hl
import hail.expr.aggregators as agg
from hail.utils.java import Env
from hail.utils.misc import new_temp_file
from ..helpers import *
setUpModule = startTestHailContext
tearDownModule = stopTestHailContext
class Tests(unittest.TestCase):
def get_mt(self, min_partitions=None) -> hl.MatrixTable:
return hl.import_vcf(resource("sample.vcf"), min_partitions=min_partitions)
def test_range_count(self):
self.assertEqual(hl.utils.range_matrix_table(7, 13).count(), (7, 13))
def test_row_key_field_show_runs(self):
ds = self.get_mt()
ds.locus.show()
def test_update(self):
mt = self.get_mt()
mt = mt.select_entries(dp=mt.DP, gq=mt.GQ)
self.assertTrue(schema_eq(mt.entry.dtype, hl.tstruct(dp=hl.tint32, gq=hl.tint32)))
def test_annotate(self):
mt = self.get_mt()
mt = mt.annotate_globals(foo=5)
self.assertEqual(mt.globals.dtype, hl.tstruct(foo=hl.tint32))
mt = mt.annotate_rows(x1=agg.count(),
x2=agg.fraction(False),
x3=agg.count_where(True),
x4=mt.info.AC + mt.foo)
mt = mt.annotate_cols(apple=6)
mt = mt.annotate_cols(y1=agg.count(),
y2=agg.fraction(False),
y3=agg.count_where(True),
y4=mt.foo + mt.apple)
expected_schema = hl.tstruct(s=hl.tstr, apple=hl.tint32, y1=hl.tint64, y2=hl.tfloat64, y3=hl.tint64,
y4=hl.tint32)
self.assertTrue(schema_eq(mt.col.dtype, expected_schema),
"expected: " + str(mt.col.dtype) + "\nactual: " + str(expected_schema))
mt = mt.select_entries(z1=mt.x1 + mt.foo,
z2=mt.x1 + mt.y1 + mt.foo)
self.assertTrue(schema_eq(mt.entry.dtype, hl.tstruct(z1=hl.tint64, z2=hl.tint64)))
def test_annotate_globals(self):
mt = hl.utils.range_matrix_table(1, 1)
ht = hl.utils.range_table(1, 1)
data = [
(5, hl.tint, operator.eq),
(float('nan'), hl.tfloat32, lambda x, y: str(x) == str(y)),
(float('inf'), hl.tfloat64, lambda x, y: str(x) == str(y)),
(float('-inf'), hl.tfloat64, lambda x, y: str(x) == str(y)),
(1.111, hl.tfloat64, operator.eq),
([hl.Struct(**{'a': None, 'b': 5}),
hl.Struct(**{'a': 'hello', 'b': 10})], hl.tarray(hl.tstruct(a=hl.tstr, b=hl.tint)), operator.eq)
]
for x, t, f in data:
self.assertTrue(f(hl.eval(mt.annotate_globals(foo=hl.literal(x, t)).foo), x), f"{x}, {t}")
self.assertTrue(f(hl.eval(ht.annotate_globals(foo=hl.literal(x, t)).foo), x), f"{x}, {t}")
def test_head(self):
# no empty partitions
mt1 = hl.utils.range_matrix_table(10, 10)
# empty partitions at front
mt2 = hl.utils.range_matrix_table(20, 10, 20)
mt2 = mt2.filter_rows(mt2.row_idx > 9)
mts = [mt1, mt2]
for mt in mts:
tmp_file = new_temp_file(extension='mt')
mt.write(tmp_file)
mt_readback = hl.read_matrix_table(tmp_file)
for mt_ in [mt, mt_readback]:
assert mt_.head(1).count_rows() == 1
assert mt_.head(1)._force_count_rows() == 1
assert mt_.head(100).count_rows() == 10
assert mt_.head(100)._force_count_rows() == 10
def test_head_cols(self):
mt1 = hl.utils.range_matrix_table(10, 10)
assert mt1.head(1, 2).count() == (1, 2)
assert mt1.head(1, None).count() == (1, 10)
assert mt1.head(None, 1).count() == (10, 1)
def test_tail(self):
# no empty partitions
mt1 = hl.utils.range_matrix_table(10, 10)
# empty partitions at front
mt2 = hl.utils.range_matrix_table(20, 10, 20)
mt2 = mt2.filter_rows(mt2.row_idx > 9)
mts = [mt1, mt2]
for mt in mts:
tmp_file = new_temp_file(extension='mt')
mt.write(tmp_file)
mt_readback = hl.read_matrix_table(tmp_file)
for mt_ in [mt, mt_readback]:
assert mt_.tail(1).count_rows() == 1
assert mt_.tail(1)._force_count_rows() == 1
assert mt_.tail(100).count_rows() == 10
assert mt_.tail(100)._force_count_rows() == 10
def test_tail_cols(self):
mt1 = hl.utils.range_matrix_table(10, 10)
assert mt1.tail(1, 2).count() == (1, 2)
assert mt1.tail(1, None).count() == (1, 10)
assert mt1.tail(None, 1).count() == (10, 1)
@fails_service_backend()
def test_tail_entries(self):
mt = hl.utils.range_matrix_table(100, 30)
mt = mt.filter_cols(mt.col_idx != 29)
def tail(*args):
ht = mt.tail(*args).entries()
return ht.aggregate(hl.agg.collect_as_set(hl.tuple([ht.row_idx, ht.col_idx])))
def expected(n, m):
return set((i, j) for i in range(100 - n, 100) for j in range(29 - m, 29))
assert tail(None, 10) == expected(100, 10)
assert tail(30, None) == expected(30, 29)
assert tail(30, 10) == expected(30, 10)
@fails_service_backend()
def test_tail_scan(self):
mt = hl.utils.range_matrix_table(30, 40)
mt = mt.annotate_rows(i = hl.scan.count())
mt = mt.annotate_cols(j = hl.scan.count())
mt = mt.tail(10, 11)
ht = mt.entries()
assert ht.aggregate(agg.collect_as_set(hl.tuple([ht.i, ht.j]))) == set(
(i, j) for i in range(20, 30) for j in range(29, 40)
)
def test_filter(self):
mt = self.get_mt()
mt = mt.annotate_globals(foo=5)
mt = mt.annotate_rows(x1=agg.count())
mt = mt.annotate_cols(y1=agg.count())
mt = mt.annotate_entries(z1=mt.DP)
mt = mt.filter_rows((mt.x1 == 5) & (agg.count() == 3) & (mt.foo == 2))
mt = mt.filter_cols((mt.y1 == 5) & (agg.count() == 3) & (mt.foo == 2))
mt = mt.filter_entries((mt.z1 < 5) & (mt.y1 == 3) & (mt.x1 == 5) & (mt.foo == 2))
mt.count_rows()
@fails_service_backend()
def test_aggregate(self):
mt = self.get_mt()
mt = mt.annotate_globals(foo=5)
mt = mt.annotate_rows(x1=agg.count())
mt = mt.annotate_cols(y1=agg.count())
mt = mt.annotate_entries(z1=mt.DP)
qv = mt.aggregate_rows(agg.count())
qs = mt.aggregate_cols(agg.count())
qg = mt.aggregate_entries(agg.count())
self.assertIsNotNone(mt.aggregate_entries(hl.agg.take(mt.s, 1)[0]))
self.assertEqual(qv, 346)
self.assertEqual(qs, 100)
self.assertEqual(qg, qv * qs)
qvs = mt.aggregate_rows(hl.Struct(x=agg.collect(mt.locus.contig),
y=agg.collect(mt.x1)))
qss = mt.aggregate_cols(hl.Struct(x=agg.collect(mt.s),
y=agg.collect(mt.y1)))
qgs = mt.aggregate_entries(hl.Struct(x=agg.filter(False, agg.collect(mt.y1)),
y=agg.filter(hl.rand_bool(0.1), agg.collect(mt.GT))))
def test_aggregate_rows_array_agg(self):
mt = hl.utils.range_matrix_table(10, 10)
mt = mt.annotate_rows(maf_flag = hl.empty_array('bool'))
mt.aggregate_rows(hl.agg.array_agg(lambda x: hl.agg.counter(x), mt.maf_flag))
def test_aggregate_rows_bn_counter(self):
r = hl.balding_nichols_model(3, 10, 10).rows()
r.aggregate(hl.agg.counter(r.locus.in_x_nonpar()))
def test_col_agg_no_rows(self):
mt = hl.utils.range_matrix_table(3, 3).filter_rows(False)
mt = mt.annotate_cols(x = hl.agg.count())
assert mt.x.collect() == [0, 0, 0]
def test_col_collect(self):
mt = hl.utils.range_matrix_table(3, 3)
mt.cols().collect()
def test_aggregate_ir(self):
ds = (hl.utils.range_matrix_table(5, 5)
.annotate_globals(g1=5)
.annotate_entries(e1=3))
x = [("col_idx", lambda e: ds.aggregate_cols(e)),
("row_idx", lambda e: ds.aggregate_rows(e))]
for name, f in x:
r = f(hl.struct(x=agg.sum(ds[name]) + ds.g1,
y=agg.filter(ds[name] % 2 != 0, agg.sum(ds[name] + 2)) + ds.g1,
z=agg.sum(ds.g1 + ds[name]) + ds.g1,
mean=agg.mean(ds[name])))
self.assertEqual(convert_struct_to_dict(r), {u'x': 15, u'y': 13, u'z': 40, u'mean': 2.0})
r = f(5)
self.assertEqual(r, 5)
r = f(hl.missing(hl.tint32))
self.assertEqual(r, None)
r = f(agg.filter(ds[name] % 2 != 0, agg.sum(ds[name] + 2)) + ds.g1)
self.assertEqual(r, 13)
r = ds.aggregate_entries(agg.filter((ds.row_idx % 2 != 0) & (ds.col_idx % 2 != 0),
agg.sum(ds.e1 + ds.g1 + ds.row_idx + ds.col_idx)) + ds.g1)
self.assertTrue(r, 48)
def test_select_entries(self):
mt = hl.utils.range_matrix_table(10, 10, n_partitions=4)
mt = mt.annotate_entries(a=hl.struct(b=mt.row_idx, c=mt.col_idx), foo=mt.row_idx * 10 + mt.col_idx)
mt = mt.select_entries(mt.a.b, mt.a.c, mt.foo)
mt = mt.annotate_entries(bc=mt.b * 10 + mt.c)
mt_entries = mt.entries()
assert (mt_entries.all(mt_entries.bc == mt_entries.foo))
def test_select_cols(self):
mt = hl.utils.range_matrix_table(3, 5, n_partitions=4)
mt = mt.annotate_entries(e=mt.col_idx * mt.row_idx)
mt = mt.annotate_globals(g=1)
mt = mt.annotate_cols(sum=agg.sum(mt.e + mt.col_idx + mt.row_idx + mt.g) + mt.col_idx + mt.g,
count=agg.count_where(mt.e % 2 == 0),
foo=agg.count())
result = convert_struct_to_dict(mt.cols().collect()[-2])
self.assertEqual(result, {'col_idx': 3, 'sum': 28, 'count': 2, 'foo': 3})
def test_drop(self):
mt = self.get_mt()
mt = mt.annotate_globals(foo=5)
mt = mt.annotate_cols(bar=5)
mt1 = mt.drop('GT', 'info', 'foo', 'bar')
self.assertTrue('foo' not in mt1.globals)
self.assertTrue('info' not in mt1.row)
self.assertTrue('bar' not in mt1.col)
self.assertTrue('GT' not in mt1.entry)
mt1._force_count_rows()
mt2 = mt.drop(mt.GT, mt.info, mt.foo, mt.bar)
self.assertTrue('foo' not in mt2.globals)
self.assertTrue('info' not in mt2.row)
self.assertTrue('bar' not in mt2.col)
self.assertTrue('GT' not in mt2.entry)
mt2._force_count_rows()
def test_explode_rows(self):
mt = hl.utils.range_matrix_table(4, 4)
mt = mt.annotate_entries(e=mt.row_idx * 10 + mt.col_idx)
self.assertTrue(mt.annotate_rows(x=[1]).explode_rows('x').drop('x')._same(mt))
self.assertEqual(mt.annotate_rows(x=hl.empty_array('int')).explode_rows('x').count_rows(), 0)
self.assertEqual(mt.annotate_rows(x=hl.missing('array<int>')).explode_rows('x').count_rows(), 0)
self.assertEqual(mt.annotate_rows(x=hl.range(0, mt.row_idx)).explode_rows('x').count_rows(), 6)
mt = mt.annotate_rows(x=hl.struct(y=hl.range(0, mt.row_idx)))
self.assertEqual(mt.explode_rows(mt.x.y).count_rows(), 6)
def test_explode_cols(self):
mt = hl.utils.range_matrix_table(4, 4)
mt = mt.annotate_entries(e=mt.row_idx * 10 + mt.col_idx)
self.assertTrue(mt.annotate_cols(x=[1]).explode_cols('x').drop('x')._same(mt))
self.assertEqual(mt.annotate_cols(x=hl.empty_array('int')).explode_cols('x').count_cols(), 0)
self.assertEqual(mt.annotate_cols(x=hl.missing('array<int>')).explode_cols('x').count_cols(), 0)
self.assertEqual(mt.annotate_cols(x=hl.range(0, mt.col_idx)).explode_cols('x').count_cols(), 6)
def test_explode_key_errors(self):
mt = hl.utils.range_matrix_table(1, 1).key_cols_by(a=[1]).key_rows_by(b=[1])
with self.assertRaises(ValueError):
mt.explode_cols('a')
with self.assertRaises(ValueError):
mt.explode_rows('b')
def test_group_by_field_lifetimes(self):
mt = hl.utils.range_matrix_table(3, 3)
mt2 = (mt.group_rows_by(row_idx='100')
.aggregate(x=hl.agg.collect_as_set(mt.row_idx + 5)))
assert mt2.aggregate_entries(hl.agg.all(mt2.x == hl.set({5, 6, 7})))
mt3 = (mt.group_cols_by(col_idx='100')
.aggregate(x=hl.agg.collect_as_set(mt.col_idx + 5)))
assert mt3.aggregate_entries(hl.agg.all(mt3.x == hl.set({5, 6, 7})))
def test_aggregate_cols_by(self):
mt = hl.utils.range_matrix_table(2, 4)
mt = (mt.annotate_cols(group=mt.col_idx < 2)
.annotate_globals(glob=5))
grouped = mt.group_cols_by(mt.group)
result = grouped.aggregate(sum=hl.agg.sum(mt.row_idx * 2 + mt.col_idx + mt.glob) + 3)
expected = (hl.Table.parallelize([
{'row_idx': 0, 'group': True, 'sum': 14},
{'row_idx': 0, 'group': False, 'sum': 18},
{'row_idx': 1, 'group': True, 'sum': 18},
{'row_idx': 1, 'group': False, 'sum': 22}
], hl.tstruct(row_idx=hl.tint, group=hl.tbool, sum=hl.tint64))
.annotate_globals(glob=5)
.key_by('row_idx', 'group'))
self.assertTrue(result.entries()._same(expected))
def test_aggregate_cols_by_init_op(self):
mt = hl.import_vcf(resource('sample.vcf'))
cs = mt.group_cols_by(mt.s).aggregate(cs = hl.agg.call_stats(mt.GT, mt.alleles))
cs._force_count_rows() # should run without error
def test_aggregate_cols_scope_violation(self):
mt = get_dataset()
with pytest.raises(hl.expr.ExpressionException) as exc:
mt.aggregate_cols(hl.agg.filter(False, hl.agg.sum(mt.GT.is_non_ref())))
assert "scope violation" in str(exc.value)
def test_aggregate_rows_by(self):
mt = hl.utils.range_matrix_table(4, 2)
mt = (mt.annotate_rows(group=mt.row_idx < 2)
.annotate_globals(glob=5))
grouped = mt.group_rows_by(mt.group)
result = grouped.aggregate(sum=hl.agg.sum(mt.col_idx * 2 + mt.row_idx + mt.glob) + 3)
expected = (hl.Table.parallelize([
{'col_idx': 0, 'group': True, 'sum': 14},
{'col_idx': 1, 'group': True, 'sum': 18},
{'col_idx': 0, 'group': False, 'sum': 18},
{'col_idx': 1, 'group': False, 'sum': 22}
], hl.tstruct(group=hl.tbool, col_idx=hl.tint, sum=hl.tint64))
.annotate_globals(glob=5)
.key_by('group', 'col_idx'))
self.assertTrue(result.entries()._same(expected))
def test_collect_cols_by_key(self):
mt = hl.utils.range_matrix_table(3, 3)
col_dict = hl.literal({0: [1], 1: [2, 3], 2: [4, 5, 6]})
mt = mt.annotate_cols(foo=col_dict.get(mt.col_idx)) \
.explode_cols('foo')
mt = mt.annotate_entries(bar=mt.row_idx * mt.foo)
grouped = mt.collect_cols_by_key()
self.assertListEqual(grouped.cols().order_by('col_idx').collect(),
[hl.Struct(col_idx=0, foo=[1]),
hl.Struct(col_idx=1, foo=[2, 3]),
hl.Struct(col_idx=2, foo=[4, 5, 6])])
self.assertListEqual(
grouped.entries().select('bar')
.order_by('row_idx', 'col_idx').collect(),
[hl.Struct(row_idx=0, col_idx=0, bar=[0]),
hl.Struct(row_idx=0, col_idx=1, bar=[0, 0]),
hl.Struct(row_idx=0, col_idx=2, bar=[0, 0, 0]),
hl.Struct(row_idx=1, col_idx=0, bar=[1]),
hl.Struct(row_idx=1, col_idx=1, bar=[2, 3]),
hl.Struct(row_idx=1, col_idx=2, bar=[4, 5, 6]),
hl.Struct(row_idx=2, col_idx=0, bar=[2]),
hl.Struct(row_idx=2, col_idx=1, bar=[4, 6]),
hl.Struct(row_idx=2, col_idx=2, bar=[8, 10, 12])])
def test_weird_names(self):
ds = self.get_mt()
exprs = {'a': 5, ' a ': 5, r'\%!^!@#&#&$%#$%': [5], '$': 5, 'ß': 5}
ds.annotate_globals(**exprs)
ds.select_globals(**exprs)
ds.annotate_cols(**exprs)
ds1 = ds.select_cols(**exprs)
ds.annotate_rows(**exprs)
ds2 = ds.select_rows(**exprs)
ds.annotate_entries(**exprs)
ds.select_entries(**exprs)
ds1.explode_cols(r'\%!^!@#&#&$%#$%')
ds1.explode_cols(ds1[r'\%!^!@#&#&$%#$%'])
ds1.group_cols_by(ds1.a).aggregate(**{'*``81': agg.count()})
ds1.drop(r'\%!^!@#&#&$%#$%')
ds1.drop(ds1[r'\%!^!@#&#&$%#$%'])
ds2.explode_rows(r'\%!^!@#&#&$%#$%')
ds2.explode_rows(ds2[r'\%!^!@#&#&$%#$%'])
ds2.group_rows_by(ds2.a).aggregate(**{'*``81': agg.count()})
def test_semi_anti_join_rows(self):
mt = hl.utils.range_matrix_table(10, 3)
ht = hl.utils.range_table(3)
assert mt.semi_join_rows(ht).count() == (3, 3)
assert mt.anti_join_rows(ht).count() == (7, 3)
def test_semi_anti_join_cols(self):
mt = hl.utils.range_matrix_table(3, 10)
ht = hl.utils.range_table(3)
assert mt.semi_join_cols(ht).count() == (3, 3)
assert mt.anti_join_cols(ht).count() == (3, 7)
@fails_service_backend()
def test_joins(self):
mt = self.get_mt().select_rows(x1=1, y1=1)
mt2 = mt.select_rows(x2=1, y2=2)
mt2 = mt2.select_cols(c1=1, c2=2)
mt = mt.annotate_rows(y2=mt2.index_rows(mt.row_key).y2)
mt = mt.annotate_cols(c2=mt2.index_cols(mt.s).c2)
mt = mt.annotate_cols(c2=mt2.index_cols(hl.str(mt.s)).c2)
rt = mt.rows()
ct = mt.cols()
mt.annotate_rows(**rt[mt.locus, mt.alleles])
self.assertTrue(rt.all(rt.y2 == 2))
self.assertTrue(ct.all(ct.c2 == 2))
@fails_service_backend()
def test_joins_with_key_structs(self):
mt = self.get_mt()
rows = mt.rows()
cols = mt.cols()
self.assertEqual(rows[mt.locus, mt.alleles].take(1), rows[mt.row_key].take(1))
self.assertEqual(cols[mt.s].take(1), cols[mt.col_key].take(1))
self.assertEqual(mt.index_rows(mt.row_key).take(1), mt.index_rows(mt.locus, mt.alleles).take(1))
self.assertEqual(mt.index_cols(mt.col_key).take(1), mt.index_cols(mt.s).take(1))
self.assertEqual(mt[mt.row_key, mt.col_key].take(1), mt[(mt.locus, mt.alleles), mt.s].take(1))
def test_index_keyless(self):
mt = hl.utils.range_matrix_table(3, 3)
with self.assertRaisesRegex(hl.expr.ExpressionException, "MatrixTable row key: *<<<empty key>>>"):
mt.key_rows_by().index_rows(mt.row_idx)
with self.assertRaisesRegex(hl.expr.ExpressionException, "MatrixTable col key: *<<<empty key>>>"):
mt.key_cols_by().index_cols(mt.col_idx)
def test_table_join(self):
ds = self.get_mt()
# test different row schemas
self.assertTrue(ds.union_cols(ds.drop(ds.info))
.count_rows(), 346)
@skip_when_service_backend('''The Service and Shuffler have no way of knowing the order in which rows appear in the original
dataset, as such it is impossible to guarantee the ordering in `matches`.
https://hail.zulipchat.com/#narrow/stream/123011-Hail-Dev/topic/test_drop/near/235425714''')
def test_table_product_join(self):
left = hl.utils.range_matrix_table(5, 1)
right = hl.utils.range_table(5)
right = right.annotate(i=hl.range(right.idx + 1, 5)).explode('i').key_by('i')
left = left.annotate_rows(matches=right.index(left.row_key, all_matches=True))
rows = left.rows()
self.assertTrue(rows.all(rows.matches.map(lambda x: x.idx) == hl.range(0, rows.row_idx)))
@fails_service_backend()
@fails_local_backend()
def test_naive_coalesce(self):
mt = self.get_mt(min_partitions=8)
self.assertEqual(mt.n_partitions(), 8)
repart = mt.naive_coalesce(2)
self.assertTrue(mt._same(repart))
def test_coalesce_with_no_rows(self):
mt = self.get_mt().filter_rows(False)
self.assertEqual(mt.repartition(1).count_rows(), 0)
def test_literals_rebuild(self):
mt = hl.utils.range_matrix_table(1, 1)
mt = mt.annotate_rows(x=hl.if_else(hl.literal([1,2,3])[mt.row_idx] < hl.rand_unif(10, 11), mt.globals, hl.struct()))
mt._force_count_rows()
@fails_service_backend()
def test_globals_lowering(self):
mt = hl.utils.range_matrix_table(1, 1).annotate_globals(x=1)
lit = hl.literal(hl.utils.Struct(x = 0))
mt.annotate_rows(foo=hl.agg.collect(mt.globals == lit))._force_count_rows()
mt.annotate_cols(foo=hl.agg.collect(mt.globals == lit))._force_count_rows()
mt.filter_rows(mt.globals == lit)._force_count_rows()
mt.filter_cols(mt.globals == lit)._force_count_rows()
mt.filter_entries(mt.globals == lit)._force_count_rows()
(mt.group_rows_by(mt.row_idx)
.aggregate_rows(foo=hl.agg.collect(mt.globals == lit))
.aggregate(bar=hl.agg.collect(mt.globals == lit))
._force_count_rows())
(mt.group_cols_by(mt.col_idx)
.aggregate_cols(foo=hl.agg.collect(mt.globals == lit))
.aggregate(bar=hl.agg.collect(mt.globals == lit))
._force_count_rows())
@skip_when_service_backend('ShuffleRead non-deterministically causes segfaults')
def test_unions(self):
dataset = hl.import_vcf(resource('sample2.vcf'))
# test union_rows
ds1 = dataset.filter_rows(dataset.locus.position % 2 == 1)
ds2 = dataset.filter_rows(dataset.locus.position % 2 == 0)
datasets = [ds1, ds2]
r1 = ds1.union_rows(ds2)
r2 = hl.MatrixTable.union_rows(*datasets)
self.assertTrue(r1._same(r2))
with self.assertRaises(ValueError):
ds1.filter_cols(ds1.s.endswith('5')).union_rows(ds2)
# test union_cols
ds = dataset.union_cols(dataset).union_cols(dataset)
for s, count in ds.aggregate_cols(agg.counter(ds.s)).items():
self.assertEqual(count, 3)
@skip_when_service_backend('Shuffler encoding/decoding is broken.')
def test_union_cols_example(self):
joined = hl.import_vcf(resource('joined.vcf'))
left = hl.import_vcf(resource('joinleft.vcf'))
right = hl.import_vcf(resource('joinright.vcf'))
self.assertTrue(left.union_cols(right)._same(joined))
def test_union_cols_distinct(self):
mt = hl.utils.range_matrix_table(10, 10)
mt = mt.key_rows_by(x = mt.row_idx // 2)
assert mt.union_cols(mt).count_rows() == 5
@skip_when_service_backend('flaky https://hail.zulipchat.com/#narrow/stream/127527-team/topic/CI.20Deploy.20Failure/near/237593731')
def test_union_cols_outer(self):
r, c = 10, 10
mt = hl.utils.range_matrix_table(2*r, c)
mt = mt.annotate_entries(entry=hl.tuple([mt.row_idx, mt.col_idx]))
mt2 = hl.utils.range_matrix_table(2*r, c)
mt2 = mt2.key_rows_by(row_idx=mt2.row_idx + r)
mt2 = mt2.key_cols_by(col_idx=mt2.col_idx + c)
mt2 = mt2.annotate_entries(entry=hl.tuple([mt2.row_idx, mt2.col_idx]))
expected = hl.utils.range_matrix_table(3*r, 2*c)
missing = hl.missing(hl.ttuple(hl.tint, hl.tint))
expected = expected.annotate_entries(entry=hl.if_else(
expected.col_idx < c,
hl.if_else(expected.row_idx < 2*r, hl.tuple([expected.row_idx, expected.col_idx]), missing),
hl.if_else(expected.row_idx >= r, hl.tuple([expected.row_idx, expected.col_idx]), missing)))
assert mt.union_cols(mt2, row_join_type='outer')._same(expected)
def test_union_rows_different_col_schema(self):
mt = hl.utils.range_matrix_table(10, 10)
mt2 = hl.utils.range_matrix_table(10, 10)
mt2 = mt2.annotate_cols(x=mt2.col_idx + 1)
mt2 = mt2.annotate_globals(g="foo")
self.assertEqual(mt.union_rows(mt2).count_rows(), 20)
def test_index(self):
ds = self.get_mt(min_partitions=8)
self.assertEqual(ds.n_partitions(), 8)
ds = ds.add_row_index('rowidx').add_col_index('colidx')
for i, struct in enumerate(ds.cols().select('colidx').collect()):
self.assertEqual(i, struct.colidx)
for i, struct in enumerate(ds.rows().select('rowidx').collect()):
self.assertEqual(i, struct.rowidx)
def test_choose_cols(self):
ds = self.get_mt()
indices = list(range(ds.count_cols()))
random.shuffle(indices)
old_order = ds.key_cols_by()['s'].collect()
self.assertEqual(ds.choose_cols(indices).key_cols_by()['s'].collect(),
[old_order[i] for i in indices])
self.assertEqual(ds.choose_cols(list(range(10))).s.collect(),
old_order[:10])
@skip_when_service_backend('Shuffler encoding/decoding is broken.')
def test_choose_cols_vs_explode(self):
ds = self.get_mt()
ds2 = ds.annotate_cols(foo=[0, 0]).explode_cols('foo').drop('foo')
self.assertTrue(ds.choose_cols(sorted(list(range(ds.count_cols())) * 2))._same(ds2))
def test_distinct_by_row(self):
orig_mt = hl.utils.range_matrix_table(10, 10)
mt = orig_mt.key_rows_by(row_idx=orig_mt.row_idx // 2)
self.assertTrue(mt.distinct_by_row().count_rows() == 5)
self.assertTrue(orig_mt.union_rows(orig_mt).distinct_by_row()._same(orig_mt))
def test_distinct_by_col(self):
orig_mt = hl.utils.range_matrix_table(10, 10)
mt = orig_mt.key_cols_by(col_idx=orig_mt.col_idx // 2)
self.assertTrue(mt.distinct_by_col().count_cols() == 5)
self.assertTrue(orig_mt.union_cols(orig_mt).distinct_by_col()._same(orig_mt))
def test_aggregation_with_no_aggregators(self):
mt = hl.utils.range_matrix_table(3, 3)
self.assertEqual(mt.group_rows_by(mt.row_idx).aggregate().count_rows(), 3)
self.assertEqual(mt.group_cols_by(mt.col_idx).aggregate().count_cols(), 3)
@fails_service_backend()
def test_computed_key_join_1(self):
ds = self.get_mt()
kt = hl.Table.parallelize(
[{'key': 0, 'value': True},
{'key': 1, 'value': False}],
hl.tstruct(key=hl.tint32, value=hl.tbool),
key=['key'])
ds = ds.annotate_rows(key=ds.locus.position % 2)
ds = ds.annotate_rows(value=kt[ds['key']]['value'])
rt = ds.rows()
self.assertTrue(
rt.all(((rt.locus.position % 2) == 0) == rt['value']))
@fails_service_backend()
def test_computed_key_join_2(self):
# multiple keys
ds = self.get_mt()
kt = hl.Table.parallelize(
[{'key1': 0, 'key2': 0, 'value': 0},
{'key1': 1, 'key2': 0, 'value': 1},
{'key1': 0, 'key2': 1, 'value': -2},
{'key1': 1, 'key2': 1, 'value': -1}],
hl.tstruct(key1=hl.tint32, key2=hl.tint32, value=hl.tint32),
key=['key1', 'key2'])
ds = ds.annotate_rows(key1=ds.locus.position % 2, key2=ds.info.DP % 2)
ds = ds.annotate_rows(value=kt[ds.key1, ds.key2]['value'])
rt = ds.rows()
self.assertTrue(
rt.all((rt.locus.position % 2) - 2 * (rt.info.DP % 2) == rt['value']))
@fails_service_backend()
def test_computed_key_join_3(self):
# duplicate row keys
ds = self.get_mt()
kt = hl.Table.parallelize(
[{'culprit': 'InbreedingCoeff', 'foo': 'bar', 'value': 'IB'}],
hl.tstruct(culprit=hl.tstr, foo=hl.tstr, value=hl.tstr),
key=['culprit', 'foo'])
ds = ds.annotate_rows(
dsfoo='bar',
info=ds.info.annotate(culprit=[ds.info.culprit, "foo"]))
ds = ds.explode_rows(ds.info.culprit)
ds = ds.annotate_rows(value=kt[ds.info.culprit, ds.dsfoo]['value'])
rt = ds.rows()
self.assertTrue(
rt.all(hl.if_else(
rt.info.culprit == "InbreedingCoeff",
rt['value'] == "IB",
hl.is_missing(rt['value']))))
@fails_service_backend()
@fails_local_backend()
def test_interval_join(self):
left = hl.utils.range_matrix_table(50, 1, n_partitions=10)
intervals = hl.utils.range_table(4)
intervals = intervals.key_by(interval=hl.interval(intervals.idx * 10, intervals.idx * 10 + 5))
left = left.annotate_rows(interval_matches=intervals.index(left.row_key))
rows = left.rows()
self.assertTrue(rows.all(hl.case()
.when(rows.row_idx % 10 < 5, rows.interval_matches.idx == rows.row_idx // 10)
.default(hl.is_missing(rows.interval_matches))))
@fails_service_backend()
@fails_local_backend()
def test_interval_product_join(self):
left = hl.utils.range_matrix_table(50, 1, n_partitions=8)
intervals = hl.utils.range_table(25)
intervals = intervals.key_by(interval=hl.interval(
1 + (intervals.idx // 5) * 10 + (intervals.idx % 5),
(1 + intervals.idx // 5) * 10 - (intervals.idx % 5)))
intervals = intervals.annotate(i=intervals.idx % 5)
left = left.annotate_rows(interval_matches=intervals.index(left.row_key, all_matches=True))
rows = left.rows()
self.assertTrue(rows.all(hl.sorted(rows.interval_matches.map(lambda x: x.i))
== hl.range(0, hl.min(rows.row_idx % 10, 10 - rows.row_idx % 10))))
def test_entry_join_self(self):
mt1 = hl.utils.range_matrix_table(10, 10, n_partitions=4).choose_cols([9, 8, 7, 6, 5, 4, 3, 2, 1, 0])
mt1 = mt1.annotate_entries(x=10 * mt1.row_idx + mt1.col_idx)
self.assertEqual(mt1[mt1.row_idx, mt1.col_idx].dtype, mt1.entry.dtype)
mt_join = mt1.annotate_entries(x2=mt1[mt1.row_idx, mt1.col_idx].x)
mt_join_entries = mt_join.entries()
self.assertTrue(mt_join_entries.all(mt_join_entries.x == mt_join_entries.x2))
@fails_service_backend()
def test_entry_join_const(self):
mt1 = hl.utils.range_matrix_table(10, 10, n_partitions=4)
mt1 = mt1.annotate_entries(x=mt1.row_idx + mt1.col_idx)
mt2 = hl.utils.range_matrix_table(1, 1, n_partitions=1)
mt2 = mt2.annotate_entries(foo=10101)
mt_join = mt1.annotate_entries(**mt2[mt1.row_idx // 100, mt1.col_idx // 100])
mt_join_entries = mt_join.entries()
self.assertTrue(mt_join_entries.all(mt_join_entries['foo'] == 10101))
def test_entry_join_missingness(self):
mt1 = hl.utils.range_matrix_table(10, 10, n_partitions=4)
mt1 = mt1.annotate_entries(x=mt1.row_idx + mt1.col_idx)
mt2 = mt1.filter_cols(mt1.col_idx % 2 == 0)
mt2 = mt2.filter_rows(mt2.row_idx % 2 == 0)
mt_join = mt1.annotate_entries(x2=mt2[mt1.row_idx, mt1.col_idx].x * 10)
mt_join_entries = mt_join.entries()
kept = mt_join_entries.filter((mt_join_entries.row_idx % 2 == 0) & (mt_join_entries.col_idx % 2 == 0))
removed = mt_join_entries.filter(~((mt_join_entries.row_idx % 2 == 0) & (mt_join_entries.col_idx % 2 == 0)))
self.assertTrue(kept.all(hl.is_defined(kept.x2) & (kept.x2 == kept.x * 10)))
self.assertTrue(removed.all(hl.is_missing(removed.x2)))
def test_entries_table_length_and_fields(self):
mt = hl.utils.range_matrix_table(10, 10, n_partitions=4)
mt = mt.annotate_entries(x=mt.col_idx + mt.row_idx)
et = mt.entries()
self.assertEqual(et.count(), 100)
self.assertTrue(et.all(et.x == et.col_idx + et.row_idx))
def test_entries_table_no_keys(self):
mt = hl.utils.range_matrix_table(2, 2)
mt = mt.annotate_entries(x = (mt.row_idx, mt.col_idx))
original_order = [
hl.utils.Struct(row_idx=0, col_idx=0, x=(0, 0)),
hl.utils.Struct(row_idx=0, col_idx=1, x=(0, 1)),
hl.utils.Struct(row_idx=1, col_idx=0, x=(1, 0)),
hl.utils.Struct(row_idx=1, col_idx=1, x=(1, 1)),
]
assert mt.entries().collect() == original_order
assert mt.key_cols_by().entries().collect() == original_order
assert mt.key_rows_by().key_cols_by().entries().collect() == original_order
assert mt.key_rows_by().entries().collect() == sorted(original_order, key=lambda x: x.col_idx)
@fails_service_backend()
def test_entries_table_with_out_of_order_row_key_fields(self):
mt = hl.utils.range_matrix_table(10, 10, 1)
mt = mt.select_rows(key2=0, key1=mt.row_idx)
mt = mt.key_rows_by(mt.key1, mt.key2)
mt.entries()._force_count()
def test_filter_cols_required_entries(self):
mt1 = hl.utils.range_matrix_table(10, 10, n_partitions=4)
mt1 = mt1.filter_cols(mt1.col_idx < 3)
self.assertEqual(len(mt1.entries().collect()), 30)
def test_filter_cols_with_global_references(self):
mt = hl.utils.range_matrix_table(10, 10)
s = hl.literal({1, 3, 5, 7})
self.assertEqual(mt.filter_cols(s.contains(mt.col_idx)).count_cols(), 4)
def test_filter_cols_agg(self):
mt = hl.utils.range_matrix_table(10, 10)
assert mt.filter_cols(hl.agg.count() > 5).count_cols() == 10
def test_vcf_regression(self):
ds = hl.import_vcf(resource('33alleles.vcf'))
self.assertEqual(
ds.filter_rows(ds.alleles.length() == 2).count_rows(), 0)
def test_field_groups(self):
ds = self.get_mt()
df = ds.annotate_rows(row_struct=ds.row).rows()
self.assertTrue(df.all((df.info == df.row_struct.info) & (df.qual == df.row_struct.qual)))
ds2 = ds.add_col_index()
df = ds2.annotate_cols(col_struct=ds2.col).cols()
self.assertTrue(df.all((df.col_idx == df.col_struct.col_idx)))
df = ds.annotate_entries(entry_struct=ds.entry).entries()
self.assertTrue(df.all(
((hl.is_missing(df.GT) |
(df.GT == df.entry_struct.GT)) &
(df.AD == df.entry_struct.AD))))
@fails_service_backend()
@fails_local_backend()
def test_filter_partitions(self):
ds = self.get_mt(min_partitions=8)
self.assertEqual(ds.n_partitions(), 8)
self.assertEqual(ds._filter_partitions([0, 1, 4]).n_partitions(), 3)
self.assertEqual(ds._filter_partitions(range(3)).n_partitions(), 3)
self.assertEqual(ds._filter_partitions([4, 5, 7], keep=False).n_partitions(), 5)
self.assertTrue(
ds._same(hl.MatrixTable.union_rows(
ds._filter_partitions([0, 3, 7]),
ds._filter_partitions([0, 3, 7], keep=False))))
@skip_when_service_backend('Shuffler encoding/decoding is broken.')
def test_from_rows_table(self):
mt = hl.import_vcf(resource('sample.vcf'))
mt = mt.annotate_globals(foo='bar')
rt = mt.rows()
rm = hl.MatrixTable.from_rows_table(rt)
self.assertTrue(rm._same(mt.filter_cols(False).select_entries().key_cols_by().select_cols()))
def test_sample_rows(self):
ds = self.get_mt()
ds_small = ds.sample_rows(0.01)
self.assertTrue(ds_small.count_rows() < ds.count_rows())
@fails_service_backend()
def test_read_stored_cols(self):
ds = self.get_mt()
ds = ds.annotate_globals(x='foo')
f = new_temp_file(extension='mt')
ds.write(f)
t = hl.read_table(f + '/cols')
self.assertTrue(ds.cols().key_by()._same(t))
@skip_when_service_backend('Shuffler encoding/decoding is broken.')
def test_read_stored_rows(self):
ds = self.get_mt()
ds = ds.annotate_globals(x='foo')
f = new_temp_file(extension='mt')
ds.write(f)
t = hl.read_table(f + '/rows')
self.assertTrue(ds.rows()._same(t))
def test_read_stored_globals(self):
ds = self.get_mt()
ds = ds.annotate_globals(x=5, baz='foo')
f = new_temp_file(extension='mt')
ds.write(f)
t = hl.read_table(f + '/globals')
self.assertTrue(ds.globals_table()._same(t))
@fails_service_backend()
def test_indexed_read(self):
mt = hl.utils.range_matrix_table(2000, 100, 10)
f = new_temp_file(extension='mt')
mt.write(f)
mt2 = hl.read_matrix_table(f, _intervals=[
hl.Interval(start=150, end=250, includes_start=True, includes_end=False),
hl.Interval(start=250, end=500, includes_start=True, includes_end=False),
])
self.assertEqual(mt2.n_partitions(), 2)
self.assertTrue(mt.filter_rows((mt.row_idx >= 150) & (mt.row_idx < 500))._same(mt2))
mt2 = hl.read_matrix_table(f, _intervals=[
hl.Interval(start=150, end=250, includes_start=True, includes_end=False),
hl.Interval(start=250, end=500, includes_start=True, includes_end=False),
], _filter_intervals=True)
self.assertEqual(mt2.n_partitions(), 3)
self.assertTrue(mt.filter_rows((mt.row_idx >= 150) & (mt.row_idx < 500))._same(mt2))
@fails_service_backend()
def test_indexed_read_vcf(self):
vcf = self.get_mt(10)
f = new_temp_file(extension='mt')
vcf.write(f)
l1, l2, l3, l4 = hl.Locus('20', 10000000), hl.Locus('20', 11000000), hl.Locus('20', 13000000), hl.Locus('20', 14000000)
mt = hl.read_matrix_table(f, _intervals=[
hl.Interval(start=l1, end=l2),
hl.Interval(start=l3, end=l4),
])
self.assertEqual(mt.n_partitions(), 2)
p = (vcf.locus >= l1) & (vcf.locus < l2)
q = (vcf.locus >= l3) & (vcf.locus < l4)
self.assertTrue(vcf.filter_rows(p | q)._same(mt))
@fails_service_backend()
def test_codecs_matrix(self):
from hail.utils.java import scala_object
supported_codecs = scala_object(Env.hail().io, 'BufferSpec').specs()
ds = self.get_mt()
temp = new_temp_file(extension='mt')
for codec in supported_codecs:
ds.write(temp, overwrite=True, _codec_spec=codec.toString())
ds2 = hl.read_matrix_table(temp)
self.assertTrue(ds._same(ds2))
@fails_service_backend()
def test_codecs_table(self):
from hail.utils.java import scala_object
supported_codecs = scala_object(Env.hail().io, 'BufferSpec').specs()
rt = self.get_mt().rows()
temp = new_temp_file(extension='ht')
for codec in supported_codecs:
rt.write(temp, overwrite=True, _codec_spec=codec.toString())
rt2 = hl.read_table(temp)
self.assertTrue(rt._same(rt2))
@fails_service_backend()
def test_fix3307_read_mt_wrong(self):
mt = hl.import_vcf(resource('sample2.vcf'))
mt = hl.split_multi_hts(mt)
mt.write('/tmp/foo.mt', overwrite=True)
mt2 = hl.read_matrix_table('/tmp/foo.mt')
t = hl.read_table('/tmp/foo.mt/rows')
self.assertTrue(mt.rows()._same(t))
self.assertTrue(mt2.rows()._same(t))
self.assertTrue(mt._same(mt2))
def test_rename(self):
dataset = self.get_mt()
renamed1 = dataset.rename({'locus': 'locus2', 'info': 'info2', 's': 'info'})
self.assertEqual(renamed1['locus2']._type, dataset['locus']._type)
self.assertEqual(renamed1['info2']._type, dataset['info']._type)
self.assertEqual(renamed1['info']._type, dataset['s']._type)
self.assertEqual(renamed1['info']._indices, renamed1._col_indices)
self.assertFalse('locus' in renamed1._fields)
self.assertFalse('s' in renamed1._fields)
with self.assertRaises(ValueError):
dataset.rename({'locus': 'info'})
with self.assertRaises(ValueError):
dataset.rename({'locus': 'a', 's': 'a'})
with self.assertRaises(LookupError):
dataset.rename({'foo': 'a'})
def test_range(self):
ds = hl.utils.range_matrix_table(100, 10)
self.assertEqual(ds.count_rows(), 100)
self.assertEqual(ds.count_cols(), 10)
et = ds.annotate_entries(entry_idx=10 * ds.row_idx + ds.col_idx).entries().add_index()
self.assertTrue(et.all(et.idx == et.entry_idx))
def test_filter_entries(self):
ds = hl.utils.range_matrix_table(100, 10)
ds = ds.annotate_rows(foo=5) # triggered a RV bug
ds = ds.annotate_cols(bar=5)
ds = ds.filter_entries((ds.col_idx * ds.row_idx) % 4 == 0)
entries = ds.entries()
self.assertTrue(entries.all((entries.col_idx * entries.row_idx) % 4 == 0))
def test_filter_na(self):
mt = hl.utils.range_matrix_table(1, 1)
self.assertEqual(mt.filter_rows(hl.missing(hl.tbool)).count_rows(), 0)
self.assertEqual(mt.filter_cols(hl.missing(hl.tbool)).count_cols(), 0)
self.assertEqual(mt.filter_entries(hl.missing(hl.tbool)).entries().count(), 0)
@fails_service_backend()
def test_to_table_on_various_fields(self):
mt = hl.utils.range_matrix_table(3, 4)
globe = 'the globe!'
sample_ids = ['Bob', 'Alice', 'David', 'Carol']
entries = [1, 0, 3, 2]
rows = ['1:3:A:G', '1:2:A:G', '1:0:A:G']
sorted_rows = sorted(rows)
mt = mt.annotate_globals(globe=globe)
mt = mt.annotate_cols(s=hl.array(sample_ids)[mt.col_idx]).key_cols_by('s')
mt = mt.annotate_entries(e=hl.array(entries)[mt.col_idx])
mt = mt.annotate_rows(r=hl.array(rows)[mt.row_idx]).key_rows_by('r')
self.assertEqual(mt.globe.collect(), [globe])
self.assertEqual(mt.s.collect(), sample_ids)
self.assertEqual((mt.s + '1').collect(), [s + '1' for s in sample_ids])
self.assertEqual(('1' + mt.s).collect(), ['1' + s for s in sample_ids])
self.assertEqual(mt.s.take(1), [sample_ids[0]])
self.assertEqual(mt.e.collect(), entries * 3)
self.assertEqual(mt.e.take(1), [entries[0]])
self.assertEqual(mt.row_idx.collect(), [2, 1, 0])
self.assertEqual(mt.r.collect(), sorted_rows)
self.assertEqual(mt.r.take(1), [sorted_rows[0]])
self.assertEqual(mt.col_key.collect(),
[hl.Struct(s=s) for s in sample_ids])
self.assertEqual(mt.col.collect(),
[hl.Struct(s=s, col_idx=i) for i, s in enumerate(sample_ids)])
self.assertEqual(mt.row_key.collect(),
[hl.Struct(r=r) for r in sorted_rows])
self.assertEqual(mt.row.collect(),
sorted([hl.Struct(r=r, row_idx=i) for i, r in enumerate(rows)],
key=lambda x: x.r))
self.assertEqual(mt.entry.collect(),
[hl.Struct(e=e)
for _ in sorted_rows
for e in entries])
self.assertEqual(mt.cols().s.collect(), sorted(sample_ids))
self.assertEqual(mt.cols().s.take(1), [sorted(sample_ids)[0]])
self.assertEqual(mt.entries().e.collect(), sorted(entries) * 3)
self.assertEqual(mt.entries().e.take(1), [sorted(entries)[0]])
self.assertEqual(mt.rows().row_idx.collect(), [2, 1, 0])
self.assertEqual(mt.rows().r.collect(), sorted_rows)
self.assertEqual(mt.rows().r.take(1), [sorted_rows[0]])
@fails_service_backend()
def test_order_by(self):
ht = hl.utils.range_table(10)
self.assertEqual(ht.order_by('idx').idx.collect(), list(range(10)))
self.assertEqual(ht.order_by(hl.asc('idx')).idx.collect(), list(range(10)))
self.assertEqual(ht.order_by(hl.desc('idx')).idx.collect(), list(range(10))[::-1])
def test_order_by_complex_exprs(self):
ht = hl.utils.range_table(10)
assert ht.order_by(-ht.idx).idx.collect() == list(range(10))[::-1]
@fails_service_backend()
def test_order_by_intervals(self):
intervals = {0: hl.Interval(0, 3, includes_start=True, includes_end=False),
1: hl.Interval(0, 4, includes_start=True, includes_end=True),
2: hl.Interval(1, 4, includes_start=True, includes_end=False),
3: hl.Interval(0, 4, includes_start=False, includes_end=False),
4: hl.Interval(0, 4, includes_start=True, includes_end=False)}
ht = hl.utils.range_table(5)
ht = ht.annotate_globals(ilist=intervals)
ht = ht.annotate(interval=ht['ilist'][ht['idx']])
ht = ht.order_by(ht['interval'])
ordered = ht['interval'].collect()
expected = [intervals[i] for i in [0, 4, 1, 3, 2]]
self.assertEqual(ordered, expected)
def test_range_matrix_table(self):
mt = hl.utils.range_matrix_table(13, 7, n_partitions=5)
self.assertEqual(mt.globals.dtype, hl.tstruct())
self.assertEqual(mt.row.dtype, hl.tstruct(row_idx=hl.tint32))
self.assertEqual(mt.col.dtype, hl.tstruct(col_idx=hl.tint32))
self.assertEqual(mt.entry.dtype, hl.tstruct())
self.assertEqual(list(mt.row_key), ['row_idx'])
self.assertEqual(list(mt.col_key), ['col_idx'])
self.assertEqual([r.row_idx for r in mt.rows().collect()], list(range(13)))
self.assertEqual([r.col_idx for r in mt.cols().collect()], list(range(7)))
def test_range_matrix_table_0_rows_0_cols(self):
mt = hl.utils.range_matrix_table(0, 0)
self.assertEqual(mt.col_idx.collect(), [])
self.assertEqual(mt.row_idx.collect(), [])
mt = mt.annotate_entries(x=mt.row_idx * mt.col_idx)
self.assertEqual(mt.x.collect(), [])
def test_make_table(self):
mt = hl.utils.range_matrix_table(3, 2)
mt = mt.select_entries(x=mt.row_idx * mt.col_idx)
mt = mt.key_cols_by(col_idx=hl.str(mt.col_idx))
t = hl.Table.parallelize(
[{'row_idx': 0, '0.x': 0, '1.x': 0},
{'row_idx': 1, '0.x': 0, '1.x': 1},
{'row_idx': 2, '0.x': 0, '1.x': 2}],
hl.tstruct(**{'row_idx': hl.tint32, '0.x': hl.tint32, '1.x': hl.tint32}),
key='row_idx')
self.assertTrue(mt.make_table()._same(t))
def test_make_table_empty_entry_field(self):
mt = hl.utils.range_matrix_table(3, 2)
mt = mt.select_entries(**{'': mt.row_idx * mt.col_idx})
mt = mt.key_cols_by(col_idx=hl.str(mt.col_idx))
t = mt.make_table()
self.assertEqual(
t.row.dtype,
hl.tstruct(**{'row_idx': hl.tint32, '0': hl.tint32, '1': hl.tint32}))
def test_make_table_sep(self):
mt = hl.utils.range_matrix_table(3, 2)
mt = mt.select_entries(x=mt.row_idx * mt.col_idx)
mt = mt.key_cols_by(col_idx=hl.str(mt.col_idx))
t = mt.make_table()
assert list(t.row) == ['row_idx', '0.x', '1.x']
t = mt.make_table(separator='__')
assert list(t.row) == ['row_idx', '0__x', '1__x']
def test_make_table_row_equivalence(self):
mt = hl.utils.range_matrix_table(3, 3)
mt = mt.annotate_rows(r1 = hl.rand_norm(), r2 = hl.rand_norm())
mt = mt.annotate_entries(e1 = hl.rand_norm(), e2 = hl.rand_norm())
mt = mt.key_cols_by(col_idx=hl.str(mt.col_idx))
assert mt.make_table().select(*mt.row_value)._same(mt.rows())
def test_make_table_na_error(self):
mt = hl.utils.range_matrix_table(3, 3).key_cols_by(s = hl.missing('str'))
mt = mt.annotate_entries(e1 = 1)
with pytest.raises(ValueError):
mt.make_table()
def test_transmute(self):
mt = (
hl.utils.range_matrix_table(1, 1)
.annotate_globals(g1=0, g2=0)
.annotate_cols(c1=0, c2=0)
.annotate_rows(r1=0, r2=0)
.annotate_entries(e1=0, e2=0))
self.assertEqual(mt.transmute_globals(g3=mt.g2 + 1).globals.dtype, hl.tstruct(g1=hl.tint, g3=hl.tint))
self.assertEqual(mt.transmute_rows(r3=mt.r2 + 1).row_value.dtype, hl.tstruct(r1=hl.tint, r3=hl.tint))
self.assertEqual(mt.transmute_cols(c3=mt.c2 + 1).col_value.dtype, hl.tstruct(c1=hl.tint, c3=hl.tint))
self.assertEqual(mt.transmute_entries(e3=mt.e2 + 1).entry.dtype, hl.tstruct(e1=hl.tint, e3=hl.tint))
def test_transmute_agg(self):
mt = hl.utils.range_matrix_table(1, 1).annotate_entries(x=5)
mt = mt.transmute_rows(y = hl.agg.mean(mt.x))
def test_agg_explode(self):
t = hl.Table.parallelize([
hl.struct(a=[1, 2]),
hl.struct(a=hl.empty_array(hl.tint32)),
hl.struct(a=hl.missing(hl.tarray(hl.tint32))),
hl.struct(a=[3]),
hl.struct(a=[hl.missing(hl.tint32)])
])
self.assertCountEqual(t.aggregate(hl.agg.explode(lambda elt: hl.agg.collect(elt), t.a)),
[1, 2, None, 3])
def test_agg_call_stats(self):
t = hl.Table.parallelize([
hl.struct(c=hl.call(0, 0)),
hl.struct(c=hl.call(0, 1)),
hl.struct(c=hl.call(0, 2, phased=True)),
hl.struct(c=hl.call(1)),
hl.struct(c=hl.call(0)),
hl.struct(c=hl.call())
])
actual = t.aggregate(hl.agg.call_stats(t.c, ['A', 'T', 'G']))
expected = hl.struct(AC=[5, 2, 1],
AF=[5.0 / 8.0, 2.0 / 8.0, 1.0 / 8.0],
AN=8,
homozygote_count=[1, 0, 0])
        self.assertTrue(hl.Table.parallelize([actual])._same(
            hl.Table.parallelize([expected])))
@fails_service_backend()
@fails_local_backend()
def test_hardy_weinberg_test(self):
mt = hl.import_vcf(resource('HWE_test.vcf'))
mt_two_sided = mt.select_rows(**hl.agg.hardy_weinberg_test(mt.GT, one_sided=False))
rt_two_sided = mt_two_sided.rows()
expected_two_sided = hl.Table.parallelize([
hl.struct(
locus=hl.locus('20', pos),
alleles=alleles,
het_freq_hwe=r,
p_value=p
)
for (pos, alleles, r, p) in [
(1, ['A', 'G'], 0.0, 0.5),
(2, ['A', 'G'], 0.25, 0.5),
(3, ['T', 'C'], 0.5357142857142857, 0.21428571428571427),
(4, ['T', 'A'], 0.5714285714285714, 0.6571428571428573),
(5, ['G', 'A'], 0.3333333333333333, 0.5)]],
key=['locus', 'alleles'])
self.assertTrue(rt_two_sided.filter(rt_two_sided.locus.position != 6)._same(expected_two_sided))
rt6_two_sided = rt_two_sided.filter(rt_two_sided.locus.position == 6).collect()[0]
self.assertEqual(rt6_two_sided['p_value'], 0.5)
self.assertTrue(math.isnan(rt6_two_sided['het_freq_hwe']))
mt_one_sided = mt.select_rows(**hl.agg.hardy_weinberg_test(mt.GT, one_sided=True))
rt_one_sided = mt_one_sided.rows()
expected_one_sided = hl.Table.parallelize([
hl.struct(
locus=hl.locus('20', pos),
alleles=alleles,
het_freq_hwe=r,
p_value=p
)
for (pos, alleles, r, p) in [
(1, ['A', 'G'], 0.0, 0.5),
(2, ['A', 'G'], 0.25, 0.5),
(3, ['T', 'C'], 0.5357142857142857, 0.7857142857142857),
(4, ['T', 'A'], 0.5714285714285714, 0.5714285714285715),
(5, ['G', 'A'], 0.3333333333333333, 0.5)]],
key=['locus', 'alleles'])
self.assertTrue(rt_one_sided.filter(rt_one_sided.locus.position != 6)._same(expected_one_sided))
rt6_one_sided = rt_one_sided.filter(rt_one_sided.locus.position == 6).collect()[0]
self.assertEqual(rt6_one_sided['p_value'], 0.5)
self.assertTrue(math.isnan(rt6_one_sided['het_freq_hwe']))
def test_hw_func_and_agg_agree(self):
mt = hl.import_vcf(resource('sample.vcf'))
mt_two_sided = mt.annotate_rows(
stats=hl.agg.call_stats(mt.GT, mt.alleles),
hw=hl.agg.hardy_weinberg_test(mt.GT, one_sided=False)
)
mt_two_sided = mt_two_sided.annotate_rows(
hw2=hl.hardy_weinberg_test(
mt_two_sided.stats.homozygote_count[0],
mt_two_sided.stats.AC[1] - 2 * mt_two_sided.stats.homozygote_count[1],
mt_two_sided.stats.homozygote_count[1],
one_sided=False
)
)
rt_two_sided = mt_two_sided.rows()
self.assertTrue(rt_two_sided.all(rt_two_sided.hw == rt_two_sided.hw2))
mt_one_sided = mt.annotate_rows(
stats=hl.agg.call_stats(mt.GT, mt.alleles),
hw=hl.agg.hardy_weinberg_test(mt.GT, one_sided=True)
)
mt_one_sided = mt_one_sided.annotate_rows(
hw2=hl.hardy_weinberg_test(
mt_one_sided.stats.homozygote_count[0],
mt_one_sided.stats.AC[1] - 2 * mt_one_sided.stats.homozygote_count[1],
mt_one_sided.stats.homozygote_count[1],
one_sided=True
)
)
rt_one_sided = mt_one_sided.rows()
self.assertTrue(rt_one_sided.all(rt_one_sided.hw == rt_one_sided.hw2))
@fails_service_backend()
@fails_local_backend()
def test_write_stage_locally(self):
mt = self.get_mt()
f = new_temp_file(extension='mt')
mt.write(f, stage_locally=True)
mt2 = hl.read_matrix_table(f)
self.assertTrue(mt._same(mt2))
@skip_when_service_backend('ShuffleRead non-deterministically causes segfaults')
def test_write_checkpoint_file(self):
mt = self.get_mt()
f = new_temp_file(extension='mt')
cp = new_temp_file()
mt.write(f, _checkpoint_file=cp)
mt2 = hl.read_matrix_table(f)
self.assertTrue(mt._same(mt2))
@fails_service_backend()
def test_write_no_parts(self):
mt = hl.utils.range_matrix_table(10, 10, 2).filter_rows(False)
path = new_temp_file(extension='mt')
path2 = new_temp_file(extension='mt')
assert mt.checkpoint(path)._same(mt)
hl.read_matrix_table(path, _drop_rows=True).write(path2)
def test_nulls_in_distinct_joins(self):
# MatrixAnnotateRowsTable uses left distinct join
mr = hl.utils.range_matrix_table(7, 3, 4)
matrix1 = mr.key_rows_by(new_key=hl.if_else((mr.row_idx == 3) | (mr.row_idx == 5),
hl.missing(hl.tint32), mr.row_idx))
matrix2 = mr.key_rows_by(new_key=hl.if_else((mr.row_idx == 4) | (mr.row_idx == 6),
hl.missing(hl.tint32), mr.row_idx))
joined = matrix1.select_rows(idx1=matrix1.row_idx,
idx2=matrix2.rows()[matrix1.new_key].row_idx)
def row(new_key, idx1, idx2):
return hl.Struct(new_key=new_key, idx1=idx1, idx2=idx2)
expected = [row(0, 0, 0),
row(1, 1, 1),
row(2, 2, 2),
row(4, 4, None),
row(6, 6, None),
row(None, 3, None),
row(None, 5, None)]
self.assertEqual(joined.rows().collect(), expected)
# union_cols uses inner distinct join
matrix1 = matrix1.annotate_entries(ridx=matrix1.row_idx,
cidx=matrix1.col_idx)
matrix2 = matrix2.annotate_entries(ridx=matrix2.row_idx,
cidx=matrix2.col_idx)
matrix2 = matrix2.key_cols_by(col_idx=matrix2.col_idx + 3)
expected = hl.utils.range_matrix_table(3, 6, 1)
expected = expected.key_rows_by(new_key=expected.row_idx)
expected = expected.annotate_entries(ridx=expected.row_idx,
cidx=expected.col_idx % 3)
self.assertTrue(matrix1.union_cols(matrix2)._same(expected))
@fails_service_backend()
@fails_local_backend()
def test_row_joins_into_table(self):
rt = hl.utils.range_matrix_table(9, 13, 3)
mt1 = rt.key_rows_by(idx=rt.row_idx)
mt1 = mt1.select_rows(v=mt1.idx + 2)
mt2 = rt.key_rows_by(idx=rt.row_idx, idx2=rt.row_idx + 1)
mt2 = mt2.select_rows(v=mt2.idx + 2)
t1 = hl.utils.range_table(10, 3)
t2 = t1.key_by(t1.idx, idx2=t1.idx + 1)
t1 = t1.select(v=t1.idx + 2)
t2 = t2.select(v=t2.idx + 2)
tinterval1 = t1.key_by(k=hl.interval(t1.idx, t1.idx, True, True))
tinterval1 = tinterval1.select(v=tinterval1.idx + 2)
tinterval2 = t2.key_by(k=hl.interval(t2.key, t2.key, True, True))
tinterval2 = tinterval2.select(v=tinterval2.idx + 2)
values = [hl.Struct(v=i + 2) for i in range(9)]
# join on mt row key
self.assertEqual(t1.index(mt1.row_key).collect(), values)
self.assertEqual(t2.index(mt2.row_key).collect(), values)
self.assertEqual(t1.index(mt1.idx).collect(), values)
self.assertEqual(t2.index(mt2.idx, mt2.idx2).collect(), values)
self.assertEqual(t1.index(mt2.idx).collect(), values)
with self.assertRaises(hl.expr.ExpressionException):
t2.index(mt2.idx).collect()
with self.assertRaises(hl.expr.ExpressionException):
t2.index(mt1.row_key).collect()
# join on not mt row key
self.assertEqual(t1.index(mt1.v).collect(), [hl.Struct(v=i + 2) for i in range(2, 10)] + [None])
self.assertEqual(t2.index(mt2.idx2, mt2.v).collect(), [hl.Struct(v=i + 2) for i in range(1, 10)])
with self.assertRaises(hl.expr.ExpressionException):
t2.index(mt2.v).collect()
# join on interval of first field of mt row key
self.assertEqual(tinterval1.index(mt1.idx).collect(), values)
self.assertEqual(tinterval1.index(mt1.row_key).collect(), values)
self.assertEqual(tinterval1.index(mt2.idx).collect(), values)
with self.assertRaises(hl.expr.ExpressionException):
tinterval1.index(mt2.row_key).collect()
with self.assertRaises(hl.expr.ExpressionException):
tinterval2.index(mt2.idx).collect()
with self.assertRaises(hl.expr.ExpressionException):
tinterval2.index(mt2.row_key).collect()
with self.assertRaises(hl.expr.ExpressionException):
tinterval2.index(mt2.idx, mt2.idx2).collect()
def test_refs_with_process_joins(self):
mt = hl.utils.range_matrix_table(10, 10)
mt = mt.annotate_entries(
a_literal=hl.literal(['a']),
a_col_join=hl.is_defined(mt.cols()[mt.col_key]),
a_row_join=hl.is_defined(mt.rows()[mt.row_key]),
an_entry_join=hl.is_defined(mt[mt.row_key, mt.col_key]),
the_global_failure=hl.if_else(True, mt.globals, hl.missing(mt.globals.dtype)),
the_row_failure=hl.if_else(True, mt.row, hl.missing(mt.row.dtype)),
the_col_failure=hl.if_else(True, mt.col, hl.missing(mt.col.dtype)),
the_entry_failure=hl.if_else(True, mt.entry, hl.missing(mt.entry.dtype)),
)
mt.count()
def test_aggregate_localize_false(self):
dim1, dim2 = 10, 10
mt = hl.utils.range_matrix_table(dim1, dim2)
mt = mt.annotate_entries(x = mt.aggregate_rows(hl.agg.max(mt.row_idx), _localize=False)
+ mt.aggregate_cols(hl.agg.max(mt.col_idx), _localize=False)
+ mt.aggregate_entries(hl.agg.max(mt.row_idx * mt.col_idx), _localize=False)
)
assert mt.x.take(1)[0] == (dim1 - 1) + (dim2 - 1) + (dim1 -1) * (dim2 - 1)
def test_agg_cols_filter(self):
t = hl.utils.range_matrix_table(1, 10)
tests = [(agg.filter(t.col_idx > 7,
agg.collect(t.col_idx + 1).append(0)),
[9, 10, 0]),
(agg.filter(t.col_idx > 7,
agg.explode(lambda elt: agg.collect(elt + 1).append(0),
[t.col_idx, t.col_idx + 1])),
[9, 10, 10, 11, 0]),
(agg.filter(t.col_idx > 7,
agg.group_by(t.col_idx % 3,
hl.array(agg.collect_as_set(t.col_idx + 1)).append(0))),
{0: [10, 0], 2: [9, 0]})
]
for aggregation, expected in tests:
self.assertEqual(t.select_rows(result = aggregation).result.collect()[0], expected)
def test_agg_cols_explode(self):
t = hl.utils.range_matrix_table(1, 10)
tests = [(agg.explode(lambda elt: agg.collect(elt + 1).append(0),
hl.if_else(t.col_idx > 7, [t.col_idx, t.col_idx + 1], hl.empty_array(hl.tint32))),
[9, 10, 10, 11, 0]),
(agg.explode(lambda elt: agg.explode(lambda elt2: agg.collect(elt2 + 1).append(0),
[elt, elt + 1]),
hl.if_else(t.col_idx > 7, [t.col_idx, t.col_idx + 1], hl.empty_array(hl.tint32))),
[9, 10, 10, 11, 10, 11, 11, 12, 0]),
(agg.explode(lambda elt: agg.filter(elt > 8,
agg.collect(elt + 1).append(0)),
hl.if_else(t.col_idx > 7, [t.col_idx, t.col_idx + 1], hl.empty_array(hl.tint32))),
[10, 10, 11, 0]),
(agg.explode(lambda elt: agg.group_by(elt % 3,
agg.collect(elt + 1).append(0)),
hl.if_else(t.col_idx > 7,
[t.col_idx, t.col_idx + 1],
hl.empty_array(hl.tint32))),
{0: [10, 10, 0], 1: [11, 0], 2:[9, 0]})
]
for aggregation, expected in tests:
self.assertEqual(t.select_rows(result = aggregation).result.collect()[0], expected)
def test_agg_cols_group_by(self):
t = hl.utils.range_matrix_table(1, 10)
tests = [(agg.group_by(t.col_idx % 2,
hl.array(agg.collect_as_set(t.col_idx + 1)).append(0)),
{0: [1, 3, 5, 7, 9, 0], 1: [2, 4, 6, 8, 10, 0]}),
(agg.group_by(t.col_idx % 3,
agg.filter(t.col_idx > 7,
hl.array(agg.collect_as_set(t.col_idx + 1)).append(0))),
{0: [10, 0], 1: [0], 2: [9, 0]}),
(agg.group_by(t.col_idx % 3,
agg.explode(lambda elt: agg.collect(elt + 1).append(0),
hl.if_else(t.col_idx > 7,
[t.col_idx, t.col_idx + 1],
hl.empty_array(hl.tint32)))),
{0: [10, 11, 0], 1: [0], 2:[9, 10, 0]}),
]
for aggregation, expected in tests:
self.assertEqual(t.select_rows(result = aggregation).result.collect()[0], expected)
def localize_entries_with_both_none_is_rows_table(self):
mt = hl.utils.range_matrix_table(10, 10)
mt = mt.select_entries(x = mt.row_idx * mt.col_idx)
localized = mt.localize_entries(entries_array_field_name=None,
columns_array_field_name=None)
rows_table = mt.rows()
assert rows_table.collect() == localized.collect()
assert rows_table.globals_table().collect() == localized.globals_table().collect()
def localize_entries_with_none_cols_adds_no_globals(self):
mt = hl.utils.range_matrix_table(10, 10)
mt = mt.select_entries(x = mt.row_idx * mt.col_idx)
localized = mt.localize_entries(entries_array_field_name=Env.get_uid(),
columns_array_field_name=None)
assert mt.globals_table().collect() == localized.globals_table().collect()
def localize_entries_with_none_entries_changes_no_rows(self):
mt = hl.utils.range_matrix_table(10, 10)
mt = mt.select_entries(x = mt.row_idx * mt.col_idx)
localized = mt.localize_entries(entries_array_field_name=None,
columns_array_field_name=Env.get_uid())
rows_table = mt.rows()
assert rows_table.collect() == localized.collect()
def localize_entries_creates_arrays_of_entries_and_array_of_cols(self):
mt = hl.utils.range_matrix_table(10, 10)
mt = mt.select_entries(x = mt.row_idx * mt.col_idx)
localized = mt.localize_entries(entries_array_field_name='entries',
columns_array_field_name='cols')
assert [[x * y for x in range(0, 10)] for y in range(0, 10)] == localized.entries.collect()
assert range(0, 10) == localized.cols.collect()
@fails_service_backend()
@fails_local_backend()
def test_multi_write(self):
mt = self.get_mt()
f = new_temp_file()
hl.experimental.write_matrix_tables([mt, mt], f)
path1 = f + '0.mt'
path2 = f + '1.mt'
mt1 = hl.read_matrix_table(path1)
mt2 = hl.read_matrix_table(path2)
self.assertTrue(mt._same(mt1))
self.assertTrue(mt._same(mt2))
self.assertTrue(mt1._same(mt2))
def test_matrix_type_equality(self):
mt = hl.utils.range_matrix_table(1, 1)
mt2 = mt.annotate_entries(foo=1)
assert mt._type == mt._type
assert mt._type != mt2._type
def test_entry_filtering(self):
mt = hl.utils.range_matrix_table(10, 10)
mt = mt.filter_entries((mt.col_idx + mt.row_idx) % 2 == 0)
assert mt.aggregate_entries(hl.agg.count()) == 50
assert all(x == 5 for x in mt.annotate_cols(x = hl.agg.count()).x.collect())
assert all(x == 5 for x in mt.annotate_rows(x = hl.agg.count()).x.collect())
mt = mt.unfilter_entries()
assert mt.aggregate_entries(hl.agg.count()) == 100
assert all(x == 10 for x in mt.annotate_cols(x = hl.agg.count()).x.collect())
assert all(x == 10 for x in mt.annotate_rows(x = hl.agg.count()).x.collect())
def test_entry_filter_stats(self):
mt = hl.utils.range_matrix_table(40, 20)
mt = mt.filter_entries((mt.row_idx % 4 == 0) & (mt.col_idx % 4 == 0), keep=False)
mt = mt.compute_entry_filter_stats()
row_expected = hl.dict({True: hl.struct(n_filtered=5,
n_remaining=15,
fraction_filtered=hl.float32(0.25)),
False: hl.struct(n_filtered=0,
n_remaining=20,
fraction_filtered=hl.float32(0.0))})
assert mt.aggregate_rows(hl.agg.all(mt.entry_stats_row == row_expected[mt.row_idx % 4 == 0]))
col_expected = hl.dict({True: hl.struct(n_filtered=10,
n_remaining=30,
fraction_filtered=hl.float32(0.25)),
False: hl.struct(n_filtered=0,
n_remaining=40,
fraction_filtered=hl.float32(0.0))})
assert mt.aggregate_cols(hl.agg.all(mt.entry_stats_col == col_expected[mt.col_idx % 4 == 0]))
def test_annotate_col_agg_lowering(self):
mt = hl.utils.range_matrix_table(10, 10, 2)
mt = mt.annotate_cols(c1=[mt.col_idx, mt.col_idx * 2])
mt = mt.annotate_entries(e1=mt.col_idx + mt.row_idx, e2=[mt.col_idx * mt.row_idx, mt.col_idx * mt.row_idx ** 2])
common_ref = mt.c1[1]
mt = mt.annotate_cols(exploded=hl.agg.explode(lambda e: common_ref + hl.agg.sum(e), mt.e2),
array=hl.agg.array_agg(lambda e: common_ref + hl.agg.sum(e), mt.e2),
filt=hl.agg.filter(mt.e1 < 5, hl.agg.sum(mt.e1) + common_ref),
grouped=hl.agg.group_by(mt.e1 % 5, hl.agg.sum(mt.e1) + common_ref))
mt.cols()._force_count()
def test_annotate_rows_scan_lowering(self):
mt = hl.utils.range_matrix_table(10, 10, 2)
mt = mt.annotate_rows(r1=[mt.row_idx, mt.row_idx * 2])
common_ref = mt.r1[1]
mt = mt.annotate_rows(exploded=hl.scan.explode(lambda e: common_ref + hl.scan.sum(e), mt.r1),
array=hl.scan.array_agg(lambda e: common_ref + hl.scan.sum(e), mt.r1),
filt=hl.scan.filter(mt.row_idx < 5, hl.scan.sum(mt.row_idx) + common_ref),
grouped=hl.scan.group_by(mt.row_idx % 5, hl.scan.sum(mt.row_idx) + common_ref),
an_agg = hl.agg.sum(mt.row_idx * mt.col_idx))
mt.cols()._force_count()
def test_show_runs(self):
mt = self.get_mt()
mt.show()
def test_show_header(self):
mt = hl.utils.range_matrix_table(1, 1)
mt = mt.annotate_entries(x=1)
mt = mt.key_cols_by(col_idx=mt.col_idx + 10)
expected = ('+---------+-------+\n'
'| row_idx | 10.x |\n'
'+---------+-------+\n'
'| int32 | int32 |\n'
'+---------+-------+\n'
'| 0 | 1 |\n'
'+---------+-------+\n')
actual = mt.show(handler=str)
assert actual == expected
def test_partitioned_write(self):
mt = hl.utils.range_matrix_table(40, 3, 5)
def test_parts(parts, expected=mt):
parts = [
hl.Interval(start=hl.Struct(row_idx=s), end=hl.Struct(row_idx=e),
includes_start=_is, includes_end=ie)
for (s, e, _is, ie) in parts
]
tmp = new_temp_file(extension='mt')
mt.write(tmp, _partitions=parts)
mt2 = hl.read_matrix_table(tmp)
self.assertEqual(mt2.n_partitions(), len(parts))
self.assertTrue(mt2._same(expected))
test_parts([
(0, 40, True, False)
])
test_parts([
(-34, -31, True, True),
(-30, 9, True, True),
(10, 107, True, True),
(108, 1000, True, True)
])
test_parts([
(0, 5, True, False),
(35, 40, True, True)
],
mt.filter_rows((mt.row_idx < 5) | (mt.row_idx >= 35)))
test_parts([
(5, 35, True, False)
],
mt.filter_rows((mt.row_idx >= 5) & (mt.row_idx < 35)))
@skip_when_service_backend('Shuffler encoding/decoding is broken.')
def test_partitioned_write_coerce(self):
mt = hl.import_vcf(resource('sample.vcf'))
parts = [
hl.Interval(hl.Locus('20', 10277621), hl.Locus('20', 11898992))
]
tmp = new_temp_file(extension='mt')
mt.write(tmp, _partitions=parts)
mt2 = hl.read_matrix_table(tmp)
assert mt2.n_partitions() == len(parts)
assert hl.filter_intervals(mt, parts)._same(mt2)
def test_overwrite(self):
mt = hl.utils.range_matrix_table(1, 1)
f = new_temp_file(extension='mt')
mt.write(f)
        with pytest.raises(hl.utils.FatalError, match="file already exists"):
mt.write(f)
mt.write(f, overwrite=True)
def test_invalid_metadata(self):
with pytest.raises(hl.utils.FatalError, match='metadata does not contain file version'):
hl.read_matrix_table(resource('0.1-1fd5cc7.vds'))
def test_legacy_files_with_required_globals(self):
hl.read_table(resource('required_globals.ht'))._force_count()
hl.read_matrix_table(resource('required_globals.mt'))._force_count_rows()
def test_matrix_native_write_range(self):
mt = hl.utils.range_matrix_table(11, 3, n_partitions=3)
f = new_temp_file()
mt.write(f)
assert hl.read_matrix_table(f)._same(mt)
@fails_service_backend()
@fails_local_backend()
def test_matrix_multi_write_range(self):
mts = [
hl.utils.range_matrix_table(11, 27, n_partitions=10),
hl.utils.range_matrix_table(11, 3, n_partitions=10)
]
f = new_temp_file()
hl.experimental.write_matrix_tables(mts, f)
assert hl.read_matrix_table(f + '0.mt')._same(mts[0])
assert hl.read_matrix_table(f + '1.mt')._same(mts[1])
def test_key_cols_by_extract_issue(self):
mt = hl.utils.range_matrix_table(1000, 100)
mt = mt.key_cols_by(col_id = hl.str(mt.col_idx))
mt = mt.add_col_index()
mt.show()
def test_filtered_entries_group_rows_by(self):
mt = hl.utils.range_matrix_table(1, 1)
mt = mt.filter_entries(False)
mt = mt.group_rows_by(x=mt.row_idx // 10).aggregate(c=hl.agg.count())
assert mt.entries().collect() == [hl.Struct(x=0, col_idx=0, c=0)]
def test_filtered_entries_group_cols_by(self):
mt = hl.utils.range_matrix_table(1, 1)
mt = mt.filter_entries(False)
mt = mt.group_cols_by(x=mt.col_idx // 10).aggregate(c=hl.agg.count())
assert mt.entries().collect() == [hl.Struct(row_idx=0, x=0, c=0)]
def test_invalid_field_ref_error(self):
mt = hl.balding_nichols_model(2, 5, 5)
mt2 = hl.balding_nichols_model(2, 5, 5)
with pytest.raises(hl.expr.ExpressionException, match='Found fields from 2 objects:'):
mt.annotate_entries(x = mt.GT.n_alt_alleles() * mt2.af)
def test_invalid_field_ref_annotate(self):
mt = hl.balding_nichols_model(2, 5, 5)
mt2 = hl.balding_nichols_model(2, 5, 5)
with pytest.raises(hl.expr.ExpressionException, match='source mismatch'):
mt.annotate_entries(x = mt2.af)
def test_read_write_all_types():
mt = create_all_values_matrix_table()
tmp_file = new_temp_file()
mt.write(tmp_file)
assert hl.read_matrix_table(tmp_file)._same(mt)
@fails_service_backend()
@fails_local_backend()
def test_read_write_balding_nichols_model():
mt = hl.balding_nichols_model(3, 10, 10)
tmp_file = new_temp_file()
mt.write(tmp_file)
assert hl.read_matrix_table(tmp_file)._same(mt)
@fails_service_backend()
@fails_local_backend()
def test_read_partitions():
ht = hl.utils.range_matrix_table(n_rows=100, n_cols=10, n_partitions=3)
path = new_temp_file()
ht.write(path)
assert hl.read_matrix_table(path, _n_partitions=10).n_partitions() == 10
|
Validation/SiTrackerPhase2V/python/Phase2TrackerValidateDigi_cfi.py | ckamtsikis/cmssw | 852 | 12673413 | import FWCore.ParameterSet.Config as cms
from DQMServices.Core.DQMEDAnalyzer import DQMEDAnalyzer
digiValid = DQMEDAnalyzer('Phase2TrackerValidateDigi',
Verbosity = cms.bool(False),
TopFolderName = cms.string("Ph2TkPixelDigi"),
PixelPlotFillingFlag = cms.bool(False),
OuterTrackerDigiSource = cms.InputTag("mix", "Tracker"),
OuterTrackerDigiSimLinkSource = cms.InputTag("simSiPixelDigis", "Tracker"),
InnerPixelDigiSource = cms.InputTag("simSiPixelDigis","Pixel"),
InnerPixelDigiSimLinkSource = cms.InputTag("simSiPixelDigis", "Pixel"),
PSimHitSource = cms.VInputTag('g4SimHits:TrackerHitsPixelBarrelLowTof',
'g4SimHits:TrackerHitsPixelBarrelHighTof',
'g4SimHits:TrackerHitsPixelEndcapLowTof',
'g4SimHits:TrackerHitsPixelEndcapHighTof',
'g4SimHits:TrackerHitsTIBLowTof',
'g4SimHits:TrackerHitsTIBHighTof',
'g4SimHits:TrackerHitsTIDLowTof',
'g4SimHits:TrackerHitsTIDHighTof',
'g4SimHits:TrackerHitsTOBLowTof',
'g4SimHits:TrackerHitsTOBHighTof',
'g4SimHits:TrackerHitsTECLowTof',
'g4SimHits:TrackerHitsTECHighTof'),
SimTrackSource = cms.InputTag("g4SimHits"),
SimVertexSource = cms.InputTag("g4SimHits"),
GeometryType = cms.string('idealForDigi'),
PtCutOff = cms.double(2.0), #9.5
EtaCutOff = cms.double(3.5),
TOFLowerCutOff = cms.double(-12.5),
TOFUpperCutOff = cms.double(12.5),
TrackPtH = cms.PSet(
Nbins = cms.int32(50),
xmin = cms.double(0.0),
xmax = cms.double(100.0),
switch = cms.bool(True)
),
TrackEtaH = cms.PSet(
Nbins = cms.int32(45),
xmin = cms.double(-4.5),
xmax = cms.double(4.5),
switch = cms.bool(True)
),
TrackPhiH = cms.PSet(
Nbins = cms.int32(64),
xmin = cms.double(-3.2),
xmax = cms.double(3.2),
switch = cms.bool(True)
),
SimHitElossH = cms.PSet(
Nbins = cms.int32(100),
xmin = cms.double(0.0),
xmax = cms.double(100000.0),
switch = cms.bool(True)
),
SimHitDxH = cms.PSet(
Nbins = cms.int32(1000),
xmin = cms.double(0.0),
xmax = cms.double(0.1),
switch = cms.bool(True)
),
SimHitDyH = cms.PSet(
Nbins = cms.int32(1000),
xmin = cms.double(0.0),
xmax = cms.double(0.1),
switch = cms.bool(True)
),
SimHitDzH = cms.PSet(
Nbins = cms.int32(150),
xmin = cms.double(0.0),
xmax = cms.double(0.03),
switch = cms.bool(True)
),
XYPositionMapH = cms.PSet(
Nxbins = cms.int32(1250),
xmin = cms.double(-1250.),
xmax = cms.double(1250.),
Nybins = cms.int32(1250),
ymin = cms.double(-1250.),
ymax = cms.double(1250.),
switch = cms.bool(False)
),
RZPositionMapH = cms.PSet(
Nxbins = cms.int32(3000),
xmin = cms.double(-3000.),
xmax = cms.double(3000.),
Nybins = cms.int32(1250),
ymin = cms.double(0.),
ymax = cms.double(1250.),
switch = cms.bool(False)
),
TOFEtaMapH = cms.PSet(
Nxbins = cms.int32(45),
xmin = cms.double(-4.5),
xmax = cms.double(4.5),
Nybins = cms.int32(100),
ymin = cms.double(0.),
ymax = cms.double(50.),
switch = cms.bool(False)
),
TOFPhiMapH = cms.PSet(
Nxbins = cms.int32(64),
xmin = cms.double(-3.2),
xmax = cms.double(3.2),
Nybins = cms.int32(100),
ymin = cms.double(0.),
ymax = cms.double(50.),
switch = cms.bool(False)
),
TOFZMapH = cms.PSet(
Nxbins = cms.int32(3000),
xmin = cms.double(-300.),
xmax = cms.double(300.),
Nybins = cms.int32(100),
ymin = cms.double(0.),
ymax = cms.double(50.),
switch = cms.bool(False)
),
TOFRMapH = cms.PSet(
Nxbins = cms.int32(1200),
xmin = cms.double(0.),
xmax = cms.double(120.),
Nybins = cms.int32(100),
ymin = cms.double(0.),
ymax = cms.double(50.),
switch = cms.bool(False)
)
)
from Configuration.ProcessModifiers.premix_stage2_cff import premix_stage2
premix_stage2.toModify(digiValid,
InnerPixelDigiSource = "mixData:Pixel",
OuterTrackerDigiSource = "mixData:Tracker",
OuterTrackerDigiSimLinkSource = "mixData:Phase2OTDigiSimLink",
InnerPixelDigiSimLinkSource = "mixData:PixelDigiSimLink",
)
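# Usage sketch (not part of the original file): in a cmsRun configuration this DQM
# analyzer is typically loaded and scheduled on a Path; the step label below is an
# illustrative assumption.
#
#   process.load('Validation.SiTrackerPhase2V.Phase2TrackerValidateDigi_cfi')
#   process.digiValidation_step = cms.Path(process.digiValid)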
|
references/classification/utils.py | yoshitomo-matsubara/vision | 12063 | 12673450 | import copy
import datetime
import errno
import hashlib
import os
import time
from collections import defaultdict, deque, OrderedDict
import torch
import torch.distributed as dist
class SmoothedValue:
"""Track a series of values and provide access to smoothed values over a
window or the global series average.
"""
def __init__(self, window_size=20, fmt=None):
if fmt is None:
fmt = "{median:.4f} ({global_avg:.4f})"
self.deque = deque(maxlen=window_size)
self.total = 0.0
self.count = 0
self.fmt = fmt
def update(self, value, n=1):
self.deque.append(value)
self.count += n
self.total += value * n
def synchronize_between_processes(self):
"""
Warning: does not synchronize the deque!
"""
t = reduce_across_processes([self.count, self.total])
t = t.tolist()
self.count = int(t[0])
self.total = t[1]
@property
def median(self):
d = torch.tensor(list(self.deque))
return d.median().item()
@property
def avg(self):
d = torch.tensor(list(self.deque), dtype=torch.float32)
return d.mean().item()
@property
def global_avg(self):
return self.total / self.count
@property
def max(self):
return max(self.deque)
@property
def value(self):
return self.deque[-1]
def __str__(self):
return self.fmt.format(
median=self.median, avg=self.avg, global_avg=self.global_avg, max=self.max, value=self.value
)
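# Minimal usage sketch for SmoothedValue (values are illustrative):
#
#   v = SmoothedValue(window_size=3)
#   for x in [1.0, 2.0, 3.0, 4.0]:
#       v.update(x)
#   v.avg         # 3.0 -> mean of the last 3 values (2, 3, 4)
#   v.global_avg  # 2.5 -> mean over all 4 updates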
class MetricLogger:
def __init__(self, delimiter="\t"):
self.meters = defaultdict(SmoothedValue)
self.delimiter = delimiter
def update(self, **kwargs):
for k, v in kwargs.items():
if isinstance(v, torch.Tensor):
v = v.item()
assert isinstance(v, (float, int))
self.meters[k].update(v)
def __getattr__(self, attr):
if attr in self.meters:
return self.meters[attr]
if attr in self.__dict__:
return self.__dict__[attr]
raise AttributeError(f"'{type(self).__name__}' object has no attribute '{attr}'")
def __str__(self):
loss_str = []
for name, meter in self.meters.items():
loss_str.append(f"{name}: {str(meter)}")
return self.delimiter.join(loss_str)
def synchronize_between_processes(self):
for meter in self.meters.values():
meter.synchronize_between_processes()
def add_meter(self, name, meter):
self.meters[name] = meter
def log_every(self, iterable, print_freq, header=None):
i = 0
if not header:
header = ""
start_time = time.time()
end = time.time()
iter_time = SmoothedValue(fmt="{avg:.4f}")
data_time = SmoothedValue(fmt="{avg:.4f}")
space_fmt = ":" + str(len(str(len(iterable)))) + "d"
if torch.cuda.is_available():
log_msg = self.delimiter.join(
[
header,
"[{0" + space_fmt + "}/{1}]",
"eta: {eta}",
"{meters}",
"time: {time}",
"data: {data}",
"max mem: {memory:.0f}",
]
)
else:
log_msg = self.delimiter.join(
[header, "[{0" + space_fmt + "}/{1}]", "eta: {eta}", "{meters}", "time: {time}", "data: {data}"]
)
MB = 1024.0 * 1024.0
for obj in iterable:
data_time.update(time.time() - end)
yield obj
iter_time.update(time.time() - end)
if i % print_freq == 0:
eta_seconds = iter_time.global_avg * (len(iterable) - i)
eta_string = str(datetime.timedelta(seconds=int(eta_seconds)))
if torch.cuda.is_available():
print(
log_msg.format(
i,
len(iterable),
eta=eta_string,
meters=str(self),
time=str(iter_time),
data=str(data_time),
memory=torch.cuda.max_memory_allocated() / MB,
)
)
else:
print(
log_msg.format(
i, len(iterable), eta=eta_string, meters=str(self), time=str(iter_time), data=str(data_time)
)
)
i += 1
end = time.time()
total_time = time.time() - start_time
total_time_str = str(datetime.timedelta(seconds=int(total_time)))
print(f"{header} Total time: {total_time_str}")
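# Typical training-loop usage (sketch; `data_loader`, `model` and `criterion` come
# from the calling script and are not defined in this module):
#
#   metric_logger = MetricLogger(delimiter="  ")
#   for image, target in metric_logger.log_every(data_loader, print_freq=10, header="Epoch: [0]"):
#       loss = criterion(model(image), target)
#       metric_logger.update(loss=loss.item())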
class ExponentialMovingAverage(torch.optim.swa_utils.AveragedModel):
"""Maintains moving averages of model parameters using an exponential decay.
``ema_avg = decay * avg_model_param + (1 - decay) * model_param``
`torch.optim.swa_utils.AveragedModel <https://pytorch.org/docs/stable/optim.html#custom-averaging-strategies>`_
is used to compute the EMA.
"""
def __init__(self, model, decay, device="cpu"):
def ema_avg(avg_model_param, model_param, num_averaged):
return decay * avg_model_param + (1 - decay) * model_param
super().__init__(model, device, ema_avg)
def update_parameters(self, model):
for p_swa, p_model in zip(self.module.state_dict().values(), model.state_dict().values()):
device = p_swa.device
p_model_ = p_model.detach().to(device)
if self.n_averaged == 0:
p_swa.detach().copy_(p_model_)
else:
p_swa.detach().copy_(self.avg_fn(p_swa.detach(), p_model_, self.n_averaged.to(device)))
self.n_averaged += 1
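# Usage sketch: keep an EMA copy of the model and refresh it after each optimizer
# step (`model`, `optimizer`, `loader` and `criterion` are placeholders from the
# training script):
#
#   model_ema = ExponentialMovingAverage(model, decay=0.999, device="cpu")
#   for image, target in loader:
#       optimizer.zero_grad()
#       criterion(model(image), target).backward()
#       optimizer.step()
#       model_ema.update_parameters(model)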
def accuracy(output, target, topk=(1,)):
"""Computes the accuracy over the k top predictions for the specified values of k"""
with torch.inference_mode():
maxk = max(topk)
batch_size = target.size(0)
if target.ndim == 2:
target = target.max(dim=1)[1]
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target[None])
res = []
for k in topk:
correct_k = correct[:k].flatten().sum(dtype=torch.float32)
res.append(correct_k * (100.0 / batch_size))
return res
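# Worked example (values illustrative): for `output` of shape (batch_size, num_classes)
# and integer `target` of shape (batch_size,),
#
#   top1, top5 = accuracy(output, target, topk=(1, 5))
#
# returns percentages as scalar tensors, e.g. top1 == 75.0 when 3 of 4 samples have
# the correct class as the arg-max prediction.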
def mkdir(path):
try:
os.makedirs(path)
except OSError as e:
if e.errno != errno.EEXIST:
raise
def setup_for_distributed(is_master):
"""
This function disables printing when not in master process
"""
import builtins as __builtin__
builtin_print = __builtin__.print
def print(*args, **kwargs):
force = kwargs.pop("force", False)
if is_master or force:
builtin_print(*args, **kwargs)
__builtin__.print = print
def is_dist_avail_and_initialized():
if not dist.is_available():
return False
if not dist.is_initialized():
return False
return True
def get_world_size():
if not is_dist_avail_and_initialized():
return 1
return dist.get_world_size()
def get_rank():
if not is_dist_avail_and_initialized():
return 0
return dist.get_rank()
def is_main_process():
return get_rank() == 0
def save_on_master(*args, **kwargs):
if is_main_process():
torch.save(*args, **kwargs)
def init_distributed_mode(args):
if "RANK" in os.environ and "WORLD_SIZE" in os.environ:
args.rank = int(os.environ["RANK"])
args.world_size = int(os.environ["WORLD_SIZE"])
args.gpu = int(os.environ["LOCAL_RANK"])
elif "SLURM_PROCID" in os.environ:
args.rank = int(os.environ["SLURM_PROCID"])
args.gpu = args.rank % torch.cuda.device_count()
elif hasattr(args, "rank"):
pass
else:
print("Not using distributed mode")
args.distributed = False
return
args.distributed = True
torch.cuda.set_device(args.gpu)
args.dist_backend = "nccl"
print(f"| distributed init (rank {args.rank}): {args.dist_url}", flush=True)
torch.distributed.init_process_group(
backend=args.dist_backend, init_method=args.dist_url, world_size=args.world_size, rank=args.rank
)
setup_for_distributed(args.rank == 0)
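# Note: the RANK / WORLD_SIZE / LOCAL_RANK environment variables read above are set
# automatically when the script is launched with torchrun; the exact command below
# is only an illustration.
#
#   torchrun --nproc_per_node=8 train.py ...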
def average_checkpoints(inputs):
"""Loads checkpoints from inputs and returns a model with averaged weights. Original implementation taken from:
https://github.com/pytorch/fairseq/blob/a48f235636557b8d3bc4922a6fa90f3a0fa57955/scripts/average_checkpoints.py#L16
Args:
inputs (List[str]): An iterable of string paths of checkpoints to load from.
Returns:
A dict of string keys mapping to various values. The 'model' key
from the returned dict should correspond to an OrderedDict mapping
string parameter names to torch Tensors.
"""
params_dict = OrderedDict()
params_keys = None
new_state = None
num_models = len(inputs)
for fpath in inputs:
with open(fpath, "rb") as f:
state = torch.load(
f,
map_location=(lambda s, _: torch.serialization.default_restore_location(s, "cpu")),
)
# Copies over the settings from the first checkpoint
if new_state is None:
new_state = state
model_params = state["model"]
model_params_keys = list(model_params.keys())
if params_keys is None:
params_keys = model_params_keys
elif params_keys != model_params_keys:
raise KeyError(
f"For checkpoint {f}, expected list of params: {params_keys}, but found: {model_params_keys}"
)
for k in params_keys:
p = model_params[k]
if isinstance(p, torch.HalfTensor):
p = p.float()
if k not in params_dict:
params_dict[k] = p.clone()
# NOTE: clone() is needed in case of p is a shared parameter
else:
params_dict[k] += p
averaged_params = OrderedDict()
for k, v in params_dict.items():
averaged_params[k] = v
if averaged_params[k].is_floating_point():
averaged_params[k].div_(num_models)
else:
averaged_params[k] //= num_models
new_state["model"] = averaged_params
return new_state
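# Usage sketch (checkpoint paths are illustrative): each input must be a checkpoint
# dict whose "model" state dict has identical keys.
#
#   averaged = average_checkpoints(["model_10.pth", "model_11.pth", "model_12.pth"])
#   torch.save(averaged, "model_averaged.pth")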
def store_model_weights(model, checkpoint_path, checkpoint_key="model", strict=True):
"""
This method can be used to prepare weights files for new models. It receives as
input a model architecture and a checkpoint from the training script and produces
a file with the weights ready for release.
Examples:
from torchvision import models as M
# Classification
model = M.mobilenet_v3_large(pretrained=False)
print(store_model_weights(model, './class.pth'))
# Quantized Classification
model = M.quantization.mobilenet_v3_large(pretrained=False, quantize=False)
model.fuse_model()
model.qconfig = torch.ao.quantization.get_default_qat_qconfig('qnnpack')
_ = torch.ao.quantization.prepare_qat(model, inplace=True)
print(store_model_weights(model, './qat.pth'))
# Object Detection
model = M.detection.fasterrcnn_mobilenet_v3_large_fpn(pretrained=False, pretrained_backbone=False)
print(store_model_weights(model, './obj.pth'))
# Segmentation
model = M.segmentation.deeplabv3_mobilenet_v3_large(pretrained=False, pretrained_backbone=False, aux_loss=True)
print(store_model_weights(model, './segm.pth', strict=False))
Args:
model (pytorch.nn.Module): The model on which the weights will be loaded for validation purposes.
checkpoint_path (str): The path of the checkpoint we will load.
checkpoint_key (str, optional): The key of the checkpoint where the model weights are stored.
Default: "model".
strict (bool): whether to strictly enforce that the keys
in :attr:`state_dict` match the keys returned by this module's
:meth:`~torch.nn.Module.state_dict` function. Default: ``True``
Returns:
output_path (str): The location where the weights are saved.
"""
# Store the new model next to the checkpoint_path
checkpoint_path = os.path.abspath(checkpoint_path)
output_dir = os.path.dirname(checkpoint_path)
# Deep copy to avoid side-effects on the model object.
model = copy.deepcopy(model)
checkpoint = torch.load(checkpoint_path, map_location="cpu")
# Load the weights to the model to validate that everything works
# and remove unnecessary weights (such as auxiliaries, etc)
if checkpoint_key == "model_ema":
del checkpoint[checkpoint_key]["n_averaged"]
torch.nn.modules.utils.consume_prefix_in_state_dict_if_present(checkpoint[checkpoint_key], "module.")
model.load_state_dict(checkpoint[checkpoint_key], strict=strict)
tmp_path = os.path.join(output_dir, str(model.__hash__()))
torch.save(model.state_dict(), tmp_path)
sha256_hash = hashlib.sha256()
with open(tmp_path, "rb") as f:
# Read and update hash string value in blocks of 4K
for byte_block in iter(lambda: f.read(4096), b""):
sha256_hash.update(byte_block)
hh = sha256_hash.hexdigest()
output_path = os.path.join(output_dir, "weights-" + str(hh[:8]) + ".pth")
os.replace(tmp_path, output_path)
return output_path
def reduce_across_processes(val):
if not is_dist_avail_and_initialized():
# nothing to sync, but we still convert to tensor for consistency with the distributed case.
return torch.tensor(val)
t = torch.tensor(val, device="cuda")
dist.barrier()
dist.all_reduce(t)
return t
|
hatsploit/core/utils/ui/tip.py | EntySec/HatSploit | 139 | 12673451 | #!/usr/bin/env python3
#
# MIT License
#
# Copyright (c) 2020-2022 EntySec
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
import os
import random
from hatsploit.core.cli.badges import Badges
from hatsploit.core.cli.colors import Colors
from hatsploit.core.cli.parser import Parser
from hatsploit.core.utils.ui.colors_script import ColorsScript
from hatsploit.lib.config import Config
class Tip:
parser = Parser()
config = Config()
badges = Badges()
colors = Colors()
colors_script = ColorsScript()
def print_random_tip(self):
if os.path.exists(self.config.path_config['tips_path']):
tips = []
all_tips = os.listdir(self.config.path_config['tips_path'])
for tip in all_tips:
tips.append(tip)
if tips:
tip = ""
while not tip:
random_tip = random.randint(0, len(tips) - 1)
tip = self.colors_script.parse_colors_script(
self.config.path_config['tips_path'] + tips[random_tip])
self.badges.print_empty(f"%newline%endHatSploit Tip: {tip}%end%newline")
else:
self.badges.print_warning("No tips detected.")
else:
self.badges.print_warning("No tips detected.")
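# Usage sketch: print one random tip from the configured tips directory.
#
#   Tip().print_random_tip()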
|
venv/Lib/site-packages/ipykernel/gui/gtkembed.py | ajayiagbebaku/NFL-Model | 652 | 12673458 | """GUI support for the IPython ZeroMQ kernel - GTK toolkit support.
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2010-2011 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING.txt, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# stdlib
import sys
# Third-party
import gobject
import gtk
#-----------------------------------------------------------------------------
# Classes and functions
#-----------------------------------------------------------------------------
class GTKEmbed(object):
"""A class to embed a kernel into the GTK main event loop.
"""
def __init__(self, kernel):
self.kernel = kernel
# These two will later store the real gtk functions when we hijack them
self.gtk_main = None
self.gtk_main_quit = None
def start(self):
"""Starts the GTK main event loop and sets our kernel startup routine.
"""
# Register our function to initiate the kernel and start gtk
gobject.idle_add(self._wire_kernel)
gtk.main()
def _wire_kernel(self):
"""Initializes the kernel inside GTK.
This is meant to run only once at startup, so it does its job and
returns False to ensure it doesn't get run again by GTK.
"""
self.gtk_main, self.gtk_main_quit = self._hijack_gtk()
gobject.timeout_add(int(1000*self.kernel._poll_interval),
self.iterate_kernel)
return False
def iterate_kernel(self):
"""Run one iteration of the kernel and return True.
GTK timer functions must return True to be called again, so we make the
call to :meth:`do_one_iteration` and then return True for GTK.
"""
self.kernel.do_one_iteration()
return True
def stop(self):
# FIXME: this one isn't getting called because we have no reliable
# kernel shutdown. We need to fix that: once the kernel has a
# shutdown mechanism, it can call this.
self.gtk_main_quit()
sys.exit()
def _hijack_gtk(self):
"""Hijack a few key functions in GTK for IPython integration.
Modifies pyGTK's main and main_quit with a dummy so user code does not
block IPython. This allows us to use %run to run arbitrary pygtk
        scripts from a long-lived IPython session, and when they attempt to
        start or stop the GTK main loop, those calls simply become no-ops.
Returns
-------
The original functions that have been hijacked:
- gtk.main
- gtk.main_quit
"""
def dummy(*args, **kw):
pass
# save and trap main and main_quit from gtk
orig_main, gtk.main = gtk.main, dummy
orig_main_quit, gtk.main_quit = gtk.main_quit, dummy
return orig_main, orig_main_quit
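# Usage sketch (the `kernel` object is created by the embedding ipykernel code, not
# in this module):
#
#   embed = GTKEmbed(kernel)
#   embed.start()  # runs gtk.main() and polls kernel.do_one_iteration() on a timer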
|
app/ivr/__init__.py | itworxs/suite | 890 | 12673477 | from flask import Blueprint
ivr = Blueprint('ivr', __name__)
from . import views
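# Usage sketch (the Flask app and URL prefix are assumptions of this example):
#
#   from flask import Flask
#   app = Flask(__name__)
#   app.register_blueprint(ivr, url_prefix='/ivr')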
|
dataviz/flagssubdivisions.py | Udzu/pudzu | 119 | 12673536 | from pudzu.charts import *
df = pd.read_csv("datasets/flagssubdivisions.csv")
FONT = sans
fg, bg="black", "#EEEEEE"
default_img = "https://upload.wikimedia.org/wikipedia/commons/thumb/b/b0/No_flag.svg/1024px-No_flag.svg.png"
def process(d):
if not d: return None
description = get_non(d, 'description')
description = "{}".format(description) if description else " "
size = get_non(d, 'size')
size = "{} km²".format(size) if size else " "
flag = Image.from_url_with_cache(get_non(d, 'image', default_img)).to_rgba()
flag = flag.resize_fixed_aspect(width=318) if flag.width / flag.height > 1.8 else flag.resize((318,198))
flag = flag.pad(1 if "Ohio" not in d["name"] else 0, "grey")
return Image.from_column([
Image.from_text(d['name'].replace(r'\n','\n'), FONT(28, bold=True), align="center", beard_line=True, fg=fg),
Image.from_text(description, FONT(24, italics=True), fg=fg, beard_line=True),
Image.from_text(size, FONT(24, italics=True), fg=fg, beard_line=True),
flag
], padding=2, bg=bg, equal_widths=True)
title = Image.from_text("Flags of the largest country subdivisions".upper(), FONT(68, bold=True), fg=fg, bg=bg, align="center").pad(30, bg).pad((0,0,0,10), bg)
footer = Image.from_text("*Antarctic territorial claims are not recognised widely internationally, though the UK, France, Australia, New Zealand and Norway\nall recognize each other's claims. "
"Some claims (specficially those of the UK, Argentina and Chile) overlap.", FONT(28), fg=fg, bg=bg).pad(10, bg)
groups = list(remove_duplicates(df.group))
groups1 = [g for g in groups if g not in ["_E"]]
array = [[dict(r) for _,r in df.iterrows() if r.group == g] for g in groups1]
data = pd.DataFrame(array, index=groups1)
grid1 = grid_chart(data, process, padding=(10,20), fg=fg, bg=bg, yalign=0, row_label=lambda r: Image.from_text("{}".format(data.index[r]).upper(), FONT(32, bold=True), align="center", line_spacing=3) if not data.index[r].startswith("_") else None).pad((10,0),bg)
title2 = Image.from_text("Bonus: some proposed and secessionist flags".upper(), FONT(40, italics=True), fg=fg, bg=bg, align="center").pad(30, bg).pad(10, bg)
groups2 = [g for g in groups if g in ["_E"]]
array = [[dict(r) for _,r in df.iterrows() if r.group == g] for g in groups2]
data = pd.DataFrame(array, index=groups2)
grid2 = grid_chart(data, process, padding=(10,20), fg=fg, bg=bg, yalign=0, row_label=lambda r: Image.from_text("{}".format(data.index[r]).upper(), FONT(32, bold=True), align="center", line_spacing=3) if not data.index[r].startswith("_") else None).pad((10,0),bg)
img = Image.from_column([title, grid1, footer, title2, grid2, Rectangle((0,20))], bg=bg)
img.place(Image.from_text("/u/Udzu", FONT(24), fg=fg, bg=bg, padding=5).pad((1,1,0,0), fg), align=1, padding=5, copy=False)
img.save("output/flagssubdivisions.png")
|
wetectron/engine/bbox_aug.py | akobiisr/wetectron | 332 | 12673537 | import torch
import torchvision.transforms as TT
from wetectron.config import cfg
from wetectron.data import transforms as T
from wetectron.structures.image_list import to_image_list
from wetectron.structures.bounding_box import BoxList
from wetectron.modeling.roi_heads.box_head.inference import make_roi_box_post_processor
def im_detect_bbox_aug(model, images, device, rois=None):
# Collect detections computed under different transformations
boxlists_ts = []
for _ in range(len(images)):
boxlists_ts.append([])
def add_preds_t(boxlists_t):
for i, boxlist_t in enumerate(boxlists_t):
if len(boxlists_ts[i]) == 0:
# The first one is identity transform, no need to resize the boxlist
boxlists_ts[i].append(boxlist_t)
else:
# Resize the boxlist as the first one
boxlists_ts[i].append(boxlist_t.resize(boxlists_ts[i][0].size))
# Compute detections for the original image (identity transform)
boxlists_i = im_detect_bbox(
model, images, cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MAX_SIZE_TEST, device, rois=rois
)
add_preds_t(boxlists_i)
# Perform detection on the horizontally flipped image
if cfg.TEST.BBOX_AUG.H_FLIP:
boxlists_hf = im_detect_bbox_hflip(
model, images, cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MAX_SIZE_TEST, device, rois=rois
)
add_preds_t(boxlists_hf)
# Compute detections at different scales
for scale in cfg.TEST.BBOX_AUG.SCALES:
max_size = cfg.TEST.BBOX_AUG.MAX_SIZE
boxlists_scl = im_detect_bbox_scale(
model, images, scale, max_size, device, rois=rois
)
add_preds_t(boxlists_scl)
if cfg.TEST.BBOX_AUG.SCALE_H_FLIP:
boxlists_scl_hf = im_detect_bbox_scale(
model, images, scale, max_size, device, hflip=True, rois=rois
)
add_preds_t(boxlists_scl_hf)
# Merge boxlists detected by different bbox aug params
boxlists = []
for i, boxlist_ts in enumerate(boxlists_ts):
if cfg.TEST.BBOX_AUG.HEUR == 'UNION':
bbox = torch.cat([boxlist_t.bbox for boxlist_t in boxlist_ts])
scores = torch.cat([boxlist_t.get_field('scores') for boxlist_t in boxlist_ts])
elif cfg.TEST.BBOX_AUG.HEUR == 'AVG':
bbox = torch.mean(torch.stack([boxlist_t.bbox for boxlist_t in boxlist_ts]) , dim=0)
scores = torch.mean(torch.stack([boxlist_t.get_field('scores') for boxlist_t in boxlist_ts]), dim=0)
else:
            raise ValueError("TEST.BBOX_AUG.HEUR must be 'UNION' or 'AVG'")
boxlist = BoxList(bbox, boxlist_ts[0].size, boxlist_ts[0].mode)
boxlist.add_field('scores', scores)
boxlists.append(boxlist)
# Apply NMS and limit the final detections
results = []
post_processor = make_roi_box_post_processor(cfg)
for boxlist in boxlists:
results.append(post_processor.filter_results(boxlist, cfg.MODEL.ROI_BOX_HEAD.NUM_CLASSES))
return results
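# The augmentations above are driven entirely by the config; a minimal sketch of the
# relevant YAML, using only the keys referenced in this function (values illustrative):
#
#   TEST:
#     BBOX_AUG:
#       H_FLIP: True
#       SCALES: (400, 600, 800)
#       MAX_SIZE: 1333
#       SCALE_H_FLIP: True
#       HEUR: "UNION"   # or "AVG"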
def im_detect_bbox(model, images, target_scale, target_max_size, device, rois=None):
"""
Performs bbox detection on the original image.
"""
transform = T.Compose([
T.Resize(target_scale, target_max_size),
T.ToTensor(),
T.Normalize(
mean=cfg.INPUT.PIXEL_MEAN, std=cfg.INPUT.PIXEL_STD, to_bgr255=cfg.INPUT.TO_BGR255
)
])
t_images = []
t_rois = []
for image, roi in zip(images, rois):
t_img, _, t_roi = transform(image, rois=roi)
t_images.append(t_img)
t_rois.append(t_roi)
t_images = to_image_list(t_images, cfg.DATALOADER.SIZE_DIVISIBILITY)
t_rois = [r.to(device) if r is not None else None for r in t_rois]
return model(t_images.to(device), rois=t_rois)
def im_detect_bbox_hflip(model, images, target_scale, target_max_size, device, rois=None):
"""
Performs bbox detection on the horizontally flipped image.
Function signature is the same as for im_detect_bbox.
"""
transform = T.Compose([
T.Resize(target_scale, target_max_size),
T.RandomHorizontalFlip(1.0),
T.ToTensor(),
T.Normalize(
mean=cfg.INPUT.PIXEL_MEAN, std=cfg.INPUT.PIXEL_STD, to_bgr255=cfg.INPUT.TO_BGR255
)
])
t_images = []
t_rois = []
for image, roi in zip(images, rois):
t_img, _, t_roi = transform(image, rois=roi)
t_images.append(t_img)
t_rois.append(t_roi)
t_images = to_image_list(t_images, cfg.DATALOADER.SIZE_DIVISIBILITY)
t_rois = [r.to(device) if r is not None else None for r in t_rois]
boxlists = model(t_images.to(device), rois=t_rois)
# Invert the detections computed on the flipped image
boxlists_inv = [boxlist.transpose(0) for boxlist in boxlists]
return boxlists_inv
def im_detect_bbox_scale(model, images, target_scale, target_max_size, device, hflip=False, rois=None):
"""
Computes bbox detections at the given scale.
Returns predictions in the scaled image space.
"""
if hflip:
boxlists_scl = im_detect_bbox_hflip(model, images, target_scale, target_max_size, device, rois=rois)
else:
boxlists_scl = im_detect_bbox(model, images, target_scale, target_max_size, device, rois=rois)
return boxlists_scl
|
AET/imagenet/algorithms/__init__.py | pjwu1997/teil_project | 114 | 12673574 | from .Algorithm import *
from .UnsupervisedModel import UnsupervisedModel
from .FeatureClassificationModel import FeatureClassificationModel
|
train.py | zaidhassanch/gector | 582 | 12673592 | import argparse
import os
from random import seed
import torch
from allennlp.data.iterators import BucketIterator
from allennlp.data.vocabulary import DEFAULT_OOV_TOKEN, DEFAULT_PADDING_TOKEN
from allennlp.data.vocabulary import Vocabulary
from allennlp.modules.text_field_embedders import BasicTextFieldEmbedder
from gector.bert_token_embedder import PretrainedBertEmbedder
from gector.datareader import Seq2LabelsDatasetReader
from gector.seq2labels_model import Seq2Labels
from gector.trainer import Trainer
from gector.wordpiece_indexer import PretrainedBertIndexer
from utils.helpers import get_weights_name
def fix_seed():
torch.manual_seed(1)
torch.backends.cudnn.enabled = False
torch.backends.cudnn.benchmark = False
torch.backends.cudnn.deterministic = True
seed(43)
def get_token_indexers(model_name, max_pieces_per_token=5, lowercase_tokens=True, special_tokens_fix=0, is_test=False):
bert_token_indexer = PretrainedBertIndexer(
pretrained_model=model_name,
max_pieces_per_token=max_pieces_per_token,
do_lowercase=lowercase_tokens,
use_starting_offsets=True,
special_tokens_fix=special_tokens_fix,
is_test=is_test
)
return {'bert': bert_token_indexer}
def get_token_embedders(model_name, tune_bert=False, special_tokens_fix=0):
take_grads = True if tune_bert > 0 else False
bert_token_emb = PretrainedBertEmbedder(
pretrained_model=model_name,
top_layer_only=True, requires_grad=take_grads,
special_tokens_fix=special_tokens_fix)
token_embedders = {'bert': bert_token_emb}
embedder_to_indexer_map = {"bert": ["bert", "bert-offsets"]}
text_filed_emd = BasicTextFieldEmbedder(token_embedders=token_embedders,
embedder_to_indexer_map=embedder_to_indexer_map,
allow_unmatched_keys=True)
return text_filed_emd
def get_data_reader(model_name, max_len, skip_correct=False, skip_complex=0,
test_mode=False, tag_strategy="keep_one",
broken_dot_strategy="keep", lowercase_tokens=True,
max_pieces_per_token=3, tn_prob=0, tp_prob=1, special_tokens_fix=0,):
token_indexers = get_token_indexers(model_name,
max_pieces_per_token=max_pieces_per_token,
lowercase_tokens=lowercase_tokens,
special_tokens_fix=special_tokens_fix,
is_test=test_mode)
reader = Seq2LabelsDatasetReader(token_indexers=token_indexers,
max_len=max_len,
skip_correct=skip_correct,
skip_complex=skip_complex,
test_mode=test_mode,
tag_strategy=tag_strategy,
broken_dot_strategy=broken_dot_strategy,
lazy=True,
tn_prob=tn_prob,
tp_prob=tp_prob)
return reader
def get_model(model_name, vocab, tune_bert=False,
predictor_dropout=0,
label_smoothing=0.0,
confidence=0,
special_tokens_fix=0):
token_embs = get_token_embedders(model_name, tune_bert=tune_bert, special_tokens_fix=special_tokens_fix)
model = Seq2Labels(vocab=vocab,
text_field_embedder=token_embs,
predictor_dropout=predictor_dropout,
label_smoothing=label_smoothing,
confidence=confidence)
return model
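# Quick wiring sketch outside of main() (the weights name and vocabulary directory
# below are illustrative assumptions):
#
#   reader = get_data_reader('roberta-base', max_len=50)
#   vocab = Vocabulary.from_files('data/output_vocabulary')
#   model = get_model('roberta-base', vocab, tune_bert=1, special_tokens_fix=1)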
def main(args):
fix_seed()
if not os.path.exists(args.model_dir):
os.mkdir(args.model_dir)
weights_name = get_weights_name(args.transformer_model, args.lowercase_tokens)
# read datasets
reader = get_data_reader(weights_name, args.max_len, skip_correct=bool(args.skip_correct),
skip_complex=args.skip_complex,
test_mode=False,
tag_strategy=args.tag_strategy,
lowercase_tokens=args.lowercase_tokens,
max_pieces_per_token=args.pieces_per_token,
tn_prob=args.tn_prob,
tp_prob=args.tp_prob,
special_tokens_fix=args.special_tokens_fix)
train_data = reader.read(args.train_set)
dev_data = reader.read(args.dev_set)
default_tokens = [DEFAULT_OOV_TOKEN, DEFAULT_PADDING_TOKEN]
namespaces = ['labels', 'd_tags']
tokens_to_add = {x: default_tokens for x in namespaces}
# build vocab
if args.vocab_path:
vocab = Vocabulary.from_files(args.vocab_path)
else:
vocab = Vocabulary.from_instances(train_data,
max_vocab_size={'tokens': 30000,
'labels': args.target_vocab_size,
'd_tags': 2},
tokens_to_add=tokens_to_add)
vocab.save_to_files(os.path.join(args.model_dir, 'vocabulary'))
print("Data is loaded")
model = get_model(weights_name, vocab,
tune_bert=args.tune_bert,
predictor_dropout=args.predictor_dropout,
label_smoothing=args.label_smoothing,
special_tokens_fix=args.special_tokens_fix)
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
if torch.cuda.is_available():
if torch.cuda.device_count() > 1:
cuda_device = list(range(torch.cuda.device_count()))
else:
cuda_device = 0
else:
cuda_device = -1
if args.pretrain:
model.load_state_dict(torch.load(os.path.join(args.pretrain_folder, args.pretrain + '.th')))
model = model.to(device)
print("Model is set")
optimizer = torch.optim.Adam(model.parameters(), lr=args.lr)
scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(
optimizer, factor=0.1, patience=10)
instances_per_epoch = None if not args.updates_per_epoch else \
int(args.updates_per_epoch * args.batch_size * args.accumulation_size)
iterator = BucketIterator(batch_size=args.batch_size,
sorting_keys=[("tokens", "num_tokens")],
biggest_batch_first=True,
max_instances_in_memory=args.batch_size * 20000,
instances_per_epoch=instances_per_epoch,
)
iterator.index_with(vocab)
trainer = Trainer(model=model,
optimizer=optimizer,
scheduler=scheduler,
iterator=iterator,
train_dataset=train_data,
validation_dataset=dev_data,
serialization_dir=args.model_dir,
patience=args.patience,
num_epochs=args.n_epoch,
cuda_device=cuda_device,
shuffle=False,
accumulated_batch_count=args.accumulation_size,
cold_step_count=args.cold_steps_count,
cold_lr=args.cold_lr,
cuda_verbose_step=int(args.cuda_verbose_steps)
if args.cuda_verbose_steps else None
)
print("Start training")
trainer.train()
# Here's how to save the model.
out_model = os.path.join(args.model_dir, 'model.th')
with open(out_model, 'wb') as f:
torch.save(model.state_dict(), f)
print("Model is dumped")
if __name__ == '__main__':
# read parameters
parser = argparse.ArgumentParser()
parser.add_argument('--train_set',
help='Path to the train data', required=True)
parser.add_argument('--dev_set',
help='Path to the dev data', required=True)
parser.add_argument('--model_dir',
help='Path to the model dir', required=True)
parser.add_argument('--vocab_path',
                        help='Path to the model vocabulary directory. '
                             'If not set, the vocabulary is built from the data.',
default='')
parser.add_argument('--batch_size',
type=int,
help='The size of the batch.',
default=32)
parser.add_argument('--max_len',
type=int,
                        help='The max sentence length '
                             '(longer sentences will be truncated)',
default=50)
parser.add_argument('--target_vocab_size',
type=int,
help='The size of target vocabularies.',
default=1000)
parser.add_argument('--n_epoch',
type=int,
                        help='The number of epochs for training the model.',
default=20)
parser.add_argument('--patience',
type=int,
                        help='The number of epochs to wait without improvement'
                             ' on the validation set before stopping.',
default=3)
parser.add_argument('--skip_correct',
type=int,
help='If set than correct sentences will be skipped '
'by data reader.',
default=1)
parser.add_argument('--skip_complex',
type=int,
                        help='If set then complex corrections will be skipped '
'by data reader.',
choices=[0, 1, 2, 3, 4, 5],
default=0)
parser.add_argument('--tune_bert',
type=int,
                        help='If greater than 0 then fine-tune BERT.',
default=1)
parser.add_argument('--tag_strategy',
choices=['keep_one', 'merge_all'],
help='The type of the data reader behaviour.',
default='keep_one')
parser.add_argument('--accumulation_size',
type=int,
                        help='How many batches to accumulate before an optimizer step.',
default=4)
parser.add_argument('--lr',
type=float,
help='Set initial learning rate.',
default=1e-5)
parser.add_argument('--cold_steps_count',
type=int,
                        help='The number of epochs to train only the classifier layers first.',
default=4)
parser.add_argument('--cold_lr',
type=float,
help='Learning rate during cold_steps.',
default=1e-3)
parser.add_argument('--predictor_dropout',
type=float,
help='The value of dropout for predictor.',
default=0.0)
parser.add_argument('--lowercase_tokens',
type=int,
help='Whether to lowercase tokens.',
default=0)
parser.add_argument('--pieces_per_token',
type=int,
                        help='The max number of pieces per token.',
default=5)
parser.add_argument('--cuda_verbose_steps',
help='Number of steps after which CUDA memory information is printed. '
'Makes sense for local testing. Usually about 1000.',
default=None)
parser.add_argument('--label_smoothing',
type=float,
help='The value of parameter alpha for label smoothing.',
default=0.0)
parser.add_argument('--tn_prob',
type=float,
help='The probability to take TN from data.',
default=0)
parser.add_argument('--tp_prob',
type=float,
help='The probability to take TP from data.',
default=1)
parser.add_argument('--updates_per_epoch',
type=int,
                        help='If set then each epoch will contain exactly this number of updates.',
default=0)
parser.add_argument('--pretrain_folder',
help='The name of the pretrain folder.')
parser.add_argument('--pretrain',
help='The name of the pretrain weights in pretrain_folder param.',
default='')
parser.add_argument('--transformer_model',
choices=['bert', 'distilbert', 'gpt2', 'roberta', 'transformerxl', 'xlnet', 'albert'],
help='Name of the transformer model.',
default='roberta')
parser.add_argument('--special_tokens_fix',
type=int,
                        help='Whether to fix the problem with [CLS], [SEP] tokens tokenization.',
default=1)
args = parser.parse_args()
main(args)
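# Illustrative invocation of this training script (the script name and data paths below are
# assumptions, not taken from this repository):
#   python train.py --train_set data/train.txt --dev_set data/dev.txt \
#       --model_dir models/output --batch_size 32 --n_epoch 20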
|
pygogo/main.py | reubano/pygogo | 301 | 12673599 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# vim: sw=4:ts=4:expandtab
""" A Python logging library with super powers """
import sys
import itertools as it
from os import getcwd, path as p
from argparse import RawTextHelpFormatter, ArgumentParser
import pygogo as gogo
HDLRS_FULL = tuple(h for h in dir(gogo.handlers) if h.endswith("hdlr"))
HDLRS = tuple(h[:-5] for h in HDLRS_FULL)
LEVELS = ("critical", "error", "warning", "info", "debug")
FRMTRS_FULL = tuple(f for f in dir(gogo.formatters) if f.endswith("formatter"))
FORMATS = tuple(f[:-10] for f in FRMTRS_FULL)
CURDIR = p.basename(getcwd())
LOGFILE = "%s.log" % CURDIR
parser = ArgumentParser(
description="description: Logs a given message",
prog="gogo",
usage="%(prog)s [options] <message>",
formatter_class=RawTextHelpFormatter,
)
parser.add_argument(
dest="message",
nargs="?",
default=sys.stdin,
help="The message to log (default: reads from stdin).",
)
parser.add_argument(
"-l",
"--level",
metavar="LEVEL",
choices=LEVELS,
default="info",
help=(
"The level to log the message (default: info).\n"
"Must be one of: %s,\n%s.\n\n" % (", ".join(LEVELS[:4]), ", ".join(LEVELS[4:]))
),
)
parser.add_argument(
"-n", "--name", default=CURDIR, help="The logger name (default: %s).\n\n" % CURDIR
)
parser.add_argument(
"-D",
"--high-hdlr",
metavar="HANDLER",
choices=HDLRS,
default="stderr",
help=(
"The high pass log handler (default: stderr).\n"
"Must be one of: %s,\n%s.\n\n" % (", ".join(HDLRS[:4]), ", ".join(HDLRS[4:]))
),
)
parser.add_argument(
"-d",
"--low-hdlr",
metavar="HANDLER",
choices=HDLRS,
default="stdout",
help=(
"The low pass log handler (default: stdout).\n"
"Must be one of: %s,\n%s.\n\n" % (", ".join(HDLRS[:4]), ", ".join(HDLRS[4:]))
),
)
parser.add_argument(
"-L",
"--high-level",
metavar="LEVEL",
choices=LEVELS,
default="warning",
help=(
"Min level to log to the high pass handler\n"
"(default: warning). Must be one of: %s,\n%s.\n\n"
% (", ".join(LEVELS[:1]), ", ".join(LEVELS[1:]))
),
)
parser.add_argument(
"-e",
"--low-level",
metavar="LEVEL",
choices=LEVELS,
default="debug",
help=(
"Min level to log to the low pass handler\n"
"(default: debug). Must be one of: %s,\n%s.\n\n"
% (", ".join(LEVELS[:1]), ", ".join(LEVELS[1:]))
),
)
parser.add_argument(
"-F",
"--high-format",
metavar="FORMAT",
choices=FORMATS,
default="basic",
help=(
"High pass handler log format (default: basic)."
"\nMust be one of: %s,\n%s.\n\n"
% (", ".join(FORMATS[:4]), ", ".join(FORMATS[4:]))
),
)
parser.add_argument(
"-o",
"--low-format",
metavar="FORMAT",
choices=FORMATS,
default="basic",
help=(
"Low pass handler log format (default: basic)."
"\nMust be one of: %s,\n%s.\n\n"
% (", ".join(FORMATS[:4]), ", ".join(FORMATS[4:]))
),
)
parser.add_argument(
"-m",
"--monolog",
action="store_true",
default=False,
help="Log high level events only to high pass handler.\n\n",
)
parser.add_argument(
"-f",
"--filename",
action="append",
default=[LOGFILE],
help=(
"The filename to log to (default: %s).\nUsed in the following "
"handlers: file.\n\n"
)
% LOGFILE,
)
parser.add_argument(
"-s",
"--subject",
default=["You've got mail"],
action="append",
help=(
"The log subject (default: You've got mail)."
"\nUsed in the following handlers: email.\n\n"
),
)
parser.add_argument(
"-u",
"--url",
action="append",
default=[""],
help="The log url. Required for the following handlers:\nwebhook.\n\n",
)
parser.add_argument(
"-H",
"--host",
default=["localhost"],
action="append",
help=(
"The host (default: localhost).\nUsed in the following handlers: "
"socket and syslog.\n\n"
),
)
parser.add_argument(
"-p",
"--port",
metavar="NUM",
type=int,
action="append",
default=[""],
help=(
"The port number (default: Python logging default).\nUsed in the "
"following handlers: socket and syslog.\n\n"
),
)
parser.add_argument(
"-t",
"--tcp",
action="count",
default=0,
help=(
"Use TCP instead of UDP.\nUsed in the following handlers: socket and "
"syslog.\n\n"
),
)
parser.add_argument(
"-g",
"--get",
action="count",
default=0,
help=(
"Use a GET request instead of POST.\nUsed in the following handlers: "
"webhook.\n\n"
),
)
parser.add_argument(
"-v", "--version", help="Show version and exit.", action="store_true", default=False
)
parser.add_argument(
"-V",
"--verbose",
help="Increase output verbosity.",
action="store_true",
default=False,
)
def run():
"""CLI runner
"""
args = parser.parse_args()
gogo_logger = gogo.Gogo(__name__, verbose=args.verbose).get_logger("run")
if args.version:
gogo_logger.info("gogo v%s" % gogo.__version__)
exit(0)
counted = {"get", "tcp"}
appended = {"filename", "subject", "url", "host", "port"}
items = args._get_kwargs()
counted_args = [i for i in items if i[0] in counted]
appended_args = [i for i in items if i[0] in appended]
high_appended_args = [(k, v[0]) for k, v in appended_args]
high_counted_args = [(k, v > 0) for k, v in counted_args]
high_kwargs = dict(it.chain(high_appended_args, high_counted_args))
low_appended_args = [(k, v[-1]) for k, v in appended_args]
low_counted_args = [(k, v > 1) for k, v in counted_args]
low_kwargs = dict(it.chain(low_appended_args, low_counted_args))
high_hdlr = getattr(gogo.handlers, "%s_hdlr" % args.high_hdlr)
low_hdlr = getattr(gogo.handlers, "%s_hdlr" % args.low_hdlr)
high_format = getattr(gogo.formatters, "%s_formatter" % args.high_format)
low_format = getattr(gogo.formatters, "%s_formatter" % args.low_format)
nkwargs = {
"verbose": args.verbose,
"high_level": args.high_level.upper(),
"low_level": args.low_level.upper(),
"high_formatter": high_format,
"low_formatter": low_format,
"monolog": args.monolog,
"high_hdlr": high_hdlr(**high_kwargs),
"low_hdlr": low_hdlr(**low_kwargs),
}
logger = gogo.Gogo(args.name, **nkwargs).get_logger("runner")
try:
message = args.message.read()
except AttributeError:
message = args.message
getattr(logger, args.level)(message)
exit(0)
if __name__ == "__main__":
run()
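# Illustrative CLI usage, assuming this module is installed as the `gogo` console entry point:
#   gogo "something happened" --level warning --name myapp
#   echo "piped message" | gogo -l error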
|
test/nn/test_upsampling.py | steven-lang/e2cnn | 356 | 12673621 | import unittest
from unittest import TestCase
from e2cnn.nn import *
from e2cnn.gspaces import *
import numpy as np
class TestUpsampling(TestCase):
def test_cyclic_even_bilinear(self):
g = Rot2dOnR2(8)
self.check_upsampling(g, "bilinear")
def test_cyclic_odd_bilinear(self):
g = Rot2dOnR2(9)
self.check_upsampling(g, "bilinear")
def test_dihedral_even_bilinear(self):
g = FlipRot2dOnR2(8)
self.check_upsampling(g, "bilinear")
def test_dihedral_odd_bilinear(self):
        g = FlipRot2dOnR2(9)
self.check_upsampling(g, "bilinear")
def test_so2_bilinear(self):
g = Rot2dOnR2(8)
self.check_upsampling(g, "bilinear")
def test_o2_bilinear(self):
g = Rot2dOnR2(8)
self.check_upsampling(g, "bilinear")
# "NEAREST" method is not equivariant!! As a result, all the following tests fail
def test_cyclic_even_nearest(self):
g = Rot2dOnR2(8)
self.check_upsampling(g, "nearest")
def test_cyclic_odd_nearest(self):
g = Rot2dOnR2(9)
self.check_upsampling(g, "nearest")
def test_dihedral_even_nearest(self):
g = FlipRot2dOnR2(8)
self.check_upsampling(g, "nearest")
def test_dihedral_odd_nearest(self):
g = Rot2dOnR2(9)
self.check_upsampling(g, "nearest")
def test_so2_nearest(self):
g = Rot2dOnR2(8)
self.check_upsampling(g, "nearest")
def test_o2_nearest(self):
g = Rot2dOnR2(8)
self.check_upsampling(g, "nearest")
def check_upsampling(self, g, mode):
for s in [2, 3, 5]:
print(f"\nScale: {s}\n")
for r in g.representations.values():
r1 = FieldType(g, [r])
ul = R2Upsampling(r1, mode=mode, scale_factor=s)
ul.check_equivariance()
if __name__ == '__main__':
unittest.main()
|
third_party/blink/renderer/build/scripts/keyword_utils.py | zealoussnow/chromium | 14,668 | 12673629 | # Copyright 2017 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import json5_generator
def sort_keyword_properties_by_canonical_order(
css_properties, css_value_keywords_file, json5_file_parameters):
"""Sort all keyword CSS properties by the order of the keyword in
css_value_keywords.json5
Args:
css_properties: css_properties excluding extra fields.
css_value_keywords_file: file containing all css keywords.
json5_file_parameters: current context json5 parameters.
Returns:
New css_properties object with sorted keywords.
"""
css_values_dictionary = json5_generator.Json5File.load_from_files(
[css_value_keywords_file],
default_parameters=json5_file_parameters).name_dictionaries
css_values_dictionary = [x['name'].original for x in css_values_dictionary]
name_to_position_dictionary = dict(
zip(css_values_dictionary, range(len(css_values_dictionary))))
for css_property in css_properties:
if css_property['field_template'] == 'keyword' and len(
css_property['include_paths']) == 0:
css_property['keywords'] = sorted(
css_property['keywords'],
key=lambda x: name_to_position_dictionary[x])
return css_properties
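# Illustrative call, sketching how a code generator might use this helper; the variable
# names and file path below are assumptions:
#   sorted_properties = sort_keyword_properties_by_canonical_order(
#       css_properties, 'css_value_keywords.json5', default_parameters)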
|
laspy/vlrs/__init__.py | CCInc/laspy | 240 | 12673642 | from . import geotiff
from .known import BaseKnownVLR
from .vlr import VLR
|
tests/notebooks/mirror/ipynb_to_script_vscode_folding_markers/jupyter_with_raw_cell_in_body.py | st--/jupytext | 5,378 | 12673644 | <reponame>st--/jupytext
# ---
# jupyter:
# jupytext:
# cell_markers: region,endregion
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
1+2+3
# region active=""
# This is a raw cell
# endregion
# This is a markdown cell
|
lale/lib/autogen/kernel_pca.py | mfeffer/lale | 265 | 12673647 | <reponame>mfeffer/lale<filename>lale/lib/autogen/kernel_pca.py
from numpy import inf, nan
from sklearn.decomposition import KernelPCA as Op
from lale.docstrings import set_docstrings
from lale.operators import make_operator
class _KernelPCAImpl:
def __init__(self, **hyperparams):
self._hyperparams = hyperparams
self._wrapped_model = Op(**self._hyperparams)
def fit(self, X, y=None):
if y is not None:
self._wrapped_model.fit(X, y)
else:
self._wrapped_model.fit(X)
return self
def transform(self, X):
return self._wrapped_model.transform(X)
_hyperparams_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "inherited docstring for KernelPCA Kernel Principal component analysis (KPCA)",
"allOf": [
{
"type": "object",
"required": [
"n_components",
"kernel",
"gamma",
"degree",
"coef0",
"kernel_params",
"alpha",
"fit_inverse_transform",
"eigen_solver",
"tol",
"max_iter",
"remove_zero_eig",
"random_state",
"copy_X",
"n_jobs",
],
"relevantToOptimizer": [
"n_components",
"kernel",
"degree",
"coef0",
"alpha",
"eigen_solver",
"tol",
"max_iter",
"remove_zero_eig",
"copy_X",
],
"additionalProperties": False,
"properties": {
"n_components": {
"anyOf": [
{
"type": "integer",
"minimumForOptimizer": 2,
"maximumForOptimizer": 256,
"distribution": "uniform",
},
{"enum": [None]},
],
"default": None,
"description": "Number of components",
},
"kernel": {
"enum": [
"linear",
"poly",
"rbf",
"sigmoid",
"cosine",
"precomputed",
],
"default": "linear",
"description": "Kernel",
},
"gamma": {
"XXX TODO XXX": "float, default=1/n_features",
"description": "Kernel coefficient for rbf, poly and sigmoid kernels",
"enum": [None],
"default": None,
},
"degree": {
"type": "integer",
"minimumForOptimizer": 2,
"maximumForOptimizer": 3,
"distribution": "uniform",
"default": 3,
"description": "Degree for poly kernels",
},
"coef0": {
"type": "number",
"minimumForOptimizer": 0.0,
"maximumForOptimizer": 1.0,
"distribution": "uniform",
"default": 1,
"description": "Independent term in poly and sigmoid kernels",
},
"kernel_params": {
"XXX TODO XXX": "mapping of string to any, default=None",
"description": "Parameters (keyword arguments) and values for kernel passed as callable object",
"enum": [None],
"default": None,
},
"alpha": {
"anyOf": [
{"type": "integer", "forOptimizer": False},
{
"type": "number",
"minimumForOptimizer": 1e-10,
"maximumForOptimizer": 1.0,
"distribution": "loguniform",
},
],
"default": 1.0,
"description": "Hyperparameter of the ridge regression that learns the inverse transform (when fit_inverse_transform=True).",
},
"fit_inverse_transform": {
"type": "boolean",
"default": False,
"description": "Learn the inverse transform for non-precomputed kernels",
},
"eigen_solver": {
"enum": ["auto", "dense", "arpack"],
"default": "auto",
"description": "Select eigensolver to use",
},
"tol": {
"type": "number",
"minimumForOptimizer": 1e-08,
"maximumForOptimizer": 0.01,
"distribution": "loguniform",
"default": 0,
"description": "Convergence tolerance for arpack",
},
"max_iter": {
"anyOf": [
{
"type": "integer",
"minimumForOptimizer": 10,
"maximumForOptimizer": 1000,
"distribution": "uniform",
},
{"enum": [None]},
],
"default": None,
"description": "Maximum number of iterations for arpack",
},
"remove_zero_eig": {
"type": "boolean",
"default": False,
"description": "If True, then all components with zero eigenvalues are removed, so that the number of components in the output may be < n_components (and sometimes even zero due to numerical instability)",
},
"random_state": {
"anyOf": [
{"type": "integer"},
{"laleType": "numpy.random.RandomState"},
{"enum": [None]},
],
"default": None,
"description": "If int, random_state is the seed used by the random number generator; If RandomState instance, random_state is the random number generator; If None, the random number generator is the RandomState instance used by `np.random`",
},
"copy_X": {
"type": "boolean",
"default": True,
"description": "If True, input X is copied and stored by the model in the `X_fit_` attribute",
},
"n_jobs": {
"anyOf": [{"type": "integer"}, {"enum": [None]}],
"default": 1,
"description": "The number of parallel jobs to run",
},
},
}
],
}
_input_fit_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Fit the model from data in X.",
"type": "object",
"required": ["X"],
"properties": {
"X": {
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
"description": "Training vector, where n_samples in the number of samples and n_features is the number of features.",
}
},
}
_input_transform_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Transform X.",
"type": "object",
"required": ["X"],
"properties": {
"X": {"type": "array", "items": {"type": "array", "items": {"type": "number"}}}
},
}
_output_transform_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Transform X.",
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
}
_combined_schemas = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Combined schema for expected data and hyperparameters.",
"documentation_url": "https://scikit-learn.org/0.20/modules/generated/sklearn.decomposition.KernelPCA#sklearn-decomposition-kernelpca",
"import_from": "sklearn.decomposition",
"type": "object",
"tags": {"pre": [], "op": ["transformer"], "post": []},
"properties": {
"hyperparams": _hyperparams_schema,
"input_fit": _input_fit_schema,
"input_transform": _input_transform_schema,
"output_transform": _output_transform_schema,
},
}
KernelPCA = make_operator(_KernelPCAImpl, _combined_schemas)
set_docstrings(KernelPCA)
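if __name__ == "__main__":
    # Illustrative usage sketch (not part of the lale API surface); the random dataset and
    # hyperparameter choices below are assumptions.
    import numpy as np
    _X = np.random.rand(10, 5)
    _trained = KernelPCA(n_components=2, kernel="rbf").fit(_X)
    print(_trained.transform(_X).shape)  # expected: (10, 2)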
|
seahub/api2/endpoints/public_repos_search.py | samuelduann/seahub | 420 | 12673659 | # -*- coding: utf-8 -*-
import logging
from rest_framework.views import APIView
from rest_framework.authentication import SessionAuthentication
from rest_framework.permissions import IsAuthenticatedOrReadOnly
from rest_framework.response import Response
from rest_framework import status
from seaserv import seafile_api
from seahub.api2.authentication import TokenAuthentication
from seahub.api2.throttling import UserRateThrottle
from seahub.api2.utils import api_error
from seahub.utils.repo import is_valid_repo_id_format
from seahub.utils import HAS_FILE_SEARCH
from seahub.wiki.models import Wiki
if HAS_FILE_SEARCH:
from seahub.search.utils import search_files
logger = logging.getLogger('seafes')
class PublishedRepoSearchView(APIView):
""" Search public repos
"""
authentication_classes = (TokenAuthentication, SessionAuthentication)
permission_classes = (IsAuthenticatedOrReadOnly,)
throttle_classes = (UserRateThrottle, )
def get(self, request):
# is search supported
if not HAS_FILE_SEARCH:
error_msg = 'Search not supported.'
return api_error(status.HTTP_404_NOT_FOUND, error_msg)
# argument check
keyword = request.GET.get('q', None)
if not keyword:
error_msg = 'q invalid.'
return api_error(status.HTTP_400_BAD_REQUEST, error_msg)
repo_id = request.GET.get('repo_id', None)
if not is_valid_repo_id_format(repo_id):
error_msg = 'repo_id invalid.'
return api_error(status.HTTP_400_BAD_REQUEST, error_msg)
        # resource check
repo = seafile_api.get_repo(repo_id)
if not repo:
error_msg = 'Library %s not found.' % repo_id
return api_error(status.HTTP_404_NOT_FOUND, error_msg)
# permission check
wiki = Wiki.objects.filter(repo_id=repo_id)[0]
if not wiki.has_read_perm(request):
error_msg = 'Permission denied.'
return api_error(status.HTTP_403_FORBIDDEN, error_msg)
try:
current_page = int(request.GET.get('page', '1'))
per_page = int(request.GET.get('per_page', '10'))
if per_page > 100:
per_page = 100
except ValueError:
current_page = 1
per_page = 10
start = (current_page - 1) * per_page
size = per_page
if start < 0 or size < 0:
error_msg = 'page or per_page invalid.'
return api_error(status.HTTP_400_BAD_REQUEST, error_msg)
repo_id_map = {}
map_id = repo.origin_repo_id if repo.origin_repo_id else repo_id
repo_id_map[map_id] = repo
# search file
try:
results, total = search_files(
repo_id_map, None, keyword, None, start, size, org_id=None
)
except Exception as e:
logger.error(e)
error_msg = 'Internal Server Error'
return api_error(status.HTTP_500_INTERNAL_SERVER_ERROR, error_msg)
for result in results:
result.pop('repo', None)
result.pop('exists', None)
result.pop('last_modified_by', None)
result.pop('name_highlight', None)
result.pop('score', None)
result['repo_type'] = 'public'
has_more = True if total > current_page * per_page else False
return Response({
"total": total,
"results": results,
"has_more": has_more
})
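# Illustrative request handled by this view (the URL route and repo id are assumptions):
#   GET /api/v2.1/published-repos/search/?q=keyword&repo_id=<repo-id>&page=1&per_page=25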
|
example/app_namespace.py | prabhpreet332/Flask-SocketIO | 4,639 | 12673683 | <filename>example/app_namespace.py<gh_stars>1000+
from threading import Lock
from flask import Flask, render_template, session, request
from flask_socketio import SocketIO, Namespace, emit, join_room, leave_room, \
close_room, rooms, disconnect
# Set this variable to "threading", "eventlet" or "gevent" to test the
# different async modes, or leave it set to None for the application to choose
# the best option based on installed packages.
async_mode = None
app = Flask(__name__)
app.config['SECRET_KEY'] = 'secret!'
socketio = SocketIO(app, async_mode=async_mode)
thread = None
thread_lock = Lock()
def background_thread():
"""Example of how to send server generated events to clients."""
count = 0
while True:
socketio.sleep(10)
count += 1
socketio.emit('my_response',
{'data': 'Server generated event', 'count': count})
@app.route('/')
def index():
return render_template('index.html', async_mode=socketio.async_mode)
class MyNamespace(Namespace):
def on_my_event(self, message):
session['receive_count'] = session.get('receive_count', 0) + 1
emit('my_response',
{'data': message['data'], 'count': session['receive_count']})
def on_my_broadcast_event(self, message):
session['receive_count'] = session.get('receive_count', 0) + 1
emit('my_response',
{'data': message['data'], 'count': session['receive_count']},
broadcast=True)
def on_join(self, message):
join_room(message['room'])
session['receive_count'] = session.get('receive_count', 0) + 1
emit('my_response',
{'data': 'In rooms: ' + ', '.join(rooms()),
'count': session['receive_count']})
def on_leave(self, message):
leave_room(message['room'])
session['receive_count'] = session.get('receive_count', 0) + 1
emit('my_response',
{'data': 'In rooms: ' + ', '.join(rooms()),
'count': session['receive_count']})
def on_close_room(self, message):
session['receive_count'] = session.get('receive_count', 0) + 1
emit('my_response', {'data': 'Room ' + message['room'] + ' is closing.',
'count': session['receive_count']},
room=message['room'])
close_room(message['room'])
def on_my_room_event(self, message):
session['receive_count'] = session.get('receive_count', 0) + 1
emit('my_response',
{'data': message['data'], 'count': session['receive_count']},
room=message['room'])
def on_disconnect_request(self):
session['receive_count'] = session.get('receive_count', 0) + 1
emit('my_response',
{'data': 'Disconnected!', 'count': session['receive_count']})
disconnect()
def on_my_ping(self):
emit('my_pong')
def on_connect(self):
global thread
with thread_lock:
if thread is None:
thread = socketio.start_background_task(background_thread)
emit('my_response', {'data': 'Connected', 'count': 0})
def on_disconnect(self):
print('Client disconnected', request.sid)
socketio.on_namespace(MyNamespace('/'))
if __name__ == '__main__':
socketio.run(app)
|
Python_Discord_Bot_JE/venv/Lib/site-packages/pip/_vendor/chardet/universaldetector.py | JE-Chen/je_old_repo | 548 | 12673716 | <filename>Python_Discord_Bot_JE/venv/Lib/site-packages/pip/_vendor/chardet/universaldetector.py
######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Universal charset detector code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 2001
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# <NAME> - port to Python
# <NAME> - original C code
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
"""
Module containing the UniversalDetector detector class, which is the primary
class a user of ``chardet`` should use.
:author: <NAME> (initial port to Python)
:author: <NAME> (original C code)
:author: <NAME> (major refactoring for 3.0)
:author: <NAME>
"""
import codecs
import logging
import re
from .charsetgroupprober import CharSetGroupProber
from .enums import InputState, LanguageFilter, ProbingState
from .escprober import EscCharSetProber
from .latin1prober import Latin1Prober
from .mbcsgroupprober import MBCSGroupProber
from .sbcsgroupprober import SBCSGroupProber
class UniversalDetector(object):
"""
The ``UniversalDetector`` class underlies the ``chardet.detect`` function
and coordinates all of the different charset probers.
To get a ``dict`` containing an encoding and its confidence, you can simply
run:
.. code::
u = UniversalDetector()
u.feed(some_bytes)
u.close()
detected = u.result
"""
MINIMUM_THRESHOLD = 0.20
HIGH_BYTE_DETECTOR = re.compile(b'[\x80-\xFF]')
ESC_DETECTOR = re.compile(b'(\033|~{)')
WIN_BYTE_DETECTOR = re.compile(b'[\x80-\x9F]')
ISO_WIN_MAP = {'iso-8859-1': 'Windows-1252',
'iso-8859-2': 'Windows-1250',
'iso-8859-5': 'Windows-1251',
'iso-8859-6': 'Windows-1256',
'iso-8859-7': 'Windows-1253',
'iso-8859-8': 'Windows-1255',
'iso-8859-9': 'Windows-1254',
'iso-8859-13': 'Windows-1257'}
def __init__(self, lang_filter=LanguageFilter.ALL):
self._esc_charset_prober = None
self._charset_probers = []
self.result = None
self.done = None
self._got_data = None
self._input_state = None
self._last_char = None
self.lang_filter = lang_filter
self.logger = logging.getLogger(__name__)
self._has_win_bytes = None
self.reset()
def reset(self):
"""
Reset the UniversalDetector and all of its probers back to their
initial states. This is called by ``__init__``, so you only need to
call this directly in between analyses of different documents.
"""
self.result = {'encoding': None, 'confidence': 0.0, 'language': None}
self.done = False
self._got_data = False
self._has_win_bytes = False
self._input_state = InputState.PURE_ASCII
self._last_char = b''
if self._esc_charset_prober:
self._esc_charset_prober.reset()
for prober in self._charset_probers:
prober.reset()
def feed(self, byte_str):
"""
Takes a chunk of a document and feeds it through all of the relevant
charset probers.
After calling ``feed``, you can check the value of the ``done``
attribute to see if you need to continue feeding the
``UniversalDetector`` more data, or if it has made a prediction
(in the ``result`` attribute).
.. note::
You should always call ``close`` when you're done feeding in your
document if ``done`` is not already ``True``.
"""
if self.done:
return
if not len(byte_str):
return
if not isinstance(byte_str, bytearray):
byte_str = bytearray(byte_str)
# First check for known BOMs, since these are guaranteed to be correct
if not self._got_data:
# If the data starts with BOM, we know it is UTF
if byte_str.startswith(codecs.BOM_UTF8):
# EF BB BF UTF-8 with BOM
self.result = {'encoding': "UTF-8-SIG",
'confidence': 1.0,
'language': ''}
elif byte_str.startswith((codecs.BOM_UTF32_LE,
codecs.BOM_UTF32_BE)):
# FF FE 00 00 UTF-32, little-endian BOM
# 00 00 FE FF UTF-32, big-endian BOM
self.result = {'encoding': "UTF-32",
'confidence': 1.0,
'language': ''}
elif byte_str.startswith(b'\xFE\xFF\x00\x00'):
# FE FF 00 00 UCS-4, unusual octet order BOM (3412)
self.result = {'encoding': "X-ISO-10646-UCS-4-3412",
'confidence': 1.0,
'language': ''}
elif byte_str.startswith(b'\x00\x00\xFF\xFE'):
# 00 00 FF FE UCS-4, unusual octet order BOM (2143)
self.result = {'encoding': "X-ISO-10646-UCS-4-2143",
'confidence': 1.0,
'language': ''}
elif byte_str.startswith((codecs.BOM_LE, codecs.BOM_BE)):
# FF FE UTF-16, little endian BOM
# FE FF UTF-16, big endian BOM
self.result = {'encoding': "UTF-16",
'confidence': 1.0,
'language': ''}
self._got_data = True
if self.result['encoding'] is not None:
self.done = True
return
            # If none of those matched and we've only seen ASCII so far, check
            # for high bytes and escape sequences
if self._input_state == InputState.PURE_ASCII:
if self.HIGH_BYTE_DETECTOR.search(byte_str):
self._input_state = InputState.HIGH_BYTE
elif self._input_state == InputState.PURE_ASCII and \
self.ESC_DETECTOR.search(self._last_char + byte_str):
self._input_state = InputState.ESC_ASCII
self._last_char = byte_str[-1:]
# If we've seen escape sequences, use the EscCharSetProber, which
# uses a simple state machine to check for known escape sequences in
# HZ and ISO-2022 encodings, since those are the only encodings that
# use such sequences.
if self._input_state == InputState.ESC_ASCII:
if not self._esc_charset_prober:
self._esc_charset_prober = EscCharSetProber(self.lang_filter)
if self._esc_charset_prober.feed(byte_str) == ProbingState.FOUND_IT:
self.result = {'encoding':
self._esc_charset_prober.charset_name,
'confidence':
self._esc_charset_prober.get_confidence(),
'language':
self._esc_charset_prober.language}
self.done = True
# If we've seen high bytes (i.e., those with values greater than 127),
# we need to do more complicated checks using all our multi-byte and
# single-byte probers that are left. The single-byte probers
# use character bigram distributions to determine the encoding, whereas
# the multi-byte probers use a combination of character unigram and
# bigram distributions.
elif self._input_state == InputState.HIGH_BYTE:
if not self._charset_probers:
self._charset_probers = [MBCSGroupProber(self.lang_filter)]
# If we're checking non-CJK encodings, use single-byte prober
if self.lang_filter & LanguageFilter.NON_CJK:
self._charset_probers.append(SBCSGroupProber())
self._charset_probers.append(Latin1Prober())
for prober in self._charset_probers:
if prober.feed(byte_str) == ProbingState.FOUND_IT:
self.result = {'encoding': prober.charset_name,
'confidence': prober.get_confidence(),
'language': prober.language}
self.done = True
break
if self.WIN_BYTE_DETECTOR.search(byte_str):
self._has_win_bytes = True
def close(self):
"""
Stop analyzing the current document and come up with a final
prediction.
:returns: The ``result`` attribute, a ``dict`` with the keys
`encoding`, `confidence`, and `language`.
"""
# Don't bother with checks if we're already done
if self.done:
return self.result
self.done = True
if not self._got_data:
self.logger.debug('no data received!')
# Default to ASCII if it is all we've seen so far
elif self._input_state == InputState.PURE_ASCII:
self.result = {'encoding': 'ascii',
'confidence': 1.0,
'language': ''}
# If we have seen non-ASCII, return the best that met MINIMUM_THRESHOLD
elif self._input_state == InputState.HIGH_BYTE:
prober_confidence = None
max_prober_confidence = 0.0
max_prober = None
for prober in self._charset_probers:
if not prober:
continue
prober_confidence = prober.get_confidence()
if prober_confidence > max_prober_confidence:
max_prober_confidence = prober_confidence
max_prober = prober
if max_prober and (max_prober_confidence > self.MINIMUM_THRESHOLD):
charset_name = max_prober.charset_name
lower_charset_name = max_prober.charset_name.lower()
confidence = max_prober.get_confidence()
# Use Windows encoding name instead of ISO-8859 if we saw any
# extra Windows-specific bytes
if lower_charset_name.startswith('iso-8859'):
if self._has_win_bytes:
charset_name = self.ISO_WIN_MAP.get(lower_charset_name,
charset_name)
self.result = {'encoding': charset_name,
'confidence': confidence,
'language': max_prober.language}
# Log all prober confidences if none met MINIMUM_THRESHOLD
if self.logger.getEffectiveLevel() == logging.DEBUG:
if self.result['encoding'] is None:
self.logger.debug('no probers hit minimum threshold')
for group_prober in self._charset_probers:
if not group_prober:
continue
if isinstance(group_prober, CharSetGroupProber):
for prober in group_prober.probers:
self.logger.debug('%s %s confidence = %s',
prober.charset_name,
prober.language,
prober.get_confidence())
else:
self.logger.debug('%s %s confidence = %s',
prober.charset_name,
prober.language,
prober.get_confidence())
return self.result
|
mindinsight/datavisual/data_transform/graph/node_tree.py | mindspore-ai/mindinsight | 216 | 12673730 | # Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""
This file defines the nodes of the graph and associated base types.
"""
from mindinsight.utils.exceptions import ParamValueError
from mindinsight.datavisual.common.log import logger as log
class NodeTree:
"""A class for building a node tree."""
def __init__(self, node_name='', node_type=None):
self.node_name = node_name
self._node_type = node_type
self._children = {}
@property
def node_type(self):
"""The property of node type."""
return self._node_type
@node_type.setter
def node_type(self, value):
"""Set the node type."""
self._node_type = value
def add(self, name, node_type=None):
"""Add sub node."""
sub_name = '/'.join([self.node_name, name]) if self.node_name else name
sub_node = NodeTree(sub_name, node_type)
self._children[name] = sub_node
return sub_node
def get(self, sub_name):
"""Get sub node."""
return self._children.get(sub_name)
def get_children(self):
"""Get all childrens."""
for name_scope, sub_node in self._children.items():
yield name_scope, sub_node
def remove(self, sub_name):
"""Remove sub node."""
try:
self._children.pop(sub_name)
except KeyError as err:
log.error("Failed to find node %s. %s", sub_name, err)
raise ParamValueError("Failed to find node {}".format(sub_name))
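if __name__ == '__main__':
    # Illustrative usage sketch; the scope and node names below are assumptions, not real
    # MindSpore graph output.
    root = NodeTree('')
    scope = root.add('Default', node_type='name_scope')
    scope.add('Conv2D', node_type='node')
    assert root.get('Default') is scope
    for name, child in root.get_children():
        print(name, child.node_name)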
|
Patterns/VritikaMalhotra_Pattern.py | sanchit781/HACKTOBERFEST2021_PATTERN | 229 | 12673737 | #!/usr/bin/env python
# coding: utf-8
# In[1]:
def contnum(n):
# initializing starting number
num = 1
# outer loop to handle number of rows
for i in range(0, n):
# inner loop to handle number of columns
# values changing acc. to outer loop
for j in range(0, i+1):
# printing number
print(num, end=" ")
# incrementing number at each column
num = num + 1
# ending line after each row
print("\r")
n = 5
# sending 5 as argument
# calling Function
contnum(n)
# In[ ]:
|
migrations/versions/15bd4b7e6622_add_filecoverage_unique_constraint.py | vault-the/changes | 443 | 12673740 | <filename>migrations/versions/15bd4b7e6622_add_filecoverage_unique_constraint.py
"""Add FileCoverage unique constraint
Revision ID: 15bd4b7e6622
Revises: <PASSWORD>
Create Date: 2014-05-09 11:06:50.845168
"""
# revision identifiers, used by Alembic.
revision = '15bd4b7e6622'
down_revision = '3d8177ef<PASSWORD>'
from alembic import op
def upgrade():
op.create_unique_constraint('unq_job_filname', 'filecoverage', ['job_id', 'filename'])
def downgrade():
op.drop_constraint('unq_job_filname', 'filecoverage')
|
xcit.py | artsousa/xcit | 578 | 12673741 | <gh_stars>100-1000
# Copyright (c) 2015-present, Facebook, Inc.
# All rights reserved.
"""
Implementation of Cross-Covariance Image Transformer (XCiT)
Based on timm and DeiT code bases
https://github.com/rwightman/pytorch-image-models/tree/master/timm
https://github.com/facebookresearch/deit/
"""
import math
import torch
import torch.nn as nn
from functools import partial
from timm.models.vision_transformer import _cfg, Mlp
from timm.models.registry import register_model
from timm.models.layers import DropPath, trunc_normal_, to_2tuple
class PositionalEncodingFourier(nn.Module):
"""
Positional encoding relying on a fourier kernel matching the one used in the
"Attention is all of Need" paper. The implementation builds on DeTR code
https://github.com/facebookresearch/detr/blob/master/models/position_encoding.py
"""
def __init__(self, hidden_dim=32, dim=768, temperature=10000):
super().__init__()
self.token_projection = nn.Conv2d(hidden_dim * 2, dim, kernel_size=1)
self.scale = 2 * math.pi
self.temperature = temperature
self.hidden_dim = hidden_dim
self.dim = dim
def forward(self, B, H, W):
mask = torch.zeros(B, H, W).bool().to(self.token_projection.weight.device)
not_mask = ~mask
y_embed = not_mask.cumsum(1, dtype=torch.float32)
x_embed = not_mask.cumsum(2, dtype=torch.float32)
eps = 1e-6
y_embed = y_embed / (y_embed[:, -1:, :] + eps) * self.scale
x_embed = x_embed / (x_embed[:, :, -1:] + eps) * self.scale
dim_t = torch.arange(self.hidden_dim, dtype=torch.float32, device=mask.device)
dim_t = self.temperature ** (2 * (dim_t // 2) / self.hidden_dim)
pos_x = x_embed[:, :, :, None] / dim_t
pos_y = y_embed[:, :, :, None] / dim_t
pos_x = torch.stack((pos_x[:, :, :, 0::2].sin(),
pos_x[:, :, :, 1::2].cos()), dim=4).flatten(3)
pos_y = torch.stack((pos_y[:, :, :, 0::2].sin(),
pos_y[:, :, :, 1::2].cos()), dim=4).flatten(3)
pos = torch.cat((pos_y, pos_x), dim=3).permute(0, 3, 1, 2)
pos = self.token_projection(pos)
return pos
def conv3x3(in_planes, out_planes, stride=1):
"""3x3 convolution with padding"""
return torch.nn.Sequential(
nn.Conv2d(
in_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias=False
),
nn.SyncBatchNorm(out_planes)
)
class ConvPatchEmbed(nn.Module):
""" Image to Patch Embedding using multiple convolutional layers
"""
def __init__(self, img_size=224, patch_size=16, in_chans=3, embed_dim=768):
super().__init__()
img_size = to_2tuple(img_size)
patch_size = to_2tuple(patch_size)
num_patches = (img_size[1] // patch_size[1]) * (img_size[0] // patch_size[0])
self.img_size = img_size
self.patch_size = patch_size
self.num_patches = num_patches
if patch_size[0] == 16:
self.proj = torch.nn.Sequential(
conv3x3(3, embed_dim // 8, 2),
nn.GELU(),
conv3x3(embed_dim // 8, embed_dim // 4, 2),
nn.GELU(),
conv3x3(embed_dim // 4, embed_dim // 2, 2),
nn.GELU(),
conv3x3(embed_dim // 2, embed_dim, 2),
)
elif patch_size[0] == 8:
self.proj = torch.nn.Sequential(
conv3x3(3, embed_dim // 4, 2),
nn.GELU(),
conv3x3(embed_dim // 4, embed_dim // 2, 2),
nn.GELU(),
conv3x3(embed_dim // 2, embed_dim, 2),
)
else:
raise("For convolutional projection, patch size has to be in [8, 16]")
def forward(self, x, padding_size=None):
B, C, H, W = x.shape
x = self.proj(x)
Hp, Wp = x.shape[2], x.shape[3]
x = x.flatten(2).transpose(1, 2)
return x, (Hp, Wp)
class LPI(nn.Module):
"""
Local Patch Interaction module that allows explicit communication between tokens in 3x3 windows
    to augment the implicit communication performed by the block diagonal scatter attention.
Implemented using 2 layers of separable 3x3 convolutions with GeLU and BatchNorm2d
"""
def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU,
drop=0., kernel_size=3):
super().__init__()
out_features = out_features or in_features
padding = kernel_size // 2
self.conv1 = torch.nn.Conv2d(in_features, out_features, kernel_size=kernel_size,
padding=padding, groups=out_features)
self.act = act_layer()
self.bn = nn.SyncBatchNorm(in_features)
self.conv2 = torch.nn.Conv2d(in_features, out_features, kernel_size=kernel_size,
padding=padding, groups=out_features)
def forward(self, x, H, W):
B, N, C = x.shape
x = x.permute(0, 2, 1).reshape(B, C, H, W)
x = self.conv1(x)
x = self.act(x)
x = self.bn(x)
x = self.conv2(x)
x = x.reshape(B, C, N).permute(0, 2, 1)
return x
class ClassAttention(nn.Module):
"""Class Attention Layer as in CaiT https://arxiv.org/abs/2103.17239
"""
def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0., proj_drop=0.):
super().__init__()
self.num_heads = num_heads
head_dim = dim // num_heads
self.scale = qk_scale or head_dim ** -0.5
self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
self.attn_drop = nn.Dropout(attn_drop)
self.proj = nn.Linear(dim, dim)
self.proj_drop = nn.Dropout(proj_drop)
def forward(self, x):
B, N, C = x.shape
qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads)
qkv = qkv.permute(2, 0, 3, 1, 4)
q, k, v = qkv[0], qkv[1], qkv[2] # make torchscript happy (cannot use tensor as tuple)
qc = q[:, :, 0:1] # CLS token
attn_cls = (qc * k).sum(dim=-1) * self.scale
attn_cls = attn_cls.softmax(dim=-1)
attn_cls = self.attn_drop(attn_cls)
cls_tkn = (attn_cls.unsqueeze(2) @ v).transpose(1, 2).reshape(B, 1, C)
cls_tkn = self.proj(cls_tkn)
x = torch.cat([self.proj_drop(cls_tkn), x[:, 1:]], dim=1)
return x
class ClassAttentionBlock(nn.Module):
"""Class Attention Layer as in CaiT https://arxiv.org/abs/2103.17239
"""
def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop=0.,
attn_drop=0., drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm, eta=None,
tokens_norm=False):
super().__init__()
self.norm1 = norm_layer(dim)
self.attn = ClassAttention(
dim, num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop,
proj_drop=drop
)
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
self.norm2 = norm_layer(dim)
mlp_hidden_dim = int(dim * mlp_ratio)
self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer,
drop=drop)
if eta is not None: # LayerScale Initialization (no layerscale when None)
self.gamma1 = nn.Parameter(eta * torch.ones(dim), requires_grad=True)
self.gamma2 = nn.Parameter(eta * torch.ones(dim), requires_grad=True)
else:
self.gamma1, self.gamma2 = 1.0, 1.0
# FIXME: A hack for models pre-trained with layernorm over all the tokens not just the CLS
self.tokens_norm = tokens_norm
def forward(self, x, H, W, mask=None):
x = x + self.drop_path(self.gamma1 * self.attn(self.norm1(x)))
if self.tokens_norm:
x = self.norm2(x)
else:
x[:, 0:1] = self.norm2(x[:, 0:1])
x_res = x
cls_token = x[:, 0:1]
cls_token = self.gamma2 * self.mlp(cls_token)
x = torch.cat([cls_token, x[:, 1:]], dim=1)
x = x_res + self.drop_path(x)
return x
class XCA(nn.Module):
""" Cross-Covariance Attention (XCA) operation where the channels are updated using a weighted
sum. The weights are obtained from the (softmax normalized) Cross-covariance
matrix (Q^T K \\in d_h \\times d_h)
"""
def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0., proj_drop=0.):
super().__init__()
self.num_heads = num_heads
self.temperature = nn.Parameter(torch.ones(num_heads, 1, 1))
self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
self.attn_drop = nn.Dropout(attn_drop)
self.proj = nn.Linear(dim, dim)
self.proj_drop = nn.Dropout(proj_drop)
def forward(self, x):
B, N, C = x.shape
qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads)
qkv = qkv.permute(2, 0, 3, 1, 4)
q, k, v = qkv[0], qkv[1], qkv[2] # make torchscript happy (cannot use tensor as tuple)
q = q.transpose(-2, -1)
k = k.transpose(-2, -1)
v = v.transpose(-2, -1)
q = torch.nn.functional.normalize(q, dim=-1)
k = torch.nn.functional.normalize(k, dim=-1)
attn = (q @ k.transpose(-2, -1)) * self.temperature
attn = attn.softmax(dim=-1)
attn = self.attn_drop(attn)
x = (attn @ v).permute(0, 3, 1, 2).reshape(B, N, C)
x = self.proj(x)
x = self.proj_drop(x)
return x
@torch.jit.ignore
def no_weight_decay(self):
return {'temperature'}
class XCABlock(nn.Module):
def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop=0.,
attn_drop=0., drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm,
num_tokens=196, eta=None):
super().__init__()
self.norm1 = norm_layer(dim)
self.attn = XCA(
dim, num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop,
proj_drop=drop
)
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
self.norm2 = norm_layer(dim)
mlp_hidden_dim = int(dim * mlp_ratio)
self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer,
drop=drop)
self.norm3 = norm_layer(dim)
self.local_mp = LPI(in_features=dim, act_layer=act_layer)
self.gamma1 = nn.Parameter(eta * torch.ones(dim), requires_grad=True)
self.gamma2 = nn.Parameter(eta * torch.ones(dim), requires_grad=True)
self.gamma3 = nn.Parameter(eta * torch.ones(dim), requires_grad=True)
def forward(self, x, H, W):
x = x + self.drop_path(self.gamma1 * self.attn(self.norm1(x)))
x = x + self.drop_path(self.gamma3 * self.local_mp(self.norm3(x), H, W))
x = x + self.drop_path(self.gamma2 * self.mlp(self.norm2(x)))
return x
class XCiT(nn.Module):
"""
Based on timm and DeiT code bases
https://github.com/rwightman/pytorch-image-models/tree/master/timm
https://github.com/facebookresearch/deit/
"""
def __init__(self, img_size=224, patch_size=16, in_chans=3, num_classes=1000, embed_dim=768,
depth=12, num_heads=12, mlp_ratio=4., qkv_bias=True, qk_scale=None,
drop_rate=0., attn_drop_rate=0., drop_path_rate=0., norm_layer=None,
cls_attn_layers=2, use_pos=True, patch_proj='linear', eta=None, tokens_norm=False):
"""
Args:
img_size (int, tuple): input image size
patch_size (int, tuple): patch size
in_chans (int): number of input channels
num_classes (int): number of classes for classification head
embed_dim (int): embedding dimension
depth (int): depth of transformer
num_heads (int): number of attention heads
mlp_ratio (int): ratio of mlp hidden dim to embedding dim
qkv_bias (bool): enable bias for qkv if True
qk_scale (float): override default qk scale of head_dim ** -0.5 if set
drop_rate (float): dropout rate
attn_drop_rate (float): attention dropout rate
drop_path_rate (float): stochastic depth rate
norm_layer: (nn.Module): normalization layer
cls_attn_layers: (int) Depth of Class attention layers
use_pos: (bool) whether to use positional encoding
eta: (float) layerscale initialization value
tokens_norm: (bool) Whether to normalize all tokens or just the cls_token in the CA
"""
super().__init__()
self.num_classes = num_classes
self.num_features = self.embed_dim = embed_dim
norm_layer = norm_layer or partial(nn.LayerNorm, eps=1e-6)
self.patch_embed = ConvPatchEmbed(img_size=img_size, embed_dim=embed_dim,
patch_size=patch_size)
num_patches = self.patch_embed.num_patches
self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim))
self.pos_drop = nn.Dropout(p=drop_rate)
dpr = [drop_path_rate for i in range(depth)]
self.blocks = nn.ModuleList([
XCABlock(
dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias,
qk_scale=qk_scale, drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i],
norm_layer=norm_layer, num_tokens=num_patches, eta=eta)
for i in range(depth)])
self.cls_attn_blocks = nn.ModuleList([
ClassAttentionBlock(
dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias,
qk_scale=qk_scale, drop=drop_rate, attn_drop=attn_drop_rate, norm_layer=norm_layer,
eta=eta, tokens_norm=tokens_norm)
for i in range(cls_attn_layers)])
self.norm = norm_layer(embed_dim)
self.head = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity()
self.pos_embeder = PositionalEncodingFourier(dim=embed_dim)
self.use_pos = use_pos
# Classifier head
trunc_normal_(self.cls_token, std=.02)
self.apply(self._init_weights)
def _init_weights(self, m):
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.LayerNorm):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
@torch.jit.ignore
def no_weight_decay(self):
return {'pos_embed', 'cls_token', 'dist_token'}
def forward_features(self, x):
B, C, H, W = x.shape
x, (Hp, Wp) = self.patch_embed(x)
if self.use_pos:
pos_encoding = self.pos_embeder(B, Hp, Wp).reshape(B, -1, x.shape[1]).permute(0, 2, 1)
x = x + pos_encoding
x = self.pos_drop(x)
for blk in self.blocks:
x = blk(x, Hp, Wp)
cls_tokens = self.cls_token.expand(B, -1, -1)
x = torch.cat((cls_tokens, x), dim=1)
for blk in self.cls_attn_blocks:
x = blk(x, Hp, Wp)
x = self.norm(x)[:, 0]
return x
def forward(self, x):
x = self.forward_features(x)
x = self.head(x)
if self.training:
return x, x
else:
return x
# Patch size 16x16 models
@register_model
def xcit_nano_12_p16(pretrained=False, **kwargs):
model = XCiT(
patch_size=16, embed_dim=128, depth=12, num_heads=4, mlp_ratio=4, qkv_bias=True,
norm_layer=partial(nn.LayerNorm, eps=1e-6), eta=1.0, tokens_norm=False, **kwargs)
model.default_cfg = _cfg()
return model
@register_model
def xcit_tiny_12_p16(pretrained=False, **kwargs):
model = XCiT(
patch_size=16, embed_dim=192, depth=12, num_heads=4, mlp_ratio=4, qkv_bias=True,
norm_layer=partial(nn.LayerNorm, eps=1e-6), eta=1.0, tokens_norm=True, **kwargs)
model.default_cfg = _cfg()
return model
@register_model
def xcit_small_12_p16(pretrained=False, **kwargs):
model = XCiT(
patch_size=16, embed_dim=384, depth=12, num_heads=8, mlp_ratio=4, qkv_bias=True,
norm_layer=partial(nn.LayerNorm, eps=1e-6), eta=1.0, tokens_norm=True, **kwargs)
model.default_cfg = _cfg()
return model
@register_model
def xcit_tiny_24_p16(pretrained=False, **kwargs):
model = XCiT(
patch_size=16, embed_dim=192, depth=24, num_heads=4, mlp_ratio=4, qkv_bias=True,
norm_layer=partial(nn.LayerNorm, eps=1e-6), eta=1e-5, tokens_norm=True, **kwargs)
model.default_cfg = _cfg()
return model
@register_model
def xcit_small_24_p16(pretrained=False, **kwargs):
model = XCiT(
patch_size=16, embed_dim=384, depth=24, num_heads=8, mlp_ratio=4, qkv_bias=True,
norm_layer=partial(nn.LayerNorm, eps=1e-6), eta=1e-5, tokens_norm=True, **kwargs)
model.default_cfg = _cfg()
return model
@register_model
def xcit_medium_24_p16(pretrained=False, **kwargs):
model = XCiT(
patch_size=16, embed_dim=512, depth=24, num_heads=8, mlp_ratio=4, qkv_bias=True,
norm_layer=partial(nn.LayerNorm, eps=1e-6), eta=1e-5, tokens_norm=True, **kwargs)
model.default_cfg = _cfg()
return model
@register_model
def xcit_large_24_p16(pretrained=False, **kwargs):
model = XCiT(
patch_size=16, embed_dim=768, depth=24, num_heads=16, mlp_ratio=4, qkv_bias=True,
norm_layer=partial(nn.LayerNorm, eps=1e-6), eta=1e-5, tokens_norm=True, **kwargs)
model.default_cfg = _cfg()
return model
# Patch size 8x8 models
@register_model
def xcit_nano_12_p8(pretrained=False, **kwargs):
model = XCiT(
patch_size=8, embed_dim=128, depth=12, num_heads=4, mlp_ratio=4, qkv_bias=True,
norm_layer=partial(nn.LayerNorm, eps=1e-6), eta=1.0, tokens_norm=False, **kwargs)
model.default_cfg = _cfg()
return model
@register_model
def xcit_tiny_12_p8(pretrained=False, **kwargs):
model = XCiT(
patch_size=8, embed_dim=192, depth=12, num_heads=4, mlp_ratio=4, qkv_bias=True,
norm_layer=partial(nn.LayerNorm, eps=1e-6), eta=1.0, tokens_norm=True, **kwargs)
model.default_cfg = _cfg()
return model
@register_model
def xcit_small_12_p8(pretrained=False, **kwargs):
model = XCiT(
patch_size=8, embed_dim=384, depth=12, num_heads=8, mlp_ratio=4, qkv_bias=True,
norm_layer=partial(nn.LayerNorm, eps=1e-6), eta=1.0, tokens_norm=True, **kwargs)
model.default_cfg = _cfg()
return model
@register_model
def xcit_tiny_24_p8(pretrained=False, **kwargs):
model = XCiT(
patch_size=8, embed_dim=192, depth=24, num_heads=4, mlp_ratio=4, qkv_bias=True,
norm_layer=partial(nn.LayerNorm, eps=1e-6), eta=1e-5, tokens_norm=True, **kwargs)
model.default_cfg = _cfg()
return model
@register_model
def xcit_small_24_p8(pretrained=False, **kwargs):
model = XCiT(
patch_size=8, embed_dim=384, depth=24, num_heads=8, mlp_ratio=4, qkv_bias=True,
norm_layer=partial(nn.LayerNorm, eps=1e-6), eta=1e-5, tokens_norm=True, **kwargs)
model.default_cfg = _cfg()
return model
@register_model
def xcit_medium_24_p8(pretrained=False, **kwargs):
model = XCiT(
patch_size=8, embed_dim=512, depth=24, num_heads=8, mlp_ratio=4, qkv_bias=True,
norm_layer=partial(nn.LayerNorm, eps=1e-6), eta=1e-5, tokens_norm=True, **kwargs)
model.default_cfg = _cfg()
return model
@register_model
def xcit_large_24_p8(pretrained=False, **kwargs):
model = XCiT(
patch_size=8, embed_dim=768, depth=24, num_heads=16, mlp_ratio=4, qkv_bias=True,
norm_layer=partial(nn.LayerNorm, eps=1e-6), eta=1e-5, tokens_norm=True, **kwargs)
model.default_cfg = _cfg()
return model
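# Illustrative usage sketch (assumes a CUDA device, since the patch embedding and LPI layers
# use SyncBatchNorm; no pretrained weights are loaded):
#   model = xcit_nano_12_p8(num_classes=10).cuda().eval()
#   with torch.no_grad():
#       logits = model(torch.randn(2, 3, 224, 224, device="cuda"))  # -> shape (2, 10)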
|
python2/pracmln/mln/database.py | seba90/pracmln | 123 | 12673744 | #
# Markov Logic Networks -- Databases
#
# (C) 2006-2015 by <NAME>, (<EMAIL>)
# <NAME> (<EMAIL>)
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from dnutils import logs, ifnone, out
from dnutils.console import barstr
from util import stripComments, mergedom
from pracmln.logic.common import Logic
from errors import NoSuchPredicateError
from pracmln.logic.fol import FirstOrderLogic
import os
from StringIO import StringIO
import sys
from pracmln.mln.util import colorize
from pracmln.mln.errors import MLNParsingError
import traceback
from collections import defaultdict
import re
from pracmln.utils.project import mlnpath
logger = logs.getlogger(__name__)
class Database(object):
'''
Represents an MLN Database, which consists of a set of ground atoms, each assigned a truth value.
:member mln: the respective :class:`mln.base.MLN` object that this Database is associated with
    :member domains: dict mapping the variable domains specific to this database (i.e. without
        values from the MLN domains which are not present in the DB) to the set of possible values.
:member evidence: dictionary mapping ground atom strings to truth values.
:param mln: the :class:`mln.base.MLN` instance that the database shall be associated with.
:param evidence: a dictionary mapping ground atoms to their truth values.
:param dbfile: if specified, a database is loaded from the given file path.
:param ignore_unknown_preds: see :func:`mln.database.parse_db`
'''
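    # Illustrative usage sketch; the predicate and constant names below are assumptions and
    # must be declared in the associated MLN:
    #   db = Database(mln)
    #   db.add('foo(Anna, Bob)', 1)        # hard positive evidence
    #   db.add('!bar(Anna)')               # negated literal, stored with truth value 0
    #   print(db.truth('foo(Anna, Bob)'))  # 1.0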
def __init__(self, mln, evidence=None, dbfile=None, ignore_unknown_preds=False):
self.mln = mln
self._domains = defaultdict(list)
self._evidence = {}
if dbfile is not None:
Database.load(mln, dbfile, db=self, ignore_unknown_preds=ignore_unknown_preds)
if evidence is not None:
for atom, truth in evidence.iteritems():
self.add(atom, truth)
@property
def domains(self):
return self._domains
@domains.setter
def domains(self, doms):
self._domains = doms
@property
def evidence(self):
return dict(self._evidence)
def _atomstr(self, gndatom):
'''
Converts gndatom into a valid ground atom string representation.
'''
if type(gndatom) is str:
_, predname, args = self.mln.logic.parse_literal(gndatom)
atomstr = str(self.mln.logic.gnd_atom(predname, args, self.mln))
elif isinstance(gndatom, Logic.GroundAtom):
atomstr = str(gndatom)
elif isinstance(gndatom, Logic.GroundLit):
atomstr = str(gndatom.gndatom)
predname = gndatom.gndatom.predname
args = gndatom.gndatom.params
elif isinstance(gndatom, Logic.Lit):
atomstr = str(self.mln.logic.gnd_atom(gndatom.predname, gndatom.args, self.mln))
return atomstr
def truth(self, gndatom):
'''
Returns the evidence truth value of the given ground atom.
:param gndatom: a ground atom string
:returns: the truth value of the ground atom, or None if it is not specified.
'''
atomstr = self._atomstr(gndatom)
return self._evidence.get(atomstr)
def domain(self, domain):
'''
Returns the list of domain values of the given domain, or None if no domain
with the given name exists. If domain is dict, the domain values will be
updated and the domain will be created, if necessary.
:param domain: the name of the domain to be returned.
'''
if type(domain) is dict:
for domname, values in domain.iteritems():
if type(values) is not list: values = [values]
dom = self.domain(domname)
if dom is None:
dom = []
self._domains[domname] = dom
for value in values:
if value not in dom: dom.append(value)
elif domain is not None:
return self._domains.get(domain)
else:
return self._domains
def copy(self, mln=None):
'''
        Returns a copy of this Database. If `mln` is specified, the copy is
        associated with that MLN.
        :param mln: if `mln` is specified, the new database will be associated with `mln`,
        if not, it will be associated with `self.mln`.
'''
if mln is None:
mln = self.mln
db = Database(mln)
for atom, truth in self.gndatoms():
try: db.add(atom, truth)
except NoSuchPredicateError: pass
return db
def union(self, dbs, mln=None):
'''
Returns a new database consisting of the union of all databases
given in the arguments. If mln is specified, the new database will
be attached to that one, otherwise the mln of this database will
be used.
'''
db_ = Database(mln if mln is not None else self.mln)
if type(dbs) is list:
            dbs = [e for d in dbs for e in d] + list(self)
if type(dbs) is Database:
dbs = list(dbs) + list(self)
for atom, truth in dbs:
try: db_ << (atom, truth)
except NoSuchPredicateError: pass
return db_
def gndatoms(self, prednames=None):
'''
Iterates over all ground atoms in this database that match any of
the given predicate names. If no predicate name is specified, it
yields all ground atoms in the database.
:param prednames: a list of predicate names that this iteration should be filtered by.
:returns: a generator of (atom, truth) tuples.
'''
for atom, truth in self:
if prednames is not None:
_, predname, _ = self.mln.logic.parse_literal(atom)
if not predname in prednames: continue
yield atom, truth
def add(self, gndlit, truth=1):
'''
Adds the fact represented by the ground atom, which might be
a GroundLit object or a string.
:param gndlit: the ground literal to be added to the database.
Can be either a string or a :class:`logic.common.Logic.GroundLit` instance.
:param truth: the truth value of this ground literal. 0 stands for false, 1 for true.
In case of soft or fuzzy evidence, any truth value in [0,1] is allowed.
'''
if isinstance(gndlit, unicode):
gndlit = gndlit.encode('utf-8')
if isinstance(gndlit, str):
true, predname, args = self.mln.logic.parse_literal(gndlit)
atom_str = str(self.mln.logic.gnd_atom(predname, args, self.mln))
elif isinstance(gndlit, Logic.GroundLit):
atom_str = str(gndlit.gndatom)
true = not gndlit.negated
predname = gndlit.gndatom.predname
args = gndlit.gndatom.args
else:
raise Exception('gndlit has an illegal type: %s' % type(gndlit))
if truth in (True, False):
truth = {True: 1, False: 0}[truth]
truth = truth if true else 1 - truth
        truth = float('%.6f' % truth)
pred = self.mln.predicate(predname)
if pred is None:
raise NoSuchPredicateError('No such predicate: %s' % predname)
if len(pred.argdoms) != len(args):
raise Exception('Invalid number of arguments: %s' % str(gndlit))
if not all(map(lambda a: not self.mln.logic.isvar(a), args)):
raise Exception('No variables are allowed in databases. Only ground atoms: %s' % atom_str)
# update the domains
for domname, arg in zip(pred.argdoms, args):
self.domain({domname: arg})
self._evidence[atom_str] = truth
return self
def ishard(self):
'''
        Determines whether or not this database contains exclusively
        hard evidence, i.e. no truth value strictly between 0 and 1.
        '''
        return not any(map(lambda x: x != 1 and x != 0, self._evidence.values()))
def tofile(self, filename):
'''
Writes this database into the file with the given filename.
'''
f = open(filename, 'w+')
self.write(f)
f.close()
def write(self, stream=sys.stdout, color=None, bars=True):
'''
Writes this database into the stream in the MLN Database format.
The stream must provide a `write()` method as file objects do.
'''
if color is None:
if stream != sys.stdout:
color = False
else: color = True
for atom in sorted(self._evidence):
truth = self._evidence[atom]
pred, params = self.mln.logic.parse_atom(atom)
pred = str(pred)
params = map(str, params)
if bars:
bar = barstr(30, truth, color='magenta' if color else None)
else:
bar = ''
if color:
strout = '%s %s\n' % (bar if bars else colorize('%.6f' % truth, (None, 'magenta', False), True),
FirstOrderLogic.Lit(False, pred, params, self.mln).cstr(color))
else:
strout = '%s %s\n' % (bar if bars else '%.6f' % truth, FirstOrderLogic.Lit(False, pred, params, self.mln).cstr(color))
stream.write(strout)
def retract(self, gndatom):
'''
Removes the evidence of the given ground atom in this database.
Also cleans up the domains if an atom is removed that makes use of
a domain value that is not used by any other evidence atom.
:param gndatom: a string representation of the ground atom to be
removed from the database or a :class:`logic.common.Logic.GroundAtom` instance.
'''
if type(gndatom) is str:
_, predname, args = self.mln.logic.parse_literal(gndatom)
atom_str = str(self.mln.logic.gnd_atom(predname, args, self.mln))
        elif isinstance(gndatom, Logic.GroundAtom):
            atom_str = str(gndatom)
            predname = gndatom.predname
            args = gndatom.args
else:
raise Exception('gndatom has an illegal type: %s' % str(type(gndatom)))
if atom_str not in self: return
del self._evidence[atom_str]
doms = self.mln.predicate(predname).argdoms
dontremove = set()
for atom, _ in self:
_, predname_, args_ = self.mln.logic.parse_literal(atom)
doms_ = self.mln.predicate(predname_).argdoms
for arg, arg_, dom, dom_ in zip(args, args_, doms, doms_):
if arg == arg_ and dom == dom_: dontremove.add((dom, arg))
for (dom, arg) in zip(doms, args):
if (dom, arg) not in dontremove:
if arg in self._domains[dom]:
self._domains[dom].remove(arg)
if not self.domain(dom): del self._domains[dom]
def retractall(self, predname):
'''
Retracts all evidence atoms of the given predicate name in this database.
'''
for a, _ in dict(self._evidence).iteritems():
_, pred, _ = self.mln.logic.parse_literal(a)
if pred == predname: del self[a]
def rmval(self, domain, value):
for atom in list(self.evidence):
_, predname, args = self.mln.logic.parse_literal(atom)
for dom, val in zip(self.mln.predicate(predname).argdoms, args):
if dom == domain and val == value:
del self._evidence[atom]
self.domains[domain].remove(value)
def __iter__(self):
for atom, truth in self._evidence.iteritems():
yield atom, truth
def __add__(self, other):
return self.union(other, mln=self.mln)
def __iadd__(self, other):
return self.union(other, mln=self.mln)
def __setitem__(self, atom, truth):
self.add(atom, truth)
def __getitem__(self, atom):
return self.evidence.get(atom)
def __lshift__(self, arg):
if type(arg) is tuple:
if len(arg) != 2:
raise Exception('Illegal argument arg: %s' % str(arg))
self.add(arg[0], float(arg[1]))
elif isinstance(arg, basestring):
self.add(arg)
else:
raise ValueError('illegal argument: must be string or <string, truth> pair. Got: %s' % repr(arg))
def __rshift__(self, atom):
self.retract(atom)
def __contains__(self, el):
atomstr = self._atomstr(el)
return atomstr in self._evidence
def __delitem__(self, item):
self.retract(item)
def __len__(self):
return len(self.evidence)
def isempty(self):
'''
        Returns True iff this database contains no evidence (i.e. no ground
        atom has a truth value assigned) and all of its domains are empty.
'''
return not any(map(lambda x: x >= 0 and x <= 1, self._evidence.values())) and \
len(self.domains) == 0
def query(self, formula, thr=1):
'''
        Poses a Prolog-like query to the database, given by the specified formula.
        Returns a generator of dictionaries with variable-value assignments for which the formula has
        a truth value of at least `thr`.
        :param formula: the formula the database shall be queried with.
        :param thr: the threshold for truth values.
.. warning::
This is *very* inefficient, since all groundings will be instantiated; so keep the queries short.
:Example:
>>> for r in db.query('foo(?x, ?y)'):
>>> print r
>>>
{'?x': 'X1', '?y': 'Y1'}
{'?x': 'X2', '?y': 'Y2'}
'''
mrf = Database.PseudoMRF(self)
formula = self.mln.logic.parse_formula(formula)
for assignment in mrf.iter_true_var_assignments(formula, truth_thr=thr):
yield assignment
@staticmethod
def write_dbs(dbs, stream=sys.stdout, color=None, bars=False):
if color is None:
if stream != sys.stdout:
color = False
else: color = True
strdbs = []
for db in dbs:
s = StringIO()
db.write(s, color=color, bars=bars)
strdbs.append(s.getvalue())
s.close()
stream.write('---\n'.join(strdbs))
@staticmethod
def load(mln, dbfiles, ignore_unknown_preds=False, db=None):
'''
        Reads one or multiple database files containing literals and/or domains.
        Returns one or multiple databases in which `domains` is a dictionary mapping
        domain names to lists of constants defined in the database
        and `evidence` is a dictionary mapping ground atom strings to truth values.
        :param dbfiles: a single path or a list of paths to database files.
        :param mln: the MLN object which should be used to load the database.
        :returns: either a single database object or a list of them.
:Example:
>>> mln = MLN()
>>> db = Database.load(mln, './example.db')
'''
if type(dbfiles) is not list:
dbfiles = [dbfiles]
dbs = []
for dbpath in dbfiles:
if isinstance(dbpath, basestring):
dbpath = mlnpath(dbpath)
if isinstance(dbpath, mlnpath):
projectpath = None
if dbpath.project is not None:
projectpath = dbpath.projectloc
dirs = [os.path.dirname(fp) for fp in dbfiles]
dbs_ = parse_db(mln, content=dbpath.content, ignore_unknown_preds=ignore_unknown_preds, db=db, dirs=dirs, projectpath=projectpath)
dbs.extend(dbs_)
else:
raise Exception('Illegal db file specifier: %s' % dbpath)
if len(dbs) > 1 and db is not None:
raise Exception('Cannot attach multiple databases to a single database object. Use Database.load(..., db=None).')
else:
return dbs
class PseudoMRF(object):
'''
        Allows a plain Database object to be used for grounding formulas
        (without instantiating an MRF) and for determining the truth of these
        ground formulas, by partly replicating the interface of an MRF object.
'''
def __init__(self, db):
self.mln = db.mln
self.domains = mergedom(self.mln.domains, db.domains)
self.gndatoms = Database.PseudoMRF.GroundAtomGen()
# duplicate the database to avoid side effects
self.evidence = Database.PseudoMRF.WorldValues(db.copy())
class GroundAtomGen(object):
def __getitem__(self, gndAtomName):
return Database.PseudoMRF.TextGroundAtom(gndAtomName)
def get(self, key, default=None):
return self[key]
class TextGroundAtom(object):
def __init__(self, name):
self.name = self.idx = name
def truth(self, world):
return world[self.name]
def __str__(self):
return self.name
def simplify(self, mrf):
return self
class WorldValues(object):
def __init__(self, db):
self.db = db
def __getitem__(self, atomstr):
return self.db._evidence.get(atomstr, 0)
def iter_groundings(self, formula):
for t in formula.iter_groundings(self):
yield t
def truth(self, gndformula):
return gndformula.truth(self.evidence)
def count_true_groundings(self, formula):
numTotal = 0
numTrue = 0
for gf, _ in self.iter_groundings(formula):
numTotal += 1
numTrue += gf.truth(self.evidence)
return (numTrue, numTotal)
def gndatom(self, atom):
return self.gndatoms.get(atom)
def iter_true_var_assignments(self, formula, truth_thr=1.0):
'''
Iterates over all groundings of formula that evaluate to true
given this Pseudo-MRF.
'''
for assignment in formula.iter_true_var_assignments(self, self.evidence, truth_thr=truth_thr):
yield assignment
def parse_db(mln, content, ignore_unknown_preds=False, db=None, dirs=['.'], projectpath=None):
'''
Reads one or more databases in a string representation and returns
the respective Database objects.
:param mln: the MLN object which should be used to load the database.
:param content: the string representation of one or multiple ('---'-separated)
databases
:param ignore_unknown_preds: by default this function raises an Exception when it encounters
a predicate in the DB that has not been declared in the associated
MLN. ignore_unknown_preds=True simply ignores such predicates.
:param db: the Database object that shall receive the facts stored in the new DB.
If None, a new `Database` object will be created.
'''
log = logs.getlogger('db')
content = stripComments(content)
    # multiple independent databases are only allowed when no existing Database object was handed in
    allow_multiple = db is None
    if db is None:
        db = Database(mln, ignore_unknown_preds=ignore_unknown_preds)
dbs = []
# expand domains with dbtext constants and save evidence
for line, l in enumerate(content.split("\n")):
l = l.strip()
if l == '':
continue
# separator between independent databases
elif l == '---' and not db.isempty():
dbs.append(db)
db = Database(mln)
continue
# domain declaration
elif "{" in l:
domname, constants = db.mln.logic.parse_domain(l)
domnames = [domname for _ in constants]
# include
elif l.startswith('#include'):
filename = l[len("#include "):].strip()
m = re.match(r'"(?P<filename>.+)"', filename)
if m is not None:
filename = m.group('filename')
# if the path is relative, look for the respective file
# relatively to all paths specified. Take the first file matching.
if not mlnpath(filename).exists:
includefilename = None
for d in dirs:
mlnp = '/'.join([d, filename])
if mlnpath(mlnp).exists:
includefilename = mlnp
break
if includefilename is None:
raise Exception('File not found: %s' % filename)
else:
includefilename = filename
else:
m = re.match(r'<(?P<filename>.+)>', filename)
if m is not None:
filename = m.group('filename')
else:
raise MLNParsingError('Malformed #include statement: %s' % line)
if projectpath is None:
raise MLNParsingError('No project specified: Cannot locate import from project: %s' % filename)
includefilename = ':'.join([projectpath, filename])
logger.debug('Including file: "%s"' % includefilename)
p = mlnpath(includefilename)
dbs.extend(parse_db(content=mlnpath(includefilename).content, ignore_unknown_preds=ignore_unknown_preds, dirs=[p.resolve_path()]+dirs,
projectpath=ifnone(p.project, projectpath, lambda x: '/'.join(p.path+[x])), mln=mln))
continue
# valued evidence
elif l[0] in "0123456789":
s = l.find(" ")
gndatom = l[s + 1:].replace(" ", "")
value = float(l[:s])
if value < 0 or value > 1:
raise Exception('Valued evidence must be in [0,1]')
if gndatom in db.evidence:
raise Exception("Duplicate soft evidence for '%s'" % gndatom)
try:
_, predname, constants = mln.logic.parse_literal(gndatom) # TODO Should we allow soft evidence on non-atoms here? (This assumes atoms)
except NoSuchPredicateError, e:
if ignore_unknown_preds: continue
else: raise e
domnames = mln.predicate(predname).argdoms
db << (gndatom, value)
# literal
else:
if l[0] == "?":
raise Exception("Unknown literals not supported (%s)" % l) # this is an Alchemy feature
try:
true, predname, constants = mln.logic.parse_literal(l)
except NoSuchPredicateError, e:
if ignore_unknown_preds: continue
else: raise e
except Exception, e:
traceback.print_exc()
raise MLNParsingError('Error parsing line %d: %s (%s)' % (line+1, l, e.message))
if mln.predicate(predname) is None and ignore_unknown_preds:
log.debug('Predicate "%s" is undefined.' % predname)
continue
elif mln.predicate(predname) is None:
raise NoSuchPredicateError(predname)
domnames = mln.predicate(predname).argdoms
# save evidence
true = 1 if true else 0
db << ("%s(%s)" % (predname, ",".join(constants)), true)
# expand domains
if len(domnames) != len(constants):
raise Exception("Ground atom %s in database %d has wrong number of parameters" % (l, len(dbs)))
for i, c in enumerate(constants):
db.domain({domnames[i]: c})
if not db.isempty(): dbs.append(db)
if len(dbs) > 1 and not allow_multiple:
        raise Exception('Only a single database is permitted when loading via the constructor. Use Database.load() for loading multiple DBs.')
return dbs
def readall_dbs(mln, path):
'''
Loads and yields all databases (*.db files) that are located in
the given directory and returns the corresponding Database objects.
:param path: the directory path to look for .db files
'''
for dirname, dirnames, filenames in os.walk(path): #@UnusedVariable
for f in filenames:
if not f.endswith('.db'):
continue
p = os.path.join(dirname, f)
print " reading database %s" % p
dbs = Database.load(mln, p)
if type(dbs) == list:
for db in dbs:
yield db
else:
yield dbs
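# Minimal usage sketch (editor's addition, not part of the original module).
# It assumes an MLN instance `mln` whose predicates `foo/2` and `bar/1` are
# already declared; the calls themselves are the ones defined above.
#
#     db = Database(mln)
#     db << 'foo(X, Y)'          # hard evidence via the << operator
#     db << ('bar(X)', 0.7)      # soft evidence with a truth value in [0, 1]
#     db.write(sys.stdout)       # pretty-print the evidence
#     for assignment in db.query('foo(?x, ?y)'):
#         print assignment       # e.g. {'?x': 'X', '?y': 'Y'}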
|
tests/test_galleries.py | kevinlai219/Mezzanine-Django | 3,053 | 12673761 | <reponame>kevinlai219/Mezzanine-Django
import os
from shutil import rmtree
from uuid import uuid4
from mezzanine.conf import settings
from mezzanine.core.templatetags.mezzanine_tags import thumbnail
from mezzanine.galleries.models import GALLERIES_UPLOAD_DIR, Gallery
from mezzanine.utils.tests import TestCase, copy_test_to_media
class GalleriesTests(TestCase):
def test_gallery_import(self):
"""
Test that a gallery creates images when given a zip file to
import, and that descriptions are created.
"""
zip_name = "gallery.zip"
copy_test_to_media("mezzanine.core", zip_name)
title = str(uuid4())
gallery = Gallery.objects.create(title=title, zip_import=zip_name)
images = list(gallery.images.all())
self.assertTrue(images)
self.assertTrue(all([image.description for image in images]))
# Clean up.
rmtree(os.path.join(settings.MEDIA_ROOT, GALLERIES_UPLOAD_DIR, title))
def test_thumbnail_generation(self):
"""
Test that a thumbnail is created and resized.
"""
try:
from PIL import Image
except ImportError:
return
image_name = "image.jpg"
size = (24, 24)
copy_test_to_media("mezzanine.core", image_name)
thumb_name = os.path.join(
settings.THUMBNAILS_DIR_NAME,
image_name,
image_name.replace(".", "-%sx%s." % size),
)
thumb_path = os.path.join(settings.MEDIA_ROOT, thumb_name)
thumb_image = thumbnail(image_name, *size)
self.assertEqual(os.path.normpath(thumb_image.lstrip("/")), thumb_name)
self.assertNotEqual(os.path.getsize(thumb_path), 0)
thumb = Image.open(thumb_path)
self.assertEqual(thumb.size, size)
# Clean up.
del thumb
os.remove(os.path.join(settings.MEDIA_ROOT, image_name))
os.remove(os.path.join(thumb_path))
rmtree(os.path.join(os.path.dirname(thumb_path)))
|
pcdet/models/roi_heads/target_assigner/proposal_target_layer.py | s-ryosky/ST3D | 184 | 12673766 | import numpy as np
import torch
import torch.nn as nn
from ....ops.iou3d_nms import iou3d_nms_utils
class ProposalTargetLayer(nn.Module):
def __init__(self, roi_sampler_cfg):
super().__init__()
self.roi_sampler_cfg = roi_sampler_cfg
def forward(self, batch_dict):
"""
Args:
batch_dict:
batch_size:
rois: (B, num_rois, 7 + C)
roi_scores: (B, num_rois)
gt_boxes: (B, N, 7 + C + 1)
roi_labels: (B, num_rois)
Returns:
batch_dict:
rois: (B, M, 7 + C)
gt_of_rois: (B, M, 7 + C)
gt_iou_of_rois: (B, M)
roi_scores: (B, M)
roi_labels: (B, M)
reg_valid_mask: (B, M)
rcnn_cls_labels: (B, M)
"""
batch_rois, batch_gt_of_rois, batch_roi_ious, batch_roi_scores, batch_roi_labels = self.sample_rois_for_rcnn(
batch_dict=batch_dict
)
# regression valid mask
reg_valid_mask = (batch_roi_ious > self.roi_sampler_cfg.REG_FG_THRESH).long()
# classification label
if self.roi_sampler_cfg.CLS_SCORE_TYPE == 'cls':
batch_cls_labels = (batch_roi_ious > self.roi_sampler_cfg.CLS_FG_THRESH).long()
ignore_mask = (batch_roi_ious > self.roi_sampler_cfg.CLS_BG_THRESH) & \
(batch_roi_ious < self.roi_sampler_cfg.CLS_FG_THRESH)
batch_cls_labels[ignore_mask > 0] = -1
elif self.roi_sampler_cfg.CLS_SCORE_TYPE == 'roi_iou':
iou_bg_thresh = self.roi_sampler_cfg.CLS_BG_THRESH
iou_fg_thresh = self.roi_sampler_cfg.CLS_FG_THRESH
fg_mask = batch_roi_ious > iou_fg_thresh
bg_mask = batch_roi_ious < iou_bg_thresh
interval_mask = (fg_mask == 0) & (bg_mask == 0)
batch_cls_labels = (fg_mask > 0).float()
batch_cls_labels[interval_mask] = \
(batch_roi_ious[interval_mask] - iou_bg_thresh) / (iou_fg_thresh - iou_bg_thresh)
elif self.roi_sampler_cfg.CLS_SCORE_TYPE == 'raw_roi_iou':
batch_cls_labels = batch_roi_ious
else:
raise NotImplementedError
targets_dict = {'rois': batch_rois, 'gt_of_rois': batch_gt_of_rois, 'gt_iou_of_rois': batch_roi_ious,
'roi_scores': batch_roi_scores, 'roi_labels': batch_roi_labels,
'reg_valid_mask': reg_valid_mask,
'rcnn_cls_labels': batch_cls_labels}
return targets_dict
def sample_rois_for_rcnn(self, batch_dict):
"""
Args:
batch_dict:
batch_size:
rois: (B, num_rois, 7 + C)
roi_scores: (B, num_rois)
gt_boxes: (B, N, 7 + C + 1)
roi_labels: (B, num_rois)
Returns:
"""
batch_size = batch_dict['batch_size']
rois = batch_dict['rois']
roi_scores = batch_dict['roi_scores']
roi_labels = batch_dict['roi_labels']
gt_boxes = batch_dict['gt_boxes']
code_size = rois.shape[-1]
batch_rois = rois.new_zeros(batch_size, self.roi_sampler_cfg.ROI_PER_IMAGE, code_size)
batch_gt_of_rois = rois.new_zeros(batch_size, self.roi_sampler_cfg.ROI_PER_IMAGE, code_size + 1)
batch_roi_ious = rois.new_zeros(batch_size, self.roi_sampler_cfg.ROI_PER_IMAGE)
batch_roi_scores = rois.new_zeros(batch_size, self.roi_sampler_cfg.ROI_PER_IMAGE)
batch_roi_labels = rois.new_zeros((batch_size, self.roi_sampler_cfg.ROI_PER_IMAGE), dtype=torch.long)
for index in range(batch_size):
cur_roi, cur_gt, cur_roi_labels, cur_roi_scores = \
rois[index], gt_boxes[index], roi_labels[index], roi_scores[index]
            k = len(cur_gt) - 1
while k > 0 and cur_gt[k].sum() == 0:
k -= 1
cur_gt = cur_gt[:k + 1]
cur_gt = cur_gt.new_zeros((1, cur_gt.shape[1])) if len(cur_gt) == 0 else cur_gt
if self.roi_sampler_cfg.get('SAMPLE_ROI_BY_EACH_CLASS', False):
max_overlaps, gt_assignment = self.get_max_iou_with_same_class(
rois=cur_roi, roi_labels=cur_roi_labels,
gt_boxes=cur_gt[:, 0:7], gt_labels=cur_gt[:, -1].long()
)
else:
iou3d = iou3d_nms_utils.boxes_iou3d_gpu(cur_roi, cur_gt[:, 0:7]) # (M, N)
max_overlaps, gt_assignment = torch.max(iou3d, dim=1)
sampled_inds = self.subsample_rois(max_overlaps=max_overlaps)
batch_rois[index] = cur_roi[sampled_inds]
batch_roi_labels[index] = cur_roi_labels[sampled_inds]
batch_roi_ious[index] = max_overlaps[sampled_inds]
batch_roi_scores[index] = cur_roi_scores[sampled_inds]
batch_gt_of_rois[index] = cur_gt[gt_assignment[sampled_inds]]
return batch_rois, batch_gt_of_rois, batch_roi_ious, batch_roi_scores, batch_roi_labels
def subsample_rois(self, max_overlaps):
# sample fg, easy_bg, hard_bg
fg_rois_per_image = int(np.round(self.roi_sampler_cfg.FG_RATIO * self.roi_sampler_cfg.ROI_PER_IMAGE))
fg_thresh = min(self.roi_sampler_cfg.REG_FG_THRESH, self.roi_sampler_cfg.CLS_FG_THRESH)
fg_inds = torch.nonzero((max_overlaps >= fg_thresh)).view(-1)
easy_bg_inds = torch.nonzero((max_overlaps < self.roi_sampler_cfg.CLS_BG_THRESH_LO)).view(-1)
hard_bg_inds = torch.nonzero((max_overlaps < self.roi_sampler_cfg.REG_FG_THRESH) &
(max_overlaps >= self.roi_sampler_cfg.CLS_BG_THRESH_LO)).view(-1)
fg_num_rois = fg_inds.numel()
bg_num_rois = hard_bg_inds.numel() + easy_bg_inds.numel()
if fg_num_rois > 0 and bg_num_rois > 0:
# sampling fg
fg_rois_per_this_image = min(fg_rois_per_image, fg_num_rois)
rand_num = torch.from_numpy(np.random.permutation(fg_num_rois)).type_as(max_overlaps).long()
fg_inds = fg_inds[rand_num[:fg_rois_per_this_image]]
# sampling bg
bg_rois_per_this_image = self.roi_sampler_cfg.ROI_PER_IMAGE - fg_rois_per_this_image
bg_inds = self.sample_bg_inds(
hard_bg_inds, easy_bg_inds, bg_rois_per_this_image, self.roi_sampler_cfg.HARD_BG_RATIO
)
elif fg_num_rois > 0 and bg_num_rois == 0:
# sampling fg
rand_num = np.floor(np.random.rand(self.roi_sampler_cfg.ROI_PER_IMAGE) * fg_num_rois)
rand_num = torch.from_numpy(rand_num).type_as(max_overlaps).long()
fg_inds = fg_inds[rand_num]
bg_inds = []
elif bg_num_rois > 0 and fg_num_rois == 0:
# sampling bg
bg_rois_per_this_image = self.roi_sampler_cfg.ROI_PER_IMAGE
bg_inds = self.sample_bg_inds(
hard_bg_inds, easy_bg_inds, bg_rois_per_this_image, self.roi_sampler_cfg.HARD_BG_RATIO
)
else:
print('maxoverlaps:(min=%f, max=%f)' % (max_overlaps.min().item(), max_overlaps.max().item()))
print('ERROR: FG=%d, BG=%d' % (fg_num_rois, bg_num_rois))
raise NotImplementedError
sampled_inds = torch.cat((fg_inds, bg_inds), dim=0)
return sampled_inds
@staticmethod
def sample_bg_inds(hard_bg_inds, easy_bg_inds, bg_rois_per_this_image, hard_bg_ratio):
if hard_bg_inds.numel() > 0 and easy_bg_inds.numel() > 0:
hard_bg_rois_num = min(int(bg_rois_per_this_image * hard_bg_ratio), len(hard_bg_inds))
easy_bg_rois_num = bg_rois_per_this_image - hard_bg_rois_num
# sampling hard bg
rand_idx = torch.randint(low=0, high=hard_bg_inds.numel(), size=(hard_bg_rois_num,)).long()
hard_bg_inds = hard_bg_inds[rand_idx]
# sampling easy bg
rand_idx = torch.randint(low=0, high=easy_bg_inds.numel(), size=(easy_bg_rois_num,)).long()
easy_bg_inds = easy_bg_inds[rand_idx]
bg_inds = torch.cat([hard_bg_inds, easy_bg_inds], dim=0)
elif hard_bg_inds.numel() > 0 and easy_bg_inds.numel() == 0:
hard_bg_rois_num = bg_rois_per_this_image
# sampling hard bg
rand_idx = torch.randint(low=0, high=hard_bg_inds.numel(), size=(hard_bg_rois_num,)).long()
bg_inds = hard_bg_inds[rand_idx]
elif hard_bg_inds.numel() == 0 and easy_bg_inds.numel() > 0:
easy_bg_rois_num = bg_rois_per_this_image
# sampling easy bg
rand_idx = torch.randint(low=0, high=easy_bg_inds.numel(), size=(easy_bg_rois_num,)).long()
bg_inds = easy_bg_inds[rand_idx]
else:
raise NotImplementedError
return bg_inds
@staticmethod
def get_max_iou_with_same_class(rois, roi_labels, gt_boxes, gt_labels):
"""
Args:
rois: (N, 7)
roi_labels: (N)
gt_boxes: (N, )
gt_labels:
Returns:
"""
"""
:param rois: (N, 7)
:param roi_labels: (N)
:param gt_boxes: (N, 8)
:return:
"""
max_overlaps = rois.new_zeros(rois.shape[0])
gt_assignment = roi_labels.new_zeros(roi_labels.shape[0])
for k in range(gt_labels.min().item(), gt_labels.max().item() + 1):
roi_mask = (roi_labels == k)
gt_mask = (gt_labels == k)
if roi_mask.sum() > 0 and gt_mask.sum() > 0:
cur_roi = rois[roi_mask]
cur_gt = gt_boxes[gt_mask]
original_gt_assignment = gt_mask.nonzero().view(-1)
iou3d = iou3d_nms_utils.boxes_iou3d_gpu(cur_roi, cur_gt) # (M, N)
cur_max_overlaps, cur_gt_assignment = torch.max(iou3d, dim=1)
max_overlaps[roi_mask] = cur_max_overlaps
gt_assignment[roi_mask] = original_gt_assignment[cur_gt_assignment]
return max_overlaps, gt_assignment
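# Usage sketch (editor's addition, not part of the original file). The config
# keys mirror the ones read above (ROI_PER_IMAGE, FG_RATIO, REG_FG_THRESH,
# CLS_FG_THRESH, CLS_BG_THRESH, CLS_BG_THRESH_LO, HARD_BG_RATIO, CLS_SCORE_TYPE,
# SAMPLE_ROI_BY_EACH_CLASS); the example values are placeholders and the tensor
# shapes follow the forward() docstring. Running it requires GPU tensors, since
# boxes_iou3d_gpu is a CUDA op.
#
#     from easydict import EasyDict
#     cfg = EasyDict(ROI_PER_IMAGE=128, FG_RATIO=0.5, REG_FG_THRESH=0.55,
#                    CLS_FG_THRESH=0.75, CLS_BG_THRESH=0.25, CLS_BG_THRESH_LO=0.1,
#                    HARD_BG_RATIO=0.8, CLS_SCORE_TYPE='roi_iou',
#                    SAMPLE_ROI_BY_EACH_CLASS=True)
#     layer = ProposalTargetLayer(cfg)
#     targets = layer({'batch_size': B, 'rois': rois, 'roi_scores': scores,
#                      'roi_labels': labels, 'gt_boxes': gt_boxes})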
|
tkgui/player_info.py | hawson/rpg-text | 162 | 12673767 | <gh_stars>100-1000
class PlayerInfo:
def __init__(self, manager):
self.window = manager.window
self.game = manager.game
self.manager = manager
self._bottom_index = 0
self._top_index = 0
self.bottom_num_pages = 3
self.top_num_pages = 2
def __call__(self):
self.setup_ui()
self.refresh_page()
@property
def bottom_index(self):
return self._bottom_index
@bottom_index.setter
def bottom_index(self, value):
if value < 0:
self._bottom_index = self.bottom_num_pages - 1
elif value > self.bottom_num_pages - 1:
self._bottom_index = 0
else:
self._bottom_index = value
@property
def top_index(self):
return self._top_index
@top_index.setter
def top_index(self, value):
if value < 0:
self._top_index = self.top_num_pages - 1
elif value > self.top_num_pages - 1:
self._top_index = 0
else:
self._top_index = value
def setup_ui(self):
self.window.print(self.game.player.name, (50, 1))
self.window.print(self.game.player.job.name.capitalize(), (49, 2))
self.window.print(f"Level - {self.game.player.level}", (67, 2))
self.window.print(f'XP {self.game.player.experience}/{self.game.player.xp_to_next_level}', (49, 3))
self.window.print(f'Health {self.game.player.health}/{self.game.player.max_health}', (49, 4))
self.window.print(f'Mana {self.game.player.mana}/{self.game.player.max_mana}', (49, 5))
def setup_equipmnt(self):
self.window.print(' EQUIPMNT ', (57, 7))
i = 0
for slot_name, slot_item in self.game.player.inventory.equipped.as_dict.items():
if not isinstance(slot_item, list):
self.window.print(f'{slot_name.upper()} : {slot_item.capitalize()}', (49, 8 + i))
else:
self.window.print(f'{slot_name.upper()} : '
f'{", ".join([s.capitalize() for s in slot_item])}', (49, 8 + i))
i += 1
def setup_commands(self):
self.window.print(' COMMANDS ', (57, 7))
pass
def clear_page(self):
self.window.print(' ' * 10, (57, 14))
self.window.print(' ' * 10, (57, 7))
for i in range(8):
self.window.print(' ' * 29, (48, 15 + i))
for i in range(6):
self.window.print(' ' * 29, (48, 8 + i))
def setup_stats(self):
self.window.print('STATS', (59, 14))
i = 0
for key, value in self.game.player.stats.as_dict.items():
self.window.print(f'{key.upper()} - {value}', (49, 15 + i))
i += 1
def setup_saving_throws(self):
self.window.print('SAV.THROWS', (57, 14))
i = 0
for key, value in self.game.player.job.saving_throws.as_dict.items():
self.window.print(f'{key.upper()} - {value}', (49, 15 + i))
i += 1
def setup_money(self):
self.window.print(' MONEY ', (57, 14))
i = 0
for key, value in self.game.player.inventory.money.coins.items():
self.window.print(f'{key.upper()} : {value}', (49, 15 + i))
i += 1
self.window.print(f'GEMS : {self.game.player.inventory.money.gems_value} GC', (49, 15 + i))
self.window.print(f'JEWELS : {self.game.player.inventory.money.jewels_value} GC', (49, 16 + i))
self.window.print(f'TOTAL : {self.game.player.inventory.money.value:02} GC', (49, 17 + i))
def on_bottom_page_left(self, event):
self.bottom_index -= 1
self.refresh_page()
def on_bottom_page_right(self, event):
self.bottom_index += 1
self.refresh_page()
def on_top_page_left(self, event):
self.top_index -= 1
self.refresh_page()
def on_top_page_right(self, event):
self.top_index += 1
self.refresh_page()
def refresh_page(self):
self.clear_page()
[self.setup_stats, self.setup_saving_throws, self.setup_money][self.bottom_index]()
[self.setup_equipmnt, self.setup_commands][self.top_index]()
self.window.button('<', (56, 14), self.on_bottom_page_left)
self.window.button('<', (56, 7), self.on_top_page_left)
self.window.button('>', (67, 14), self.on_bottom_page_right)
self.window.button('>', (67, 7), self.on_top_page_right)
|
src/sparseml/sparsification/info.py | clementpoiret/sparseml | 922 | 12673800 | <gh_stars>100-1000
# Copyright (c) 2021 - present / Neuralmagic, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Functionality related to describing availability and information of sparsification
algorithms to models within in the ML frameworks.
The file is executable and will get the sparsification info for a given framework:
##########
Command help:
usage: info.py [-h] [--path PATH] framework
Compile the available setup and information for the sparsification of a model
in a given framework.
positional arguments:
framework the ML framework or path to a framework file to load the
sparsification info for
optional arguments:
-h, --help show this help message and exit
--path PATH A full file path to save the sparsification info to. If not
supplied, will print out the sparsification info to the
console.
#########
EXAMPLES
#########
##########
Example command for getting the sparsification info for pytorch.
python src/sparseml/sparsification/info.py pytorch
"""
import argparse
import logging
import os
from enum import Enum
from typing import Any, List, Optional
from pydantic import BaseModel, Field
from sparseml.base import execute_in_sparseml_framework
from sparseml.utils import clean_path, create_parent_dirs
__all__ = [
"ModifierType",
"ModifierPropInfo",
"ModifierInfo",
"SparsificationInfo",
"sparsification_info",
"save_sparsification_info",
"load_sparsification_info",
]
_LOGGER = logging.getLogger(__name__)
class ModifierType(Enum):
"""
Types of modifiers for grouping what functionality a Modifier falls under.
"""
general = "general"
training = "training"
pruning = "pruning"
quantization = "quantization"
act_sparsity = "act_sparsity"
misc = "misc"
class ModifierPropInfo(BaseModel):
"""
Class for storing information and associated metadata for a
property on a given Modifier.
Extends pydantics BaseModel class for serialization to and from json
in addition to proper type checking on construction.
"""
name: str = Field(
title="name",
description=(
"Name of the property for a Modifier. "
"It can be accessed by this name on the modifier instance."
),
)
description: str = Field(
title="description",
description="Description and information for the property for a Modifier.",
)
type_: str = Field(
title="type_",
description=(
"The format type for the property for a Modifier such as "
"int, float, str, etc."
),
)
restrictions: Optional[List[Any]] = Field(
default=None,
title="restrictions",
description=(
"Value restrictions for the property for a Modifier. "
"If set, restrict the set value to one of the contained restrictions."
),
)
class ModifierInfo(BaseModel):
"""
Class for storing information and associated metadata for a given Modifier.
Extends pydantics BaseModel class for serialization to and from json
in addition to proper type checking on construction.
"""
name: str = Field(
title="name",
description=(
"Name/class of the Modifier to be used for construction and identification."
),
)
description: str = Field(
title="description",
description="Description and info for the Modifier and what its used for.",
)
type_: ModifierType = Field(
default=ModifierType.misc,
title="type_",
description=(
"The type the given Modifier is for grouping by similar functionality."
),
)
props: List[ModifierPropInfo] = Field(
default=[],
title="props",
description="The properties for the Modifier that can be set and controlled.",
)
warnings: Optional[List[str]] = Field(
default=None,
title="warnings",
description=(
"Any warnings that apply for the Modifier and using it within a system"
),
)
class SparsificationInfo(BaseModel):
"""
Class for storing the information for sparsifying in a given framework.
Extends pydantics BaseModel class for serialization to and from json
in addition to proper type checking on construction.
"""
modifiers: List[ModifierInfo] = Field(
default=[],
title="modifiers",
description="A list of the information for the available modifiers",
)
def type_modifiers(self, type_: ModifierType) -> List[ModifierInfo]:
"""
Get the contained Modifiers for a specific ModifierType.
:param type_: The ModifierType to filter the returned list of Modifiers by.
:type type_: ModifierType
:return: The filtered list of Modifiers that match the given type_.
:rtype: List[ModifierInfo]
"""
modifiers = []
for mod in self.modifiers:
if mod.type_ == type_:
modifiers.append(mod)
return modifiers
def sparsification_info(framework: Any) -> SparsificationInfo:
"""
Get the available setup for sparsifying model in the given framework.
:param framework: The item to detect the ML framework for.
See :func:`detect_framework` for more information.
:type framework: Any
:return: The sparsification info for the given framework
:rtype: SparsificationInfo
"""
_LOGGER.debug("getting sparsification info for framework %s", framework)
info: SparsificationInfo = execute_in_sparseml_framework(
framework, "sparsification_info"
)
_LOGGER.info("retrieved sparsification info for framework %s: %s", framework, info)
return info
def save_sparsification_info(framework: Any, path: Optional[str] = None):
"""
Save the sparsification info for a given framework.
If path is provided, will save to a json file at that path.
If path is not provided, will print out the info.
:param framework: The item to detect the ML framework for.
See :func:`detect_framework` for more information.
:type framework: Any
:param path: The path, if any, to save the info to in json format.
If not provided will print out the info.
:type path: Optional[str]
"""
_LOGGER.debug(
"saving sparsification info for framework %s to %s",
framework,
path if path else "sys.out",
)
info = (
sparsification_info(framework)
if not isinstance(framework, SparsificationInfo)
else framework
)
if path:
path = clean_path(path)
create_parent_dirs(path)
with open(path, "w") as file:
file.write(info.json())
_LOGGER.info(
"saved sparsification info for framework %s in file at %s", framework, path
        )
else:
print(info.json(indent=4))
_LOGGER.info("printed out sparsification info for framework %s", framework)
def load_sparsification_info(load: str) -> SparsificationInfo:
"""
Load the sparsification info from a file or raw json.
If load exists as a path, will read from the file and use that.
Otherwise will try to parse the input as a raw json str.
:param load: Either a file path to a json file or a raw json string.
:type load: str
:return: The loaded sparsification info.
:rtype: SparsificationInfo
"""
load_path = clean_path(load)
if os.path.exists(load_path):
with open(load_path, "r") as file:
load = file.read()
info = SparsificationInfo.parse_raw(load)
return info
def _parse_args():
parser = argparse.ArgumentParser(
description=(
"Compile the available setup and information for the sparsification "
"of a model in a given framework."
)
)
parser.add_argument(
"framework",
type=str,
help=(
"the ML framework or path to a framework file to load the "
"sparsification info for"
),
)
parser.add_argument(
"--path",
type=str,
default=None,
help=(
"A full file path to save the sparsification info to. "
"If not supplied, will print out the sparsification info to the console."
),
)
return parser.parse_args()
def _main():
args = _parse_args()
save_sparsification_info(args.framework, args.path)
if __name__ == "__main__":
_main()
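# Programmatic usage sketch (editor's addition; requires a framework plugin
# such as `pip install sparseml[torch]` so that "pytorch" can be resolved):
#
#     from sparseml.sparsification.info import sparsification_info, ModifierType
#     info = sparsification_info("pytorch")
#     for modifier in info.type_modifiers(ModifierType.pruning):
#         print(modifier.name, [prop.name for prop in modifier.props])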
|
Chapter04/wavnet/wavenet_utils.py | sbi97/R-Deep-learning | 1,119 | 12673806 | <reponame>sbi97/R-Deep-learning
import keras.backend as K
from keras.layers import AtrousConvolution1D
from keras.utils.np_utils import conv_output_length
def categorical_mean_squared_error(y_true, y_pred):
"""MSE for categorical variables."""
return K.mean(K.square(K.argmax(y_true, axis=-1) -
K.argmax(y_pred, axis=-1)))
class CausalAtrousConvolution1D(AtrousConvolution1D):
def __init__(self, nb_filter, filter_length, init='glorot_uniform', activation=None, weights=None,
border_mode='valid', subsample_length=1, atrous_rate=1, W_regularizer=None, b_regularizer=None,
activity_regularizer=None, W_constraint=None, b_constraint=None, bias=True, causal=False, **kwargs):
super(CausalAtrousConvolution1D, self).__init__(nb_filter, filter_length, init, activation, weights,
border_mode, subsample_length, atrous_rate, W_regularizer,
b_regularizer, activity_regularizer, W_constraint, b_constraint,
bias, **kwargs)
self.causal = causal
if self.causal and border_mode != 'valid':
raise ValueError("Causal mode dictates border_mode=valid.")
def get_output_shape_for(self, input_shape):
input_length = input_shape[1]
if self.causal:
input_length += self.atrous_rate * (self.filter_length - 1)
length = conv_output_length(input_length,
self.filter_length,
self.border_mode,
self.subsample[0],
dilation=self.atrous_rate)
return (input_shape[0], length, self.nb_filter)
def call(self, x, mask=None):
if self.causal:
x = K.asymmetric_temporal_padding(x, self.atrous_rate * (self.filter_length - 1), 0)
return super(CausalAtrousConvolution1D, self).call(x, mask)
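# Layer usage sketch (editor's addition; assumes the Keras 1.x functional API
# that the class above is written against, with `timesteps` and `channels`
# standing in for the input dimensions):
#
#     from keras.layers import Input
#     inp = Input(shape=(timesteps, channels))
#     out = CausalAtrousConvolution1D(nb_filter=32, filter_length=2,
#                                     atrous_rate=2, causal=True)(inp)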
|
recipes/Python/578107_Tracking_Manipulating_PythImport/recipe-578107.py | tdiprima/code | 2,023 | 12673809 | <gh_stars>1000+
"""import_state.py
A rough implementation of PEP 405. This module centers on manipulating
the normal Python import machinery through its defined state. Any other
approach, such as replacing builtins.__import__ is certainly legal, but
not supported here.
"""
__all__ = ['ImportState', 'default_import_state', 'globalstate']
import sys
import builtins
import site
import importlib
import _imp
from collections import namedtuple
class GlobalImportLock:
# no need for a generic ImportLock type, since all import states
# use the same lock
    def acquire(self):
        _imp.acquire_lock()
    def release(self):
        _imp.release_lock()
    @property
    def lock_held(self):
        return _imp.lock_held()
_ImportState = namedtuple('_ImportState', (
'modules',
'meta_path',
'path',
'path_hooks',
'path_importer_cache',
))
class ImportState(_ImportState):
"""A container for the import state (a la PEP 406).
The dictionary in sys.modules is a special case, since it is part
of the CPython interpreter state. Binding a different dict there
is problematic, since the import machinery may use the internal
reference to the original dict, rather than looking up sys.modules.
The consequence is that the _contents_ of sys.modules must be
swapped in and out, rather than simply binding something else there.
ImportState objects may be used as context managers, to activate the
state temporarily. During a with statement the dict in self.modules
may not reflect the actual state. However, it _will_ be correct
before and after the with statement.
"""
# all import states use the same lock
lock = GlobalImportLock()
def __init__(self, *args, **kwargs):
self._saved = None
def __enter__(self):
self.lock.acquire()
        self.activate()
        return self
def __exit__(self, *args, **kwargs):
self.deactivate()
self.lock.release()
def copy(self):
"""Return a shallow copy of the import state."""
return type(self)(self.modules.copy(), self.meta_path[:],
self.path[:], self.path_hooks[:],
self.path_importer_cache.copy())
def activate(self, force=False):
"""Have the interpreter use this import state, saving the old."""
if self._saved is not None and not force:
raise TypeError("Already activated; try using a copy")
self._saved = _ImportState(
sys.modules.copy(), # saving away the contents
sys.meta_path,
sys.path,
sys.path_hooks,
sys.path_importer_cache,
)
#sys.modules = self.modules
sys.meta_path = self.meta_path
sys.path = self.path
        sys.path_hooks = self.path_hooks
sys.path_importer_cache = self.path_importer_cache
# accommodate sys.module's quirkiness
sys.modules.clear()
sys.modules.update(self.modules)
def deactivate(self):
"""Restore the import state saved when this one activated."""
if not self._saved:
raise TypeError("Not activated yet")
# sys.modules = self.modules
sys.meta_path = self._saved.meta_path
sys.path = self._saved.path
sys.path_hooks = self._saved.path_hooks
sys.path_importer_cache = self._saved.path_importer_cache
# accommodate sys.module's quirkiness
self.modules.clear()
self.modules.update(sys.modules)
sys.modules.clear()
sys.modules.update(self._saved.modules)
self._saved = None
def default_import_state(**overrides):
"""Return an ImportState with defaults to the initial import state."""
state = {
'modules': {},
'meta_path': [],
'path': site.getsitepackages(),
'path_hooks': [],
'path_importer_cache': {},
}
state.update(overrides)
return ImportState(**state)
class GlobalImportState(ImportState):
"""An ImportState that wraps the current state"""
# The underlying ImportState values will be ignored.
def __new__(cls):
return super(GlobalImportState, cls).__new__(cls, *([None]*5))
@property
def modules(self):
"""The cache of modules that have already been imported."""
return sys.modules
@property
def meta_path(self):
"""The PEP 302 finders queried before 'path' is traversed."""
return sys.meta_path
@property
def path(self):
"""The directories in which top-level packages are located."""
return sys.path
@property
def path_hooks(self):
"""The PEP 302 path importers that are queried for a path."""
return sys.path_hooks
@property
def path_importer_cache(self):
"""The cache of finders previously found through path_hooks."""
return sys.path_importer_cache
globalstate = GlobalImportState()
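if __name__ == '__main__':
    # Small demonstration (editor's addition, not part of the original recipe):
    # build an isolated import state and compare it with the live global state.
    isolated = default_import_state()
    print('isolated state starts with', len(isolated.modules), 'cached modules')
    print('global state currently caches', len(globalstate.modules), 'modules')
    # Activating `isolated` temporarily would be done with a with-statement:
    #     with isolated:
    #         import some_module   # ends up cached in isolated.modules only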
|
tests/test_at.py | CrackerCat/regexploit | 592 | 12673829 | <filename>tests/test_at.py
import pytest
from regexploit.ast.at import EndOfString
from regexploit.ast.sre import SreOpParser
def from_regex(pattern: str):
return SreOpParser().parse_sre(pattern)
@pytest.mark.parametrize(
"r",
[
r".*b*",
r".*\w*b*",
r".+b*",
],
)
def test_cannot_backtrack(r):
dollar = EndOfString()
dollar.set_character(from_regex(r).elements)
assert dollar.character.is_any
@pytest.mark.parametrize(
"r",
[
r"x[ab]*b*",
r"x+[ab]*",
r"x+a*[ab]*a*b*",
],
)
def test_dollar_simple(r):
dollar = EndOfString()
dollar.set_character(from_regex(r).elements)
assert dollar.character == from_regex("[ab]")
@pytest.mark.parametrize(
"r",
[
r"\w*b*",
r"x\w*\w*b*",
r"\w+b*",
],
)
def test_dollar_optionals_contained_by_mandatory(r):
dollar = EndOfString()
dollar.set_character(from_regex(r).elements)
assert dollar.character == from_regex(r"[\w]").expand_categories()
def test_whole_string():
dollar = EndOfString()
dollar.set_character(from_regex(r"a*a*").elements)
assert dollar.character == from_regex(r"[a]")
def test_real():
dollar = EndOfString()
dollar.set_character(from_regex(r"-\d+(\s*\s*\s*)").elements)
assert dollar.character == from_regex(r"[\s]")
|
doc/generate-doc.py | ponty/pyscreenshot | 416 | 12673830 | import glob
import logging
import os
from easyprocess import EasyProcess
from entrypoint2 import entrypoint
# (cmd,grab,background)
commands = [
"python3 -m pyscreenshot.check.versions",
"python3 -m pyscreenshot.examples.virtdisp",
"python3 -m pyscreenshot.check.speedtest",
"python3 -m pyscreenshot.check.speedtest --childprocess 0",
]
def empty_dir(dir):
files = glob.glob(os.path.join(dir, "*"))
for f in files:
os.remove(f)
@entrypoint
def main():
gendir = os.path.join(os.path.dirname(os.path.abspath(__file__)), "gen")
logging.info("gendir: %s", gendir)
os.makedirs(gendir, exist_ok=True)
empty_dir(gendir)
pls = []
try:
os.chdir("gen")
for cmd in commands:
logging.info("cmd: %s", cmd)
fname_base = cmd.replace(" ", "_")
fname = fname_base + ".txt"
logging.info("cmd: %s", cmd)
print("file name: %s" % fname)
with open(fname, "w") as f:
f.write("$ " + cmd + "\n")
p = EasyProcess(cmd).call()
f.write(p.stdout)
f.write(p.stderr)
pls += [p]
finally:
os.chdir("..")
for p in pls:
p.stop()
embedme = EasyProcess(["npx", "embedme", "../README.md"])
embedme.call()
print(embedme.stdout)
assert embedme.return_code == 0
assert not "but file does not exist" in embedme.stdout
|
gazelle/bzl/testdata/defaultvisibility/nested/dir/bar.bzl | jkjk822/bazel-skylib | 223 | 12673834 | <gh_stars>100-1000
"""
Doc string
"""
def asdf():
pass
|
carla/recourse_methods/catalog/focus/distances.py | jayanthyetukuri/CARLA | 140 | 12673841 | import tensorflow as tf
from tensorflow.losses import Reduction
def distance_func(name, x1, x2, eps: float = 0.0):
if name == "l1":
ax = 1
return l1_dist(x1, x2, ax, eps)
if name == "l2":
ax = 1
return l2_dist(x1, x2, ax, eps)
if name == "cosine":
ax = -1
return cosine_dist(x1, x2, ax, eps)
def l1_dist(x1, x2, ax: int, eps: float = 0.0):
# sum over |x| + eps, i.e. L1 norm
x = x1 - x2
return tf.reduce_sum(tf.abs(x), axis=ax) + eps
def l2_dist(x1, x2, ax: int, eps: float = 0.0):
# sqrt((sum over x^2) + eps)), i.e. L2 norm
x = x1 - x2
return (tf.reduce_sum(x ** 2, axis=ax) + eps) ** 0.5
def cosine_dist(x1, x2, ax: int, eps: float = 0.0):
# normalize by sqrt(max(sum(x**2), 1e-12))
normalize_x1 = tf.nn.l2_normalize(x1, dim=1)
normalize_x2 = tf.nn.l2_normalize(x2, dim=1)
dist = (
tf.losses.cosine_distance(
normalize_x1, normalize_x2, axis=ax, reduction=Reduction.NONE
)
+ eps
)
dist = tf.squeeze(dist)
dist = tf.cast(dist, tf.float64)
return dist
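# Usage sketch (editor's addition; assumes the TensorFlow 1.x graph/session
# API that this module is written against):
#
#     import numpy as np
#     a = tf.constant(np.random.rand(4, 10))
#     b = tf.constant(np.random.rand(4, 10))
#     d = distance_func("l2", a, b, eps=1e-8)   # shape (4,)
#     with tf.Session() as sess:
#         print(sess.run(d))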
|
pytimeparse/__init__.py | Dushistov/pytimeparse | 216 | 12673848 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
__init__.py
(c) <NAME> <<EMAIL>> 1 February, 2014
`timeparse` module.
'''
from __future__ import absolute_import
from codecs import open
from os import path
# Version. For each new release, the version number should be updated
# in the file VERSION.
try:
# If a VERSION file exists, use it!
with open(path.join(path.dirname(__file__), 'VERSION'),
encoding='utf-8') as infile:
__version__ = infile.read().strip()
except NameError:
__version__ = 'unknown (running code interactively?)'
except IOError as ex:
__version__ = "unknown (%s)" % ex
# import top-level functionality
from .timeparse import timeparse as parse
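# Doctest-style example (editor's addition) of the re-exported ``parse``
# helper; the expected values follow the project's README:
#
#     >>> from pytimeparse import parse
#     >>> parse('1h 24m 13s')
#     5053
#     >>> parse('1:24')
#     84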
|
vlcp/protocol/openflow/defs/openflow10.py | hubo1016/vlcp | 252 | 12673878 | '''
/*
* Copyright (c) 2008, 2009, 2010, 2011, 2012, 2013 Nicira, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at:
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/* OpenFlow: protocol between controller and datapath. */
Created on 2015/7/13
:author: hubo
'''
from .common import *
from . import common
from namedstruct.namedstruct import StructDefWarning
import warnings as _warnings
with _warnings.catch_warnings():
_warnings.filterwarnings('ignore', '^padding', StructDefWarning)
'''
/* Port number(s) meaning
* --------------- --------------------------------------
* 0x0000 not assigned a meaning by OpenFlow 1.0
* 0x0001...0xfeff "physical" ports
* 0xff00...0xfff7 "reserved" but not assigned a meaning by OpenFlow 1.0
* 0xfff8...0xffff "reserved" OFPP_* ports with assigned meanings
*/
/* Ranges. */
'''
ofp_port = enum('ofp_port',
globals(),
uint16,
OFPP_MAX = 0xff00, # /* Max # of switch ports. */
# /* Reserved output "ports". */
OFPP_IN_PORT = 0xfff8, # /* Where the packet came in. */
OFPP_TABLE = 0xfff9, # /* Perform actions in flow table. */
OFPP_NORMAL = 0xfffa, # /* Process with normal L2/L3. */
OFPP_FLOOD = 0xfffb, # /* All ports except input port and
# * ports disabled by STP. */
OFPP_ALL = 0xfffc, # /* All ports except input port. */
OFPP_CONTROLLER = 0xfffd, # /* Send to controller. */
OFPP_LOCAL = 0xfffe, # /* Local openflow "port". */
OFPP_NONE = 0xffff # /* Not associated with any port. */
)
ofp_port_no = ofp_port
OFPP_FIRST_RESV = 0xfff8 # /* First assigned reserved port. */
OFPP_LAST_RESV = 0xffff # /* Last assigned reserved port. */
ofp_type = ofp_type.extend(globals(),
OFPT_VENDOR = 4,
OFPT_FEATURES_REQUEST = 5, #/* Controller/switch message */
OFPT_FEATURES_REPLY = 6, #/* Controller/switch message */
OFPT_GET_CONFIG_REQUEST = 7, #/* Controller/switch message */
OFPT_GET_CONFIG_REPLY = 8, #/* Controller/switch message */
OFPT_SET_CONFIG = 9, #/* Controller/switch message */
OFPT_PACKET_IN = 10, #/* Async message */
OFPT_FLOW_REMOVED = 11, #/* Async message */
OFPT_PORT_STATUS = 12, #/* Async message */
OFPT_PACKET_OUT = 13, #/* Controller/switch message */
OFPT_FLOW_MOD = 14, #/* Controller/switch message */
OFPT_PORT_MOD = 15, #/* Controller/switch message */
OFPT_STATS_REQUEST = 16, #/* Controller/switch message */
OFPT_STATS_REPLY = 17, #/* Controller/switch message */
OFPT_BARRIER_REQUEST = 18, #/* Controller/switch message */
OFPT_BARRIER_REPLY = 19, #/* Controller/switch message */
OFPT_QUEUE_GET_CONFIG_REQUEST = 20, #/* Controller/switch message */
OFPT_QUEUE_GET_CONFIG_REPLY = 21 #/* Controller/switch message */
)
ofp_type_reply_set = set([OFPT_ECHO_REPLY, OFPT_FEATURES_REPLY, OFPT_GET_CONFIG_REPLY, OFPT_STATS_REPLY, OFPT_BARRIER_REPLY, OFPT_QUEUE_GET_CONFIG_REPLY])
ofp_type_asyncmessage_set = set([OFPT_PACKET_IN, OFPT_FLOW_REMOVED, OFPT_PORT_STATUS])
OFP_VERSION = OFP10_VERSION
ofp_msg = nstruct(name = 'ofp_msg',
base = common.ofp_msg_mutable,
criteria = lambda x: x.header.version == OFP_VERSION,
init = packvalue(OFP_VERSION, 'header', 'version'),
classifyby = (OFP_VERSION,),
classifier = lambda x: x.header.type,
extend = {('header','type') : ofp_type})
ofp_vendor = nstruct((experimenter_ids, 'vendor'),
name = 'ofp_vendor',
base = ofp_msg,
criteria = lambda x: x.header.type == OFPT_VENDOR,
classifyby = (OFPT_VENDOR,),
init = packvalue(OFPT_VENDOR, 'header', 'type')
)
ofp_error_type = ofp_error_type.extend(globals(),
OFPET_FLOW_MOD_FAILED = 3,
OFPET_PORT_MOD_FAILED = 4,
OFPET_QUEUE_OP_FAILED = 5)
'''
/* ofp_error_msg 'code' values for OFPET_FLOW_MOD_FAILED. 'data' contains
* at least the first 64 bytes of the failed request. */
'''
ofp_flow_mod_failed_code = enum('ofp_flow_mod_failed_code', globals(),
OFPFMFC_ALL_TABLES_FULL = 0, # /* Flow not added because of full tables. */
OFPFMFC_OVERLAP = 1, # /* Attempted to add overlapping flow with
# * CHECK_OVERLAP flag set. */
OFPFMFC_EPERM = 2, # /* Permissions error. */
OFPFMFC_BAD_EMERG_TIMEOUT = 3, # /* Flow not added because of non-zero idle/hard
# * timeout. */
OFPFMFC_BAD_COMMAND = 4, # /* Unknown command. */
OFPFMFC_UNSUPPORTED = 5, # /* Unsupported action list - cannot process in
# * the order specified. */
)
'''
/* ofp_error_msg 'code' values for OFPET_PORT_MOD_FAILED. 'data' contains
* at least the first 64 bytes of the failed request. */
'''
ofp_port_mod_failed_code = enum('ofp_port_mod_failed_code', globals(),
OFPPMFC_BAD_PORT = 0, #/* Specified port does not exist. */
OFPPMFC_BAD_HW_ADDR = 1, #/* Specified hardware address is wrong. */
)
'''
/* ofp_error msg 'code' values for OFPET_QUEUE_OP_FAILED. 'data' contains
* at least the first 64 bytes of the failed request */
'''
ofp_queue_op_failed_code = enum('ofp_queue_op_failed_code', globals(),
OFPQOFC_BAD_PORT = 0, # /* Invalid port (or port does not exist). */
OFPQOFC_BAD_QUEUE = 1, # /* Queue does not exist. */
OFPQOFC_EPERM = 2 # /* Permissions error. */
)
ofp_error_types = dict(ofp_error_types)
ofp_error_types.update({OFPET_FLOW_MOD_FAILED : ofp_error_typedef(OFPET_FLOW_MOD_FAILED, ofp_flow_mod_failed_code, OFP_VERSION, ofp_error_type),
OFPET_PORT_MOD_FAILED : ofp_error_typedef(OFPET_PORT_MOD_FAILED, ofp_port_mod_failed_code, OFP_VERSION, ofp_error_type),
OFPET_QUEUE_OP_FAILED : ofp_error_typedef(OFPET_QUEUE_OP_FAILED, ofp_queue_op_failed_code, OFP_VERSION, ofp_error_type)})
ofp_switch_config = nstruct((ofp_config_flags, 'flags'),
(uint16, 'miss_send_len'),
name = 'ofp_switch_config',
base = ofp_msg,
criteria = lambda x: x.header.type == OFPT_GET_CONFIG_REPLY or x.header.type == OFPT_SET_CONFIG,
classifyby = (OFPT_GET_CONFIG_REPLY, OFPT_SET_CONFIG),
init = packvalue(OFPT_SET_CONFIG, 'header','type'))
'''
/* OpenFlow 1.0 specific capabilities supported by the datapath (struct
* ofp_switch_features, member capabilities). */
'''
ofp_capabilities = ofp_capabilities.extend(
globals(),
OFPC_STP = 1 << 3, #/* 802.1d spanning tree. */
OFPC_RESERVED = 1 << 4) #/* Reserved, must not be set. */
'''
/* OpenFlow 1.0 specific current state of the physical port. These are not
* configurable from the controller.
*/
/* The OFPPS10_STP_* bits have no effect on switch operation. The
* controller must adjust OFPPC_NO_RECV, OFPPC_NO_FWD, and
* OFPPC_NO_PACKET_IN appropriately to fully implement an 802.1D spanning
* tree. */
'''
ofp_port_state = ofp_port_state.extend(globals(),
OFPPS_STP_LISTEN = 0 << 8, # /* Not learning or relaying frames. */
OFPPS_STP_LEARN = 1 << 8, # /* Learning but not relaying frames. */
OFPPS_STP_FORWARD = 2 << 8, # /* Learning and relaying frames. */
OFPPS_STP_BLOCK = 3 << 8 # /* Not part of spanning tree. */
) # /* Bit mask for OFPPS10_STP_* values. */
OFPPS_STP_MASK = 3 << 8
OFPPS_ALL = OFPPS_LINK_DOWN | OFPPS_STP_MASK
ofp_action_type = enum('ofp_action_type', globals(),
uint16,
OFPAT_OUTPUT = 0, #/* Output to switch port. */
OFPAT_SET_VLAN_VID = 1, #/* Set the 802.1q VLAN id. */
OFPAT_SET_VLAN_PCP = 2, #/* Set the 802.1q priority. */
OFPAT_STRIP_VLAN = 3, #/* Strip the 802.1q header. */
OFPAT_SET_DL_SRC = 4, #/* Ethernet source address. */
OFPAT_SET_DL_DST = 5, #/* Ethernet destination address. */
OFPAT_SET_NW_SRC = 6, #/* IP source address. */
OFPAT_SET_NW_DST = 7, #/* IP destination address. */
OFPAT_SET_NW_TOS = 8, #/* IP ToS (DSCP field, 6 bits). */
OFPAT_SET_TP_SRC = 9, #/* TCP/UDP source port. */
OFPAT_SET_TP_DST = 10, #/* TCP/UDP destination port. */
OFPAT_ENQUEUE = 11, #/* Output to queue. */
OFPAT_VENDOR = 0xffff)
ofp_action = nstruct((ofp_action_type, 'type'),
(uint16, 'len'),
name = 'ofp_action',
size = lambda x: x.len,
prepack = packsize('len'),
classifier = lambda x: x.type
)
ofp_action_vendor = nstruct((experimenter_ids, 'vendor'),
name = 'ofp_action_vendor',
base = ofp_action,
criteria = lambda x: x.type == OFPAT_VENDOR,
classifyby = (OFPAT_VENDOR,),
init = packvalue(OFPAT_VENDOR, 'type')
)
'''
/* Action structure for OFPAT10_OUTPUT, which sends packets out 'port'.
* When the 'port' is the OFPP_CONTROLLER, 'max_len' indicates the max
* number of bytes to send. A 'max_len' of zero means no bytes of the
* packet should be sent. */
'''
ofp_action_output = nstruct((ofp_port, 'port'),
(uint16, 'max_len'),
name = 'ofp_action_output',
base = ofp_action,
criteria = lambda x: x.type == OFPAT_OUTPUT,
classifyby = (OFPAT_OUTPUT,),
init = packvalue(OFPAT_OUTPUT, 'type'))
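# Construction sketch (editor's addition; assumes namedstruct's keyword
# initialization and serialization helpers, as used elsewhere in vlcp):
#
#     act = ofp_action_output(port = OFPP_CONTROLLER, max_len = 0xffff)
#     data = act._tobytes()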
'''
/* Action structure for OFPAT10_SET_VLAN_VID and OFPAT11_SET_VLAN_VID. */
'''
ofp_action_vlan_vid = nstruct(
(uint16, 'vlan_vid'), # /* VLAN id. */
(uint8[2],),
name = 'ofp_action_vlan_vid',
base = ofp_action,
criteria = lambda x: x.type == OFPAT_SET_VLAN_VID,
classifyby = (OFPAT_SET_VLAN_VID,),
init = packvalue(OFPAT_SET_VLAN_VID, 'type'))
'''
/* Action structure for OFPAT10_SET_VLAN_PCP and OFPAT11_SET_VLAN_PCP. */
'''
ofp_action_vlan_pcp = nstruct(
(uint8, 'vlan_pcp'), # /* VLAN priority. */
(uint8[3],),
name = 'ofp_action_vlan_pcp',
base = ofp_action,
criteria = lambda x: x.type == OFPAT_SET_VLAN_PCP,
classifyby = (OFPAT_SET_VLAN_PCP,),
init = packvalue(OFPAT_SET_VLAN_PCP, 'type'))
'''
/* Action structure for OFPAT10_SET_DL_SRC/DST and OFPAT11_SET_DL_SRC/DST. */
'''
ofp_action_dl_addr = nstruct(
(mac_addr, 'dl_addr'), # /* Ethernet address. */
(uint8[6],),
name = 'ofp_action_dl_addr',
base = ofp_action,
criteria = lambda x: x.type == OFPAT_SET_DL_SRC or x.type == OFPAT_SET_DL_DST,
classifyby = (OFPAT_SET_DL_SRC, OFPAT_SET_DL_DST),
init = packvalue(OFPAT_SET_DL_SRC, 'type'))
'''
/* Action structure for OFPAT10_SET_NW_SRC/DST and OFPAT11_SET_NW_SRC/DST. */
'''
ofp_action_nw_addr = nstruct(
(ip4_addr, 'nw_addr'), # /* IP address. */
name = 'ofp_action_nw_addr',
base = ofp_action,
criteria = lambda x: x.type == OFPAT_SET_NW_SRC or x.type == OFPAT_SET_NW_DST,
classifyby = (OFPAT_SET_NW_SRC, OFPAT_SET_NW_DST),
init = packvalue(OFPAT_SET_NW_SRC, 'type'))
'''
/* Action structure for OFPAT10_SET_NW_TOS and OFPAT11_SET_NW_TOS. */
'''
ofp_action_nw_tos = nstruct(
(uint8, 'nw_tos'), # /* DSCP in high 6 bits, rest ignored. */
(uint8[3],),
name = 'ofp_action_nw_tos',
base = ofp_action,
criteria = lambda x: x.type == OFPAT_SET_NW_TOS,
classifyby = (OFPAT_SET_NW_TOS,),
init = packvalue(OFPAT_SET_NW_TOS, 'type'))
'''
/* Action structure for OFPAT10_SET_TP_SRC/DST and OFPAT11_SET_TP_SRC/DST. */
'''
ofp_action_tp_port = nstruct(
(uint16, 'tp_port'), # /* TCP/UDP port. */
(uint8[2],),
name = 'ofp_action_tp_port',
base = ofp_action,
criteria = lambda x: x.type == OFPAT_SET_TP_SRC or x.type == OFPAT_SET_TP_DST,
classifyby = (OFPAT_SET_TP_SRC, OFPAT_SET_TP_DST),
init = packvalue(OFPAT_SET_TP_SRC, 'type'))
'''
/* OpenFlow 1.0 specific features of physical ports available in a datapath. */
'''
ofp_port_features = ofp_port_features.extend(globals(),
OFPPF_COPPER = 1 << 7, #/* Copper medium. */
OFPPF_FIBER = 1 << 8, #/* Fiber medium. */
OFPPF_AUTONEG = 1 << 9, #/* Auto-negotiation. */
OFPPF_PAUSE = 1 << 10, #/* Pause. */
OFPPF_PAUSE_ASYM = 1 << 11 #/* Asymmetric pause. */
)
'''
/* Description of a physical port */
'''
ofp_phy_port = nstruct(
(ofp_port, 'port_no'),
(mac_addr, 'hw_addr'),
(char[OFP_MAX_PORT_NAME_LEN], 'name'), #/* Null-terminated */
(ofp_port_config, 'config'), # /* Bitmap of OFPPC_* and OFPPC10_* flags. */
(ofp_port_state, 'state'), # /* Bitmap of OFPPS_* and OFPPS10_* flags. */
#/* Bitmaps of OFPPF_* and OFPPF10_* that describe features. All bits
# * zeroed if unsupported or unavailable. */
(ofp_port_features, 'curr'), # /* Current features. */
(ofp_port_features, 'advertised'), # /* Features being advertised by the port. */
(ofp_port_features, 'supported'), # /* Features supported by the port. */
(ofp_port_features, 'peer'), # /* Features advertised by peer. */
name = 'ofp_phy_port'
)
ofp_action_type_bitwise = enum('ofp_action_type_bitwise', None, uint32, True,
**dict((k, 1<<v) for (k,v) in ofp_action_type.getDict().items() if v < 32))
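# Illustrative note: ofp_action_type_bitwise maps every OFPAT_* value v below 32
# to the single bit 1 << v; this is the form used by the 'actions' bitmap in
# ofp_switch_features (defined just below) to report which actions a switch
# supports. For example, a switch supporting only OFPAT_OUTPUT (0) and
# OFPAT_ENQUEUE (11) would report (1 << 0) | (1 << 11) == 0x801.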
ofp_switch_features = nstruct((uint64, 'datapath_id'),
(uint32, 'n_buffers'),
(uint8, 'n_tables'),
(uint8[3],),
(ofp_capabilities, 'capabilities'),
(ofp_action_type_bitwise, 'actions'),
(ofp_phy_port[0], 'ports'),
name = 'ofp_switch_features',
base = ofp_msg,
criteria = lambda x: x.header.type == OFPT_FEATURES_REPLY,
classifyby = (OFPT_FEATURES_REPLY,),
init = packvalue(OFPT_FEATURES_REPLY, 'header', 'type'))
'''
/* Modify behavior of the physical port */
'''
ofp_port_mod = nstruct(
(ofp_port, 'port_no'),
(mac_addr, 'hw_addr'),
(ofp_port_config, 'config'), # /* Bitmap of OFPPC_* flags. */
(ofp_port_config, 'mask'), # /* Bitmap of OFPPC_* flags to be changed. */
(ofp_port_features, 'advertise'), # /* Bitmap of "ofp_port_features"s. Zero all bits to prevent any action taking place. */
(uint8[4],), # /* Pad to 64-bits. */
name = 'ofp_port_mod',
base = ofp_msg,
criteria = lambda x: x.header.type == OFPT_PORT_MOD,
classifyby = (OFPT_PORT_MOD,),
init = packvalue(OFPT_PORT_MOD, 'header', 'type')
)
ofp_queue_prop_header = nstruct((ofp_queue_properties, 'property'),
(uint16, 'len'),
(uint8[4],),
name = 'ofp_queue_prop_header')
ofp_queue_prop = nstruct((ofp_queue_prop_header, 'prop_header'),
name = 'ofp_queue_prop',
size = lambda x: x.prop_header.len,
prepack = packrealsize('prop_header', 'len'),
classifier = lambda x: x.prop_header.property
)
ofp_queue_prop_min_rate = nstruct((uint16, 'rate'),
(uint8[6],),
base = ofp_queue_prop,
criteria = lambda x: x.prop_header.property == OFPQT_MIN_RATE,
classifyby = (OFPQT_MIN_RATE,),
init = packvalue(OFPQT_MIN_RATE, 'prop_header', 'property'),
name = 'ofp_queue_prop_min_rate')
ofp_packet_queue = nstruct(
(uint32, 'queue_id'), # /* id for the specific queue. */
(uint16, 'len'), # /* Length in bytes of this queue desc. */
(uint8[2],), # /* 64-bit alignment. */
(ofp_queue_prop[0], 'properties'),
name = 'ofp_packet_queue',
size = lambda x: x.len,
prepack = packsize('len')
)
'''
/* Query for port queue configuration. */
'''
ofp_queue_get_config_request = nstruct(
(uint16, 'port'), # /* Port to be queried. Should refer to a valid physical port (i.e. < OFPP_MAX) */
(uint8[2],),
name = 'ofp_queue_get_config_request',
base = ofp_msg,
criteria = lambda x: x.header.type == OFPT_QUEUE_GET_CONFIG_REQUEST,
classifyby = (OFPT_QUEUE_GET_CONFIG_REQUEST,),
init = packvalue(OFPT_QUEUE_GET_CONFIG_REQUEST, 'header', 'type')
)
'''
/* Queue configuration for a given port. */
'''
ofp_queue_get_config_reply = nstruct(
(uint16, 'port'),
(uint8[6],),
(ofp_packet_queue[0], 'queues'), # /* List of configured queues. */
base = ofp_msg,
name = 'ofp_queue_get_config_reply',
criteria = lambda x: x.header.type == OFPT_QUEUE_GET_CONFIG_REPLY,
classifyby = (OFPT_QUEUE_GET_CONFIG_REPLY,),
init = packvalue(OFPT_QUEUE_GET_CONFIG_REPLY, 'header', 'type')
)
'''
/* Packet received on port (datapath -> controller). */
'''
ofp_packet_in = nstruct(
(uint32, 'buffer_id'), # /* ID assigned by datapath. */
(uint16, 'total_len'), # /* Full length of frame. */
(ofp_port, 'in_port'), # /* Port on which frame was received. */
(ofp_packet_in_reason, 'reason'), # /* Reason packet is being sent (one of OFPR_*) */
(uint8,),
(raw, 'data'),
base = ofp_msg,
name = 'ofp_packet_in',
criteria = lambda x: x.header.type == OFPT_PACKET_IN,
classifyby = (OFPT_PACKET_IN,),
init = packvalue(OFPT_PACKET_IN, 'header', 'type')
)
'''
/* OFPAT10_ENQUEUE action struct: send packets to given queue on port. */
'''
ofp_action_enqueue = nstruct(
    (uint16, 'port'), # /* Port that queue belongs. Should refer to a valid physical port (i.e. < OFPP_MAX) or OFPP_IN_PORT. */
    (uint8[6],), # /* Pad for 64-bit alignment. */
    (uint32, 'queue_id'), # /* Where to enqueue the packets. */
    name = 'ofp_action_enqueue',
base = ofp_action,
criteria = lambda x: x.type == OFPAT_ENQUEUE,
classifyby = (OFPAT_ENQUEUE,),
init = packvalue(OFPAT_ENQUEUE, 'type')
)
'''
/* Send packet (controller -> datapath). */
'''
def _ofp_packet_out_actions_packsize(x):
x.actions_len = x._realsize() - 2
ofp_packet_out_actions = nstruct(
(uint16, 'actions_len'),
(ofp_action[0], 'actions'),
name = 'ofp_packet_out_actions',
size = lambda x: x.actions_len + 2,
prepack = _ofp_packet_out_actions_packsize,
padding = 1)
ofp_packet_out = nstruct(
(uint32, 'buffer_id'), # /* ID assigned by datapath or UINT32_MAX. */
(ofp_port, 'in_port'), # /* Packet's input port (OFPP_NONE if none). */
(ofp_packet_out_actions,),
(raw, 'data'),
name = 'ofp_packet_out',
base = ofp_msg,
criteria = lambda x: x.header.type == OFPT_PACKET_OUT,
classifyby = (OFPT_PACKET_OUT,),
init = packvalue(OFPT_PACKET_OUT, 'header', 'type')
)
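# Hedged sketch (not part of the original definitions): building a PACKET_OUT
# message with a single output action. It assumes the nstruct-generated types
# accept keyword initializers and that the anonymously embedded
# ofp_packet_out_actions exposes its 'actions' field on the parent struct, as
# the size/prepack hooks above suggest; verify against the namedstruct API
# before relying on it.
def _example_packet_out(buffer_id, in_port, out_port):
    return ofp_packet_out(
        buffer_id = buffer_id,
        in_port = in_port,
        actions = [ofp_action_output(port = out_port, max_len = 0)]
    )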
'''
/* Flow wildcards. */
'''
OFPFW_NW_SRC_SHIFT = 8
OFPFW_NW_SRC_BITS = 6
OFPFW_NW_DST_SHIFT = 14
OFPFW_NW_DST_BITS = 6
ofp_flow_wildcards = enum('ofp_flow_wildcards', globals(),uint32,True,
OFPFW_IN_PORT = 1 << 0, #/* Switch input port. */
OFPFW_DL_VLAN = 1 << 1, #/* VLAN vid. */
OFPFW_DL_SRC = 1 << 2, #/* Ethernet source address. */
OFPFW_DL_DST = 1 << 3, #/* Ethernet destination address. */
OFPFW_DL_TYPE = 1 << 4, #/* Ethernet frame type. */
OFPFW_NW_PROTO = 1 << 5, #/* IP protocol. */
OFPFW_TP_SRC = 1 << 6, #/* TCP/UDP source port. */
OFPFW_TP_DST = 1 << 7, #/* TCP/UDP destination port. */
#/* IP source address wildcard bit count. 0 is exact match, 1 ignores the
#* LSB, 2 ignores the 2 least-significant bits, ..., 32 and higher wildcard
#* the entire field. This is the *opposite* of the usual convention where
#* e.g. /24 indicates that 8 bits (not 24 bits) are wildcarded. */
OFPFW_NW_SRC_MASK = (((1 << OFPFW_NW_SRC_BITS) - 1)
<< OFPFW_NW_SRC_SHIFT),
OFPFW_NW_SRC_ALL = 32 << OFPFW_NW_SRC_SHIFT,
# /* IP destination address wildcard bit count. Same format as source. */
OFPFW_NW_DST_MASK = (((1 << OFPFW_NW_DST_BITS) - 1)
<< OFPFW_NW_DST_SHIFT),
OFPFW_NW_DST_ALL = 32 << OFPFW_NW_DST_SHIFT,
OFPFW_DL_VLAN_PCP = 1 << 20, # /* VLAN priority. */
OFPFW_NW_TOS = 1 << 21, # /* IP ToS (DSCP field, 6 bits). */
# /* Wildcard all fields. */
OFPFW_ALL = ((1 << 22) - 1)
)
#/* The wildcards for ICMP type and code fields use the transport source
# * and destination port fields, respectively. */
OFPFW_ICMP_TYPE = OFPFW_TP_SRC
OFPFW_ICMP_CODE = OFPFW_TP_DST
#/* The VLAN id is 12-bits, so we can use the entire 16 bits to indicate
# * special conditions. All ones indicates that 802.1Q header is not present.
# */
OFP_VLAN_NONE = 0xffff
'''
/* Fields to match against flows */
'''
ofp_match = nstruct(
(ofp_flow_wildcards, 'wildcards'), # /* Wildcard fields. */
(ofp_port, 'in_port'), # /* Input switch port. */
(mac_addr, 'dl_src'), # /* Ethernet source address. */
(mac_addr, 'dl_dst'), # /* Ethernet destination address. */
(uint16, 'dl_vlan'), # /* Input VLAN. */
(uint8, 'dl_vlan_pcp'), # /* Input VLAN priority. */
(uint8[1],), # /* Align to 64-bits. */
(ethertype, 'dl_type'), # /* Ethernet frame type. */
(uint8, 'nw_tos'), # /* IP ToS (DSCP field, 6 bits). */
(uint8, 'nw_proto'), # /* IP protocol or lower 8 bits of ARP opcode. */
(uint8[2],), # /* Align to 64-bits. */
(ip4_addr, 'nw_src'), # /* IP source address. */
(ip4_addr, 'nw_dst'), # /* IP destination address. */
(uint16, 'tp_src'), # /* TCP/UDP source port. */
(uint16, 'tp_dst'), # /* TCP/UDP destination port. */
name = 'ofp_match'
)
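# Minimal sketch (not part of the original module) of the inverted wildcard
# convention documented above: the OFPFW_NW_SRC field stores the number of
# wildcarded low bits (32 - prefix_len), the opposite of CIDR notation. The
# helper below only uses the integer constants defined above.
def _example_wildcards_for_nw_src_prefix(prefix_len):
    # wildcard everything except dl_type and the top prefix_len bits of nw_src
    wildcard_bits = 32 - prefix_len
    return (OFPFW_ALL & ~(OFPFW_NW_SRC_MASK | OFPFW_DL_TYPE)) \
           | (wildcard_bits << OFPFW_NW_SRC_SHIFT)
# e.g. _example_wildcards_for_nw_src_prefix(24) leaves the 8 least-significant
# bits of nw_src wildcarded while the upper 24 bits must match exactly.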
ofp_flow_mod_flags = ofp_flow_mod_flags.extend(globals(),
OFPFF_EMERG = 1 << 2 #/* Part of "emergency flow cache". */
)
'''
/* Flow setup and teardown (controller -> datapath). */
'''
ofp_flow_mod = nstruct(
(ofp_match, 'match'), # /* Fields to match */
(uint64, 'cookie'), # /* Opaque controller-issued identifier. */
# /* Flow actions. */
(ofp_flow_mod_command, 'command'), # /* One of OFPFC_*. */
(uint16, 'idle_timeout'), # /* Idle time before discarding (seconds). */
(uint16, 'hard_timeout'), # /* Max time before discarding (seconds). */
(uint16, 'priority'), # /* Priority level of flow entry. */
(uint32, 'buffer_id'), # /* Buffered packet to apply to (or -1). Not meaningful for OFPFC_DELETE*. */
#/* For OFPFC_DELETE* commands, require matching entries to include this as an
# output port. A value of OFPP_NONE indicates no restriction. */
(ofp_port, 'out_port'),
(ofp_flow_mod_flags, 'flags'), # /* One of OFPFF_*. */
(ofp_action[0], 'actions'), # /* The action length is inferred from the length field in the header. */
base = ofp_msg,
criteria = lambda x: x.header.type == OFPT_FLOW_MOD,
classifyby = (OFPT_FLOW_MOD,),
init = packvalue(OFPT_FLOW_MOD, 'header', 'type'),
name = 'ofp_flow_mod'
)
'''
/* Flow removed (datapath -> controller). */
'''
ofp_flow_removed = nstruct(
(ofp_match, 'match'), # /* Description of fields. */
(uint64, 'cookie'), # /* Opaque controller-issued identifier. */
(uint16, 'priority'), # /* Priority level of flow entry. */
(ofp_flow_removed_reason, 'reason'), # /* One of OFPRR_*. */
(uint8[1],), # /* Align to 32-bits. */
(uint32, 'duration_sec'), # /* Time flow was alive in seconds. */
(uint32, 'duration_nsec'),# /* Time flow was alive in nanoseconds beyond duration_sec. */
(uint16, 'idle_timeout'), # /* Idle timeout from original flow mod. */
(uint8[2],), # /* Align to 64-bits. */
(uint64, 'packet_count'),
(uint64, 'byte_count'),
name = 'ofp_flow_removed',
base = ofp_msg,
criteria = lambda x: x.header.type == OFPT_FLOW_REMOVED,
classifyby = (OFPT_FLOW_REMOVED,),
init = packvalue(OFPT_FLOW_REMOVED, 'header', 'type')
)
ofp_port_status = nstruct(
(ofp_port_reason, 'reason'),
(uint8[7],),
(ofp_phy_port, 'desc'),
name= 'ofp_port_status',
base = ofp_msg,
criteria = lambda x: x.header.type == OFPT_PORT_STATUS,
classifyby = (OFPT_PORT_STATUS,),
init = packvalue(OFPT_PORT_STATUS, 'header', 'type')
)
'''
/* Statistics request or reply message. */
'''
ofp_stats_types = enum('ofp_stats_types', globals(),uint16,
#/* Description of this OpenFlow switch.
#* The request body is empty.
#* The reply body is struct ofp_desc_stats. */
OFPST_DESC = 0,
#/* Individual flow statistics.
#* The request body is struct ofp_flow_stats_request.
#* The reply body is an array of struct ofp_flow_stats. */
OFPST_FLOW = 1,
#/* Aggregate flow statistics.
#* The request body is struct ofp_aggregate_stats_request.
#* The reply body is struct ofp_aggregate_stats_reply. */
OFPST_AGGREGATE = 2,
#/* Flow table statistics.
#* The request body is empty.
#* The reply body is an array of struct ofp_table_stats. */
OFPST_TABLE = 3,
#/* Physical port statistics.
#* The request body is struct ofp_port_stats_request.
#* The reply body is an array of struct ofp_port_stats. */
OFPST_PORT = 4,
#/* Queue statistics for a port
#* The request body defines the port
#* The reply body is an array of struct ofp_queue_stats */
OFPST_QUEUE = 5,
#/* Vendor extension.
#* The request and reply bodies begin with a 32-bit vendor ID, which takes
#* the same form as in "struct ofp_vendor_header". The request and reply
#* bodies are otherwise vendor-defined. */
OFPST_VENDOR = 0xffff
)
ofp_stats_msg = nstruct(
(ofp_stats_types, 'type'), # /* One of the OFPST_* constants. */
(ofp_stats_reply_flags, 'flags'), # /* Requests: always 0.
# * Replies: 0 or OFPSF_REPLY_MORE. */
name = 'ofp_stats_msg',
base = ofp_msg,
criteria = lambda x: x.header.type == OFPT_STATS_REQUEST or x.header.type == OFPT_STATS_REPLY,
classifyby = (OFPT_STATS_REQUEST, OFPT_STATS_REPLY),
init = packvalue(OFPT_STATS_REQUEST, 'header', 'type')
)
ofp_stats_request = nstruct(
name = 'ofp_stats_request',
base = ofp_stats_msg,
criteria = lambda x: x.header.type == OFPT_STATS_REQUEST,
classifier = lambda x: x.type,
init = packvalue(OFPT_STATS_REQUEST, 'header', 'type')
)
ofp_stats_reply = nstruct(
    name = 'ofp_stats_reply',
base = ofp_stats_msg,
criteria = lambda x: x.header.type == OFPT_STATS_REPLY,
classifier = lambda x: x.type,
init = packvalue(OFPT_STATS_REPLY, 'header', 'type')
)
DESC_STR_LEN = 256
SERIAL_NUM_LEN = 32
ofp_desc_stats = nstruct((char[DESC_STR_LEN], 'mfr_desc'),
(char[DESC_STR_LEN], 'hw_desc'),
(char[DESC_STR_LEN], 'sw_desc'),
(char[SERIAL_NUM_LEN], 'serial_num'),
(char[DESC_STR_LEN], 'dp_desc'),
name = 'ofp_desc_stats')
ofp_desc_stats_reply = nstruct(
(ofp_desc_stats,),
name = 'ofp_desc_stats_reply',
base = ofp_stats_reply,
criteria = lambda x: x.type == OFPST_DESC,
classifyby = (OFPST_DESC,),
init = packvalue(OFPST_DESC, 'type')
)
'''
/* Stats request of type OFPST_AGGREGATE or OFPST_FLOW. */
'''
ofp_flow_stats_request = nstruct(
(ofp_match, 'match'), # /* Fields to match. */
(ofp_table, 'table_id'), # /* ID of table to read (from ofp_table_stats) or 0xff for all tables. */
(uint8,), # /* Align to 32 bits. */
(ofp_port, 'out_port'), # /* Require matching entries to include this as an output port. A value of OFPP_NONE indicates no restriction. */
name = 'ofp_flow_stats_request',
base = ofp_stats_request,
criteria = lambda x: x.type == OFPST_FLOW or x.type == OFPST_AGGREGATE,
classifyby = (OFPST_FLOW, OFPST_AGGREGATE),
init = packvalue(OFPST_FLOW, 'type')
)
'''
/* Body of reply to OFPST_FLOW request. */
'''
ofp_flow_stats = nstruct(
(uint16, 'length'), #/* Length of this entry. */
(uint8, 'table_id'), #/* ID of table flow came from. */
(uint8,),
(ofp_match, 'match'), #/* Description of fields. */
(uint32, 'duration_sec'), #/* Time flow has been alive in seconds. */
(uint32, 'duration_nsec'), #/* Time flow has been alive in nanoseconds beyond duration_sec. */
(uint16, 'priority'), #/* Priority of the entry. Only meaningful when this is not an exact-match entry. */
(uint16, 'idle_timeout'), #/* Number of seconds idle before expiration. */
(uint16, 'hard_timeout'), #/* Number of seconds before expiration. */
(uint8[6],), #/* Align to 64 bits. */
(uint64, 'cookie'), #/* Opaque controller-issued identifier. */
(uint64, 'packet_count'), #/* Number of packets in flow. */
(uint64, 'byte_count'), #/* Number of bytes in flow. */
(ofp_action[0], 'actions'),#/* Actions. */
name = 'ofp_flow_stats',
size = lambda x: x.length,
prepack = packsize('length')
)
ofp_flow_stats_reply = nstruct(
(ofp_flow_stats[0], 'stats'),
name = 'ofp_flow_stats_reply',
base = ofp_stats_reply,
criteria = lambda x: x.type == OFPST_FLOW,
classifyby = (OFPST_FLOW,),
init = packvalue(OFPST_FLOW, 'type')
)
ofp_table = enum('ofp_table',
globals(),
uint8,
OFPTT_ALL = 0xff)
'''
/* Body for ofp_stats_request of type OFPST_AGGREGATE. */
'''
ofp_aggregate_stats_request = nstruct(
(ofp_match, 'match'), # /* Fields to match. */
(ofp_table, 'table_id'), # /* ID of table to read (from ofp_table_stats)
# 0xff for all tables or 0xfe for emergency. */
(uint8,), # /* Align to 32 bits. */
(ofp_port, 'out_port'), # /* Require matching entries to include this
# as an output port. A value of OFPP_NONE
# indicates no restriction. */
base = ofp_stats_request,
criteria = lambda x: x.type == OFPST_AGGREGATE,
classifyby = (OFPST_AGGREGATE,),
init = packvalue(OFPST_AGGREGATE, 'type'),
name = 'ofp_aggregate_stats_request'
)
'''
/* Body of reply to OFPST_AGGREGATE request. */
'''
ofp_aggregate_stats_reply = nstruct(
(uint64, 'packet_count'), # /* Number of packets in flows. */
(uint64, 'byte_count'), # /* Number of bytes in flows. */
(uint32, 'flow_count'), # /* Number of flows. */
(uint8[4],),
base = ofp_stats_reply,
criteria = lambda x: x.type == OFPST_AGGREGATE,
classifyby = (OFPST_AGGREGATE,),
init = packvalue(OFPST_AGGREGATE, 'type'),
name = 'ofp_aggregate_stats_reply'
)
'''
/* Body of reply to OFPST_TABLE request. */
'''
ofp_table_stats = nstruct(
(uint8, 'table_id'), # /* Identifier of table. Lower numbered tables are consulted first. */
(uint8[3],), # /* Align to 32-bits. */
(char[OFP_MAX_TABLE_NAME_LEN], 'name'),
(ofp_flow_wildcards, 'wildcards'), # /* Bitmap of OFPFW_* wildcards that are supported by the table. */
(uint32, 'max_entries'), # /* Max number of entries supported. */
(uint32, 'active_count'), # /* Number of active entries. */
(uint64, 'lookup_count'), # /* # of packets looked up in table. */
(uint64, 'matched_count'), # /* Number of packets that hit table. */
name = 'ofp_table_stats'
)
ofp_table_stats_reply = nstruct(
(ofp_table_stats[0], 'stats'),
name = 'ofp_table_stats_reply',
base = ofp_stats_reply,
criteria = lambda x: x.type == OFPST_TABLE,
classifyby = (OFPST_TABLE,),
init = packvalue(OFPST_TABLE, 'type')
)
'''
/* Stats request of type OFPST_PORT. */
'''
ofp_port_stats_request = nstruct(
(ofp_port, 'port_no'),
#/* OFPST_PORT message may request statistics for a single port (specified with port_no)
# or for all ports (port_no == OFPP_NONE). */
(uint8[6],),
name = 'ofp_port_stats_request',
base = ofp_stats_request,
criteria = lambda x: x.type == OFPST_PORT,
classifyby = (OFPST_PORT,),
init = packvalue(OFPST_PORT, 'type')
)
'''
/* Body of reply to OFPST_PORT request. If a counter is unsupported, set
* the field to all ones. */
'''
ofp_port_stats = nstruct(
(uint16, 'port_no'),
(uint8[6],),
(uint64, 'rx_packets'), # /* Number of received packets. */
(uint64, 'tx_packets'), # /* Number of transmitted packets. */
(uint64, 'rx_bytes'), # /* Number of received bytes. */
(uint64, 'tx_bytes'), # /* Number of transmitted bytes. */
(uint64, 'rx_dropped'), # /* Number of packets dropped by RX. */
(uint64, 'tx_dropped'), # /* Number of packets dropped by TX. */
    (uint64, 'rx_errors'), # /* Number of receive errors. This is a
                           # super-set of more specific receive errors and should be
                           # greater than or equal to the sum of all
                           # rx_*_err values. */
(uint64, 'tx_errors'), # /* Number of transmit errors. This is a super-set of transmit errors. */
(uint64, 'rx_frame_err'), # /* Number of frame alignment errors. */
(uint64, 'rx_over_err'), # /* Number of packets with RX overrun. */
(uint64, 'rx_crc_err'), # /* Number of CRC errors. */
(uint64, 'collisions'), # /* Number of collisions. */
name = 'ofp_port_stats'
)
ofp_port_stats_reply = nstruct(
(ofp_port_stats[0], 'stats'),
name = 'ofp_port_stats_reply',
base = ofp_stats_reply,
criteria = lambda x: x.type == OFPST_PORT,
classifyby = (OFPST_PORT,),
init = packvalue(OFPST_PORT, 'type')
)
'''
/* All ones is used to indicate all queues in a port (for stats retrieval). */
'''
ofp_queue = enum('ofp_queue', globals(), uint32,
OFPQ_ALL = 0xffffffff)
'''
/* Body for stats request of type OFPST_QUEUE. */
'''
ofp_queue_stats_request = nstruct(
(ofp_port, 'port_no'), # /* All ports if OFPP_ALL. */
(uint8[2],), # /* Align to 32-bits. */
(ofp_queue, 'queue_id'), # /* All queues if OFPQ_ALL. */
name = 'ofp_queue_stats_request',
base = ofp_stats_request,
criteria = lambda x: x.type == OFPST_QUEUE,
classifyby = (OFPST_QUEUE,),
init = packvalue(OFPST_QUEUE, 'type')
)
'''
/* Body for stats reply of type OFPST_QUEUE consists of an array of this
* structure type. */
'''
ofp_queue_stats = nstruct(
(uint16, 'port_no'),
(uint8[2],), # /* Align to 32-bits. */
(uint32, 'queue_id'), # /* Queue id. */
(uint64, 'tx_bytes'), # /* Number of transmitted bytes. */
(uint64, 'tx_packets'), # /* Number of transmitted packets. */
(uint64, 'tx_errors'), # /* # of packets dropped due to overrun. */
name = 'ofp_queue_stats'
)
ofp_queue_stats_reply = nstruct(
(ofp_queue_stats[0], 'stats'),
name = 'ofp_queue_stats_reply',
base = ofp_stats_reply,
criteria = lambda x: x.type == OFPST_QUEUE,
classifyby = (OFPST_QUEUE,),
init = packvalue(OFPST_QUEUE, 'type')
)
'''
/* Vendor extension stats message. */
'''
ofp_vendor_stats_request = nstruct(
(experimenter_ids, 'vendor'),
name = 'ofp_vendor_stats_request',
base = ofp_stats_request,
criteria = lambda x: x.type == OFPST_VENDOR,
classifyby = (OFPST_VENDOR,),
init = packvalue(OFPST_VENDOR, 'type')
# /* Followed by vendor-defined arbitrary additional data. */
)
ofp_vendor_stats_reply = nstruct(
(experimenter_ids, 'vendor'),
name = 'ofp_vendor_stats_reply',
base = ofp_stats_reply,
criteria = lambda x: x.type == OFPST_VENDOR,
classifyby = (OFPST_VENDOR,),
init = packvalue(OFPST_VENDOR, 'type')
# /* Followed by vendor-defined arbitrary additional data. */
)
ofp_vendor_vendorid = 'vendor'
ofp_vendor_subtype = 'subtype'
ofp_action_vendor_vendorid = 'vendor'
ofp_action_vendor_subtype = 'subtype'
ofp_stats_vendor_vendorid = 'vendor'
ofp_stats_vendor_subtype = 'subtype'
from .nicira_ext import *
'''
/* Header for Nicira vendor requests and replies. */
'''
nicira_header = nstruct(
(nxt_subtype, 'subtype'),
name = 'nicira_header',
base = ofp_vendor,
criteria = lambda x: x.vendor == NX_VENDOR_ID,
init = packvalue(NX_VENDOR_ID, 'vendor'),
classifier = lambda x: x.subtype
)
'''
/* Header for Nicira-defined actions. */
'''
nx_action = nstruct(
(nx_action_subtype, 'subtype'), # /* NXAST_*. */
name = 'nx_action',
base = ofp_action_vendor,
criteria = lambda x: x.vendor == NX_VENDOR_ID,
init = packvalue(NX_VENDOR_ID, 'vendor'),
classifier = lambda x: x.subtype
)
nx_stats_request = nstruct(
(nx_stats_subtype, 'subtype'),
(uint8[4],),
base = ofp_vendor_stats_request,
criteria = lambda x: x.vendor == NX_VENDOR_ID,
init = packvalue(NX_VENDOR_ID, 'vendor'),
name = 'nx_stats_request',
classifier = lambda x: getattr(x, 'subtype')
)
nx_stats_reply = nstruct(
(nx_stats_subtype, 'subtype'),
(uint8[4],),
base = ofp_vendor_stats_reply,
criteria = lambda x: x.vendor == NX_VENDOR_ID,
init = packvalue(NX_VENDOR_ID, 'vendor'),
name = 'nx_stats_reply',
classifier = lambda x: getattr(x, 'subtype')
)
create_extension(globals(), nicira_header, nx_action, nx_stats_request, nx_stats_reply,
ofp_vendor_subtype, ofp_action_vendor_subtype, ofp_stats_vendor_subtype)
|
projects/OneSeg/oneseg/config.py | simon108018/OneNet | 611 | 12673962 | # -*- coding: utf-8 -*-
#
# Modified by <NAME>
# Contact: <EMAIL>
#
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
from detectron2.config import CfgNode as CN
def add_onenet_config(cfg):
"""
Add config for OneNet.
"""
cfg.MODEL.OneNet = CN()
cfg.MODEL.OneNet.NUM_CLASSES = 80
cfg.MODEL.OneNet.HEAD = "FCOS"
# Head.
# cfg.MODEL.OneNet.IN_FEATURES = ["res2", "res3", "res4", "res5"]
cfg.MODEL.OneNet.IN_FEATURES = ["p3", "p4", "p5", "p6", "p7"]
cfg.MODEL.OneNet.FEATURES_STRIDE = [8, 16, 32, 64, 128]
cfg.MODEL.OneNet.NUM_CONV = 4
cfg.MODEL.OneNet.CONV_NORM = "GN"
cfg.MODEL.OneNet.CONV_CHANNELS = 256
cfg.MODEL.OneNet.ACTIVATION = 'relu'
cfg.MODEL.OneNet.NMS = False # for ablation
# Deconv
cfg.MODEL.OneNet.DECONV_CHANNEL= [2048, 256, 128, 64]
cfg.MODEL.OneNet.DECONV_KERNEL = [4, 4, 4]
cfg.MODEL.OneNet.DCN = True
cfg.MODEL.OneNet.MODULATE_DEFORM = True
# Loss.
cfg.MODEL.OneNet.CLASS_WEIGHT = 2.0
cfg.MODEL.OneNet.GIOU_WEIGHT = 2.0
cfg.MODEL.OneNet.L1_WEIGHT = 5.0
# Focal Loss.
cfg.MODEL.OneNet.ALPHA = 0.25
cfg.MODEL.OneNet.GAMMA = 2.0
cfg.MODEL.OneNet.PRIOR_PROB = 0.01
# Optimizer.
cfg.SOLVER.OPTIMIZER = "ADAMW"
cfg.SOLVER.BACKBONE_MULTIPLIER = 1.0
cfg.MODEL.CONDINST = CN()
cfg.MODEL.CONDINST.SIZES_OF_INTEREST = [64, 128, 256, 512]
# the downsampling ratio of the final instance masks to the input image
cfg.MODEL.CONDINST.MASK_OUT_STRIDE = 4
cfg.MODEL.CONDINST.BOTTOM_PIXELS_REMOVED = -1
# if not -1, we only compute the mask loss for MAX_PROPOSALS random proposals PER GPU
cfg.MODEL.CONDINST.MAX_PROPOSALS = -1
# if not -1, we only compute the mask loss for top `TOPK_PROPOSALS_PER_IM` proposals
# PER IMAGE in terms of their detection scores
cfg.MODEL.CONDINST.TOPK_PROPOSALS_PER_IM = -1
cfg.MODEL.CONDINST.MASK_HEAD = CN()
cfg.MODEL.CONDINST.MASK_HEAD.CHANNELS = 16
cfg.MODEL.CONDINST.MASK_HEAD.NUM_LAYERS = 3
cfg.MODEL.CONDINST.MASK_HEAD.USE_FP16 = False
cfg.MODEL.CONDINST.MASK_HEAD.DISABLE_REL_COORDS = False
cfg.MODEL.CONDINST.MASK_BRANCH = CN()
cfg.MODEL.CONDINST.MASK_BRANCH.OUT_CHANNELS = 8
cfg.MODEL.CONDINST.MASK_BRANCH.IN_FEATURES = ["p3", "p4", "p5"]
cfg.MODEL.CONDINST.MASK_BRANCH.CHANNELS = 128
cfg.MODEL.CONDINST.MASK_BRANCH.NORM = "BN"
cfg.MODEL.CONDINST.MASK_BRANCH.NUM_CONVS = 4
cfg.MODEL.CONDINST.MASK_BRANCH.SEMANTIC_LOSS_ON = False
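
# Hedged usage sketch (not part of the original file): how add_onenet_config is
# typically combined with a base detectron2 config. get_cfg() is the standard
# detectron2 entry point; the YAML path and the override below are placeholders,
# not values taken from this repository.
def _example_setup():
    from detectron2.config import get_cfg
    cfg = get_cfg()
    add_onenet_config(cfg)  # registers the MODEL.OneNet / MODEL.CONDINST keys defined above
    # cfg.merge_from_file("configs/onenet.yaml")  # placeholder config file
    cfg.MODEL.OneNet.NUM_CLASSES = 80  # example override of a registered key
    return cfg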
|
recipes/Python/266586_Simple_XOR_keyword_Encryption/recipe-266586.py | tdiprima/code | 2,023 | 12673989 | #PEcrypt - use a string key to encrypt/decrypt another string
# - <NAME> - January 2004
class PEcrypt:
"""
PEcrypt - very, very simple word key encryption system
uses cyclic XOR between the keyword character
bytes and the string to be encrypted/decrypted.
Therefore, the same function and keyword will
encrypt the string the first time and decrypt
it if called on the encrypted string.
"""
def __init__(self, aKey):
"""
Initialise the class with the key that
is used to encrypt/decrypt strings
"""
self.key = aKey
# CRC can be used to validate a key (very roughly)
# if you store the CRC from a previous keyword
# and then compare with a newly generated one and
# they are the same then chances are the keyword
# is correct - only a single byte so not that reliable
self.crc = 0
for x in self.key:
intX = ord(x)
self.crc = self.crc ^ intX
def Crypt(self, aString):
"""
Encrypt/Decrypt the passed string object and return
the encrypted string
"""
kIdx = 0
cryptStr = "" # empty 'crypted string to be returned
# loop through the string and XOR each byte with the keyword
# to get the 'crypted byte. Add the 'crypted byte to the
# 'crypted string
for x in range(len(aString)):
cryptStr = cryptStr + \
chr( ord(aString[x]) ^ ord(self.key[kIdx]))
# use the mod operator - % - to cyclically loop through
# the keyword
kIdx = (kIdx + 1) % len(self.key)
return cryptStr
if __name__ == "__main__":
def strToHex(aString):
hexStr = ""
for x in aString:
hexStr = hexStr + "%02X " % ord(x)
return hexStr
# self test routine
print "\nTesting PEcrypt!"
print "----------------\n"
keyStr = "This is a key"
testStr = "The quick brown fox jumps over the lazy dog!"
print "\nString : ", testStr
print "in hex : ", strToHex(testStr)
print "key : ", keyStr
pe = PEcrypt(keyStr) # generate the PEcrypt instance
print "\nPEcrypt CRC = %02X" % pe.crc
testStr = pe.Crypt(testStr)
print "\nEncrypted string"
print "Ascii : ", testStr
print "Hex : ", strToHex(testStr)
testStr = pe.Crypt(testStr)
print "\nDecrypted string"
print "Ascii : ", testStr
print "Hex : ", strToHex(testStr)
|
tests/unit/test_signature_verify.py | windies21/loopchain | 105 | 12674001 | import json
import logging
import os
from pathlib import Path
import plyvel
import pytest
from pkg_resources import parse_version
from plyvel._plyvel import Error
from loopchain.blockchain import TransactionVerifier
from loopchain.blockchain.blocks import BlockVersioner, BlockVerifier, BlockSerializer
from loopchain.blockchain.transactions import TransactionVersioner
Logger = logging.getLogger(__name__)
@pytest.fixture
def base_dir() -> Path:
# FIXME : base_dir that you want to test
base = Path(os.getcwd()).parents[1]
return base
@pytest.fixture
def plyvel_db(base_dir) -> plyvel.DB:
base_dir = base_dir / '.storage'
db_path = Path()
Logger.info(f"base_dir : {base_dir}")
if not os.path.exists(base_dir):
pytest.skip(f"'{base_dir}' does not exist")
for path in os.listdir(base_dir):
if path.startswith('db') and path.endswith('icon_dex'):
db_path = base_dir / path
break
Logger.info(f"db_path : {db_path}")
db = None
try:
db = plyvel.DB(db_path.as_posix())
except (Error, IOError):
pytest.skip("db data must be prepared for this verify test")
return db
@pytest.fixture
def block_versioner():
block_versioner = BlockVersioner()
# FIXME : block versions
mainnet_test = False
if mainnet_test:
block_versions = {
"0.1a": 0,
"0.3": 10324749,
"0.4": 12640761,
"0.5": 14473622
}
else:
block_versions = {
"0.1a": 0,
"0.4": 1,
"0.5": 30
}
for version, height in block_versions.items():
block_versioner.add_version(height, version)
return block_versioner
@pytest.fixture
def tx_versioner():
hash_versions = {
"genesis": 0,
"0x2": 1,
"0x3": 1
}
tx_versioner = TransactionVersioner()
for tx_version, tx_hash_version in hash_versions.items():
tx_versioner.hash_generator_versions[tx_version] = tx_hash_version
return tx_versioner
class TestSignatureVerify:
def test_verify(self, plyvel_db, block_versioner, tx_versioner):
"""
1. prepare plyvel db, block_versioner, tx_versioner
2. pick block, transaction, vote, etc from db
3. verify block, vote transaction, vote, etc...
"""
# given db instance, block_versioner, tx_versioner
block_key = plyvel_db.get(b'last_block_key')
while True:
# when get block from db
block_dumped = plyvel_db.get(block_key)
Logger.info(f"block_dump : {block_dumped}")
block_serialized = json.loads(block_dumped)
block_height = block_versioner.get_height(block_serialized)
block_version = block_versioner.get_version(block_height)
block_serializer = BlockSerializer.new(block_version, tx_versioner)
block = block_serializer.deserialize(block_serialized)
Logger.info(f"block_height : {block_height}, block_version : {block_version}")
if block_height == 0:
break
# then block verify
block_verifier = BlockVerifier.new(block_version, tx_versioner)
block_verifier.verify_signature(block)
# then vote verify
if parse_version(block_version) >= parse_version("0.3"):
Logger.info(f"leader_votes : {block.body.leader_votes}")
for leader_vote in block.body.leader_votes:
if not leader_vote:
continue
leader_vote.verify()
Logger.info(f"prev_votes : {block.body.prev_votes}")
for block_vote in block.body.prev_votes:
if not block_vote:
continue
block_vote.verify()
# then transaction verify
for tx in block.body.transactions.values():
tv = TransactionVerifier.new(tx.version, tx.type(), tx_versioner)
tv.verify_signature(tx)
Logger.info(f"prev_hash : {block.header.prev_hash}, {bytes(block.header.prev_hash)}")
block_key = block.header.prev_hash.hex().encode("utf-8")
|
synthtiger/components/wrapper/__init__.py | KoryakovDmitry/synthtiger | 153 | 12674003 | <filename>synthtiger/components/wrapper/__init__.py
"""
SynthTIGER
Copyright (c) 2021-present NAVER Corp.
MIT license
"""
from synthtiger.components.wrapper.iterator import Iterator
from synthtiger.components.wrapper.selector import Selector
from synthtiger.components.wrapper.switch import Switch
__all__ = ["Iterator", "Selector", "Switch"]
|
Server/integrations/azuread/AzureADAuthenticationForGluu.py | rkondratenko/oxAuth | 380 | 12674014 | # Author: <NAME>
from org.gluu.service.cdi.util import CdiUtil
from org.gluu.oxauth.security import Identity
from org.gluu.model.custom.script.type.auth import PersonAuthenticationType
from org.gluu.oxauth.service import AuthenticationService, UserService
from org.gluu.oxauth.model.common import User
from org.gluu.util import StringHelper, ArrayHelper
from java.util import IdentityHashMap
from org.apache.commons.codec.binary import Base64
from java.lang import String
import httplib
import urllib
import json
import java
class PersonAuthentication(PersonAuthenticationType):
def __init__(self, current_time_millis):
self.currentTimeMillis = current_time_millis
def init(self, customScript, configuration_attributes):
print "AzureAD. Initialization"
global azure_tenant_id
azure_tenant_id = configuration_attributes.get("azure_tenant_id").getValue2()
print "AzureAD. Initialization. Value of azure_tenant_id is %s" % azure_tenant_id
global azure_client_id
azure_client_id = configuration_attributes.get("azure_client_id").getValue2()
print "AzureAD. Initialization. Value of azure_client_id is %s" % azure_client_id
global azure_client_secret
azure_client_secret = configuration_attributes.get("azure_client_secret").getValue2()
global MICROSOFT_AUTHORITY_URL
MICROSOFT_AUTHORITY_URL = 'login.microsoftonline.com'
global AZURE_AD_GRAPH_RESOURCE_ENDPOINT
AZURE_AD_GRAPH_RESOURCE_ENDPOINT = 'https://graph.windows.net'
global azure_user_uuid
azure_user_uuid = "oid"
global gluu_ldap_uuid
gluu_ldap_uuid = "uid"
global ADMIN
ADMIN = 'admin'
global attributes_mapping
if (configuration_attributes.containsKey("azure_ad_attributes_list") and
configuration_attributes.containsKey("gluu_ldap_attributes_list")):
azure_ad_attributes_list = configuration_attributes.get("azure_ad_attributes_list").getValue2()
if StringHelper.isEmpty(azure_ad_attributes_list):
print "AzureAD: Initialization. The property azure_ad_attributes_list is empty"
return False
gluu_ldap_attributes_list = configuration_attributes.get("gluu_ldap_attributes_list").getValue2()
if StringHelper.isEmpty(gluu_ldap_attributes_list):
print "AzureAD: Initialization. The property gluu_ldap_attributes_list is empty"
return False
attributes_mapping = self.attribute_mapping_function(azure_ad_attributes_list, gluu_ldap_attributes_list)
if attributes_mapping is None:
print "AzureAD: Initialization. The attributes mapping isn't valid"
return False
print "AzureAD. Initialized successfully"
return True
@staticmethod
def attribute_mapping_function(azure_ad_attributes_list, gluu_ldap_attributes_list):
try:
azure_ad_attributes_list_array = StringHelper.split(azure_ad_attributes_list, ",")
if ArrayHelper.isEmpty(azure_ad_attributes_list_array):
print("AzureAD: There is no attributes specified in azure_ad_attributes_list property")
return None
gluu_ldap_attributes_list_array = StringHelper.split(gluu_ldap_attributes_list, ",")
if ArrayHelper.isEmpty(gluu_ldap_attributes_list_array):
print("AzureAD: There is no attributes specified in gluu_ldap_attributes_list property")
return None
if len(azure_ad_attributes_list_array) != len(gluu_ldap_attributes_list_array):
print("AzureAD: The number of attributes isn't equal")
return None
attributes_map = IdentityHashMap()
i = 0
count = len(azure_ad_attributes_list_array)
while i < count:
azure_ad_attribute = StringHelper.toLowerCase(azure_ad_attributes_list_array[i])
gluu_ldap_attribute = StringHelper.toLowerCase(gluu_ldap_attributes_list_array[i])
attributes_map.put(azure_ad_attribute, gluu_ldap_attribute)
i = i + 1
return attributes_map
except Exception, err:
print("AzureAD: Exception inside prepareAttributesMapping " + str(err))
def prepareForStep(self, configuration_attributes, request_parameters, step):
if step == 1:
print "AzureAD. Prepare for Step 1"
return True
else:
return False
def authenticate(self, configuration_attributes, request_parameters, step):
print "AzureAD. Inside authenticate. Step %d" % step
authentication_service = CdiUtil.bean(AuthenticationService)
identity = CdiUtil.bean(Identity)
if step == 1:
print "AzureAD. Authenticate for step 1"
logged_in = self.authenticate_user_credentials(identity, authentication_service)
print "AzureAD. Status of User Credentials based Authentication : %r" % logged_in
if not logged_in:
return False
print "AzureAD. Authenticate successful for step %d" % step
return True
else:
return False
def authenticate_user_credentials(self, identity, authentication_service):
credentials = identity.getCredentials()
user_name = credentials.getUsername()
        user_password = credentials.getPassword()
print "AzureAD. user_name: %s" % user_name
logged_in = False
if StringHelper.isNotEmptyString(user_name) and StringHelper.isNotEmptyString(user_password):
# Special condition to allow for Gluu admin login
if StringHelper.equals(user_name, ADMIN):
return self.authenticate_user_in_gluu_ldap(authentication_service, user_name, user_password)
# Authenticate user credentials with Azure AD non-interactively
azure_auth_response = self.authenticate_user_in_azure(azure_tenant_id, user_name, user_password, azure_client_id, azure_client_secret)
print "AzureAD. Value of azure_auth_response is %s" % azure_auth_response
azure_auth_response_json = json.loads(azure_auth_response)
if azure_user_uuid in azure_auth_response_json:
# Azure authentication has succeeded. User needs to be enrolled in Gluu LDAP
user = self.enroll_azure_user_in_gluu_ldap(azure_auth_response_json)
if user is None:
# User Enrollment in Gluu LDAP has failed
logged_in = False
else:
# Authenticating the user within Gluu
user_authenticated_in_gluu = authentication_service.authenticate(user.getUserId())
print "AzureAD: Authentication status of the user enrolled in Gluu LDAP %r " % user_authenticated_in_gluu
return user_authenticated_in_gluu
else:
# Azure authentication has failed.
logged_in = False
return logged_in
@staticmethod
def authenticate_user_in_azure(tenant_id, user_name, pwd, client_id, client_secret):
post_params_json = {'resource': AZURE_AD_GRAPH_RESOURCE_ENDPOINT, 'client_id': client_id,
                            'client_secret': client_secret, 'username': user_name, 'password': pwd,
'grant_type': 'password', 'scope': 'openid'}
post_params_url_encoded = urllib.urlencode(post_params_json)
headers_json = {'Content-type': 'application/x-www-form-urlencoded',
'Accept': 'application/json'}
conn = httplib.HTTPSConnection(MICROSOFT_AUTHORITY_URL + ':443')
relative_url = '/' + tenant_id + '/oauth2/token'
conn.request('POST', relative_url, post_params_url_encoded, headers_json)
response = conn.getresponse()
# print response.status, response.reason
azure_response = response.read()
conn.close()
# print "Response Data: %s" % azure_response
azure_response_json = json.loads(azure_response)
if 'id_token' in azure_response_json:
id_token = azure_response_json['id_token']
id_token_array = String(id_token).split("\\.")
id_token_payload = id_token_array[1]
id_token_payload_str = String(Base64.decodeBase64(id_token_payload), 'UTF-8')
return str(id_token_payload_str)
else:
return azure_response
def enroll_azure_user_in_gluu_ldap(self, azure_auth_response_json):
user_service = CdiUtil.bean(UserService)
azure_user_uuid_value = azure_auth_response_json[azure_user_uuid]
found_user = self.find_user_from_gluu_ldap_by_attribute(user_service, gluu_ldap_uuid, azure_user_uuid_value)
print "AzureAD. Value of found_user is %s" % found_user
if found_user is None:
new_user = User()
self.populate_user_obj_with_azure_user_data(new_user, azure_auth_response_json)
try:
# Add azure user in Gluu LDAP
found_user = user_service.addUser(new_user, True)
found_user_id = found_user.getUserId()
print("AzureAD: Azure User added successfully in Gluu LDAP " + found_user_id)
except Exception, err:
print("AzureAD: Error in adding azure user to Gluu LDAP:" + str(err))
return None
else:
self.populate_user_obj_with_azure_user_data(found_user, azure_auth_response_json)
try:
# Update the user in Gluu LDAP with latest values from Azure AD
found_user = user_service.updateUser(found_user)
found_user_id = found_user.getUserId()
print("AzureAD: Azure User updated successfully in Gluu LDAP " + found_user_id)
except Exception, err:
print("AzureAD: Error in updating azure user to Gluu LDAP:" + str(err))
return None
return found_user
@staticmethod
def populate_user_obj_with_azure_user_data(user, azure_auth_response_json):
# attributes_mapping = ["oid:uid", "given_name:givenName", "family_name:sn", "upn:mail"]
for attributesMappingEntry in attributes_mapping.entrySet():
azure_ad_attribute_key = attributesMappingEntry.getKey()
gluu_ldap_attribute_key = attributesMappingEntry.getValue()
gluu_ldap_attribute_value = "undefined"
if azure_ad_attribute_key in azure_auth_response_json:
gluu_ldap_attribute_value = azure_auth_response_json[azure_ad_attribute_key]
print gluu_ldap_attribute_key + ' : ' + gluu_ldap_attribute_value
            if (gluu_ldap_attribute_key is not None) and (gluu_ldap_attribute_value != "undefined"):
user.setAttribute(gluu_ldap_attribute_key, gluu_ldap_attribute_value)
return None
@staticmethod
def authenticate_user_in_gluu_ldap(authentication_service, user_name, user_password):
return authentication_service.authenticate(user_name, user_password)
@staticmethod
def find_user_from_gluu_ldap_by_attribute(user_service, attribute_name, attribute_value):
return user_service.getUserByAttribute(attribute_name, attribute_value)
def getExtraParametersForStep(self, configuration_attributes, step):
return None
def getCountAuthenticationSteps(self, configuration_attributes):
return 1
def getPageForStep(self, configuration_attributes, step):
return ""
def destroy(self, configuration_attributes):
print "AzureAD. Destroy"
return True
def getAuthenticationMethodClaims(self, requestParameters):
return None
def getApiVersion(self):
return 11
def isValidAuthenticationMethod(self, usage_type, configuration_attributes):
return True
def getAlternativeAuthenticationMethod(self, usage_type, configuration_attributes):
return None
def logout(self, configuration_attributes, request_parameters):
return True
def getNextStep(self, configurationAttributes, requestParameters, step):
return -1
def getLogoutExternalUrl(self, configurationAttributes, requestParameters):
print "Get external logout URL call"
        return None
|
blueapps/account/sites/open/conf.py | qqqqqie/bk-sops | 881 | 12674019 | # -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making 蓝鲸智云PaaS平台社区版 (BlueKing PaaS Community
Edition) available.
Copyright (C) 2017-2021 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
from django.conf import settings
class ConfFixture(object):
BACKEND_TYPE = "bk_token"
USER_BACKEND = "bk_token.backends.TokenBackend"
LOGIN_REQUIRED_MIDDLEWARE = "bk_token.middlewares.LoginRequiredMiddleware"
USER_MODEL = "bk_token.models.UserProxy"
CONSOLE_LOGIN_URL = settings.BK_PAAS_HOST
LOGIN_URL = settings.BK_LOGIN_URL + "/"
LOGIN_PLAIN_URL = settings.BK_LOGIN_URL + "/plain/"
VERIFY_URL = settings.BK_LOGIN_INNER_URL + "/accounts/is_login/"
USER_INFO_URL = settings.BK_LOGIN_INNER_URL + "/accounts/get_user/"
HAS_PLAIN = False
ADD_CROSS_PREFIX = False
ADD_APP_CODE = True
IFRAME_HEIGHT = 400
IFRAME_WIDTH = 400
WEIXIN_BACKEND_TYPE = "null"
WEIXIN_MIDDLEWARE = "null.NullMiddleware"
WEIXIN_BACKEND = "null.NullBackend"
SMS_CLIENT_MODULE = "cmsi"
SMS_CLIENT_FUNC = "send_sms"
SMS_CLIENT_USER_ARGS_NAME = "receiver__username"
SMS_CLIENT_CONTENT_ARGS_NAME = "content"
RIO_BACKEND_TYPE = "null"
RIO_MIDDLEWARE = "null.NullMiddleware"
RIO_BACKEND = "null.NullBackend"
BK_JWT_MIDDLEWARE = "bk_jwt.middlewares.BkJwtLoginRequiredMiddleware"
BK_JWT_BACKEND = "bk_jwt.backends.BkJwtBackend"
|
sdks/python/apache_beam/io/gcp/dicomio_integration_test.py | hengfengli/beam | 5,279 | 12674028 | <gh_stars>1000+
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Integration test for Google Cloud DICOM IO connector.
This e2e test will first create a temporary empty DICOM storage and send 18
DICOM files from gs://apache-beam-samples/healthcare/dicom/io_test_files to
it. The test will compare the metadata of a persistent DICOM storage, which
represents ground truths and has 18 files stored, to the temporary storage
in order to check if the connectors are functioning correctly.
"""
# pytype: skip-file
import datetime
import random
import string
import unittest
import pytest
import apache_beam as beam
from apache_beam.io import fileio
from apache_beam.testing.test_pipeline import TestPipeline
from apache_beam.testing.util import assert_that
from apache_beam.testing.util import equal_to
# pylint: disable=wrong-import-order, wrong-import-position
try:
from apache_beam.io.gcp.dicomclient import DicomApiHttpClient
from apache_beam.io.gcp.dicomio import DicomSearch
from apache_beam.io.gcp.dicomio import UploadToDicomStore
from google.auth import default
from google.auth.transport import requests
except ImportError:
DicomSearch = None
# pylint: enable=wrong-import-order, wrong-import-position
REGION = 'us-central1'
DATA_SET_ID = 'apache-beam-integration-testing'
HEALTHCARE_BASE_URL = 'https://healthcare.googleapis.com/v1'
GCS_BASE_URL = 'https://storage.googleapis.com/storage/v1'
PERSISTENT_DICOM_STORE_NAME = "dicom_it_persistent_store"
BUCKET_NAME = 'apache-beam-samples'
DICOM_DIR_PATH = 'healthcare/dicom'
DICOM_FILES_PATH = 'gs://' + BUCKET_NAME + '/' + DICOM_DIR_PATH
METADATA_DIR_PATH = DICOM_DIR_PATH + '/io_test_metadata/'
META_DATA_ALL_NAME = 'Dicom_io_it_test_data.json'
META_DATA_REFINED_NAME = 'Dicom_io_it_test_refined_data.json'
NUM_INSTANCE = 18
RAND_LEN = 15
def random_string_generator(length):
letters_and_digits = string.ascii_letters + string.digits
result = ''.join((random.choice(letters_and_digits) for i in range(length)))
return result
def create_dicom_store(project_id, dataset_id, region, dicom_store_id):
  # Create an empty DICOM store
credential, _ = default()
session = requests.AuthorizedSession(credential)
api_endpoint = "{}/projects/{}/locations/{}".format(
HEALTHCARE_BASE_URL, project_id, region)
# base of dicomweb path.
dicomweb_path = "{}/datasets/{}/dicomStores".format(api_endpoint, dataset_id)
response = session.post(
dicomweb_path, params={"dicomStoreId": dicom_store_id})
response.raise_for_status()
return response.status_code
def delete_dicom_store(project_id, dataset_id, region, dicom_store_id):
# Delete an existing DICOM store
credential, _ = default()
session = requests.AuthorizedSession(credential)
api_endpoint = "{}/projects/{}/locations/{}".format(
HEALTHCARE_BASE_URL, project_id, region)
# base of dicomweb path.
dicomweb_path = "{}/datasets/{}/dicomStores/{}".format(
api_endpoint, dataset_id, dicom_store_id)
response = session.delete(dicomweb_path)
response.raise_for_status()
return response.status_code
def get_gcs_file_http(file_name):
# Get gcs file from REST Api
file_name = file_name.replace('/', '%2F')
api_endpoint = "{}/b/{}/o/{}?alt=media".format(
GCS_BASE_URL, BUCKET_NAME, file_name)
credential, _ = default()
session = requests.AuthorizedSession(credential)
response = session.get(api_endpoint)
response.raise_for_status()
return response.json()
@unittest.skipIf(DicomSearch is None, 'GCP dependencies are not installed')
class DICOMIoIntegrationTest(unittest.TestCase):
def setUp(self):
self.test_pipeline = TestPipeline(is_integration_test=True)
self.project = self.test_pipeline.get_option('project')
self.expected_output_all_metadata = get_gcs_file_http(
METADATA_DIR_PATH + META_DATA_ALL_NAME)
self.expected_output_refined_metadata = get_gcs_file_http(
METADATA_DIR_PATH + META_DATA_REFINED_NAME)
# create a temp Dicom store based on the time stamp
self.temp_dicom_store = "DICOM_store_" + datetime.datetime.now().strftime(
'%Y-%m-%d_%H%M%S.%f_') + random_string_generator(RAND_LEN)
create_dicom_store(self.project, DATA_SET_ID, REGION, self.temp_dicom_store)
def tearDown(self):
# clean up the temp Dicom store
delete_dicom_store(self.project, DATA_SET_ID, REGION, self.temp_dicom_store)
@pytest.mark.it_postcommit
def test_dicom_search_instances(self):
# Search and compare the metadata of a persistent DICOM store.
# Both refine and comprehensive search will be tested.
input_dict_all = {}
input_dict_all['project_id'] = self.project
input_dict_all['region'] = REGION
input_dict_all['dataset_id'] = DATA_SET_ID
input_dict_all['dicom_store_id'] = PERSISTENT_DICOM_STORE_NAME
input_dict_all['search_type'] = "instances"
input_dict_refine = {}
input_dict_refine['project_id'] = self.project
input_dict_refine['region'] = REGION
input_dict_refine['dataset_id'] = DATA_SET_ID
input_dict_refine['dicom_store_id'] = PERSISTENT_DICOM_STORE_NAME
input_dict_refine['search_type'] = "instances"
input_dict_refine['params'] = {
'StudyInstanceUID': 'study_000000001', 'limit': 500, 'offset': 0
}
expected_dict_all = {}
expected_dict_all['result'] = self.expected_output_all_metadata
expected_dict_all['status'] = 200
expected_dict_all['input'] = input_dict_all
expected_dict_all['success'] = True
expected_dict_refine = {}
expected_dict_refine['result'] = self.expected_output_refined_metadata
expected_dict_refine['status'] = 200
expected_dict_refine['input'] = input_dict_refine
expected_dict_refine['success'] = True
with self.test_pipeline as p:
results_all = (
p
| 'create all dict' >> beam.Create([input_dict_all])
| 'search all' >> DicomSearch())
results_refine = (
p
| 'create refine dict' >> beam.Create([input_dict_refine])
| 'search refine' >> DicomSearch())
assert_that(
results_all, equal_to([expected_dict_all]), label='all search assert')
assert_that(
results_refine,
equal_to([expected_dict_refine]),
label='refine search assert')
@pytest.mark.it_postcommit
def test_dicom_store_instance_from_gcs(self):
# Store DICOM files to a empty DICOM store from a GCS bucket,
# then check if the store metadata match.
input_dict_store = {}
input_dict_store['project_id'] = self.project
input_dict_store['region'] = REGION
input_dict_store['dataset_id'] = DATA_SET_ID
input_dict_store['dicom_store_id'] = self.temp_dicom_store
expected_output = [True] * NUM_INSTANCE
with self.test_pipeline as p:
gcs_path = DICOM_FILES_PATH + "/io_test_files/*"
results = (
p
| fileio.MatchFiles(gcs_path)
| fileio.ReadMatches()
| UploadToDicomStore(input_dict_store, 'fileio')
| beam.Map(lambda x: x['success']))
assert_that(
results, equal_to(expected_output), label='store first assert')
# Check the metadata using client
result, status_code = DicomApiHttpClient().qido_search(
self.project, REGION, DATA_SET_ID, self.temp_dicom_store, 'instances'
)
self.assertEqual(status_code, 200)
# List comparison based on different version of python
self.assertCountEqual(result, self.expected_output_all_metadata)
if __name__ == '__main__':
unittest.main()
|
dataset-construction/src/ndb_data/data_import/wikidata_index.py | j6mes/NeuralDB | 213 | 12674029 | #
# Copyright (c) 2021 Facebook, Inc. and its affiliates.
#
# This file is part of NeuralDB.
# See https://github.com/facebookresearch/NeuralDB for further info.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import bz2
import json
from collections import defaultdict
from json import JSONDecodeError
import pydash
from argparse import ArgumentParser
from tqdm import tqdm
from ndb_data.wikidata_common.wikidata import Wikidata
def read_dump(wikidata_file):
with bz2.open(wikidata_file, mode="rt") as f:
f.read(2)
for line in f:
yield line.rstrip(",\n")
def get_indexable(instance):
wikidata_id = pydash.get(instance, "id")
english_name = pydash.get(instance, "labels.en.value")
claims = pydash.get(instance, "claims")
properties = set()
property_entity = defaultdict(list)
    for prop, prop_claims in claims.items():
        properties.add(prop)
        for claim in prop_claims:
            property_entity[prop].append(
                (
                    pydash.get(claim, "mainsnak.datavalue.value"),
                    list(pydash.get(claim, "qualifiers").values())
                    if pydash.get(claim, "qualifiers") is not None
                    else None,
                )
            )
sitelinks = pydash.get(instance, "sitelinks")
enwiki = pydash.get(instance, "sitelinks.enwiki.title")
yield wikidata_id, english_name, sitelinks, enwiki, list(properties), dict(
property_entity
)
def index_dump(dump):
for idx, line in enumerate(dump):
try:
yield from get_indexable(json.loads(line))
except JSONDecodeError as e:
print(e)
pass
if __name__ == "__main__":
parser = ArgumentParser()
parser.add_argument("wikidata_file")
args = parser.parse_args()
wiki = Wikidata()
collection = wiki.collection
insert_count = 0
dump = read_dump(args.wikidata_file)
batch = []
_tqdm_iter = tqdm(index_dump(dump), total=90e6)
for w_id, e_name, sitelinks, enwiki, props, prop_dict in _tqdm_iter:
batch.append(
{
"wikidata_id": w_id,
"english_name": e_name,
"english_wiki": enwiki,
"property_types": props,
"properties": prop_dict,
"sitelinks": list(sitelinks.values()),
}
)
if len(batch) >= 5000:
collection.insert_many(batch)
batch = []
insert_count += 1
_tqdm_iter.desc = f"Insert batch {insert_count}"
print("last")
collection.insert_many(batch)
|